v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
machine-operator.h
Go to the documentation of this file.
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
6#define V8_COMPILER_MACHINE_OPERATOR_H_
7
8#include <optional>
9
11#include "src/base/enum-set.h"
12#include "src/base/flags.h"
17#include "src/zone/zone.h"
18
19namespace v8 {
20namespace internal {
21namespace compiler {
22
23// Forward declarations.
24struct MachineOperatorGlobalCache;
25class Operator;
26
27
28// For operators that are not supported on all platforms.
29class OptionalOperator final {
30 public:
31 OptionalOperator(bool supported, const Operator* op)
32 : supported_(supported), op_(op) {}
33
34 bool IsSupported() const { return supported_; }
35 // Gets the operator only if it is supported.
36 const Operator* op() const {
38 return op_;
39 }
40 // Always gets the operator, even for unsupported operators. This is useful to
41 // use the operator as a placeholder in a graph, for instance.
42 const Operator* placeholder() const { return op_; }
43
44 private:
46 const Operator* const op_;
47};
48
49// A Load needs a MachineType.
51
54
55// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
56// order.
73
76
78
79V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
80
83
97
100
102
103V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicOpParameters);
104
107
141
143
144V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadTransformation);
145
150
152
153V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
155
158
162
168
169V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadLaneParameters);
170
173
174// A Store needs a MachineType and a WriteBarrierKind in order to emit the
175// correct write barrier, and needs to state whether it is storing into the
176// header word, so that the value can be packed, if necessary.
191
193 : public std::pair<StoreRepresentation, StoreRepresentation> {
196 friend std::ostream& operator<<(std::ostream& out,
197 const StorePairRepresentation rep);
198};
199
202
204
205V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
206
209
212
213// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
241
244
246
247V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
249
252
253// An UnalignedStore needs a MachineType.
255
258
264
266
269
271 public:
274
275 int size() const { return size_; }
276 int alignment() const { return alignment_; }
277 bool is_tagged() const { return is_tagged_; }
278
279 private:
280 int size_;
283};
284
288
290
291V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
293
296
298
299template <const int simd_size = kSimd128Size,
300 typename = std::enable_if_t<simd_size == kSimd128Size ||
301 simd_size == kSimd256Size>>
303 public:
304 explicit SimdImmediateParameter(const uint8_t immediate[simd_size]) {
305 std::copy(immediate, immediate + simd_size, immediate_.begin());
306 }
308 const std::array<uint8_t, simd_size>& immediate() const { return immediate_; }
309 const uint8_t* data() const { return immediate_.data(); }
310 uint8_t operator[](int x) const { return immediate_[x]; }
311
312 private:
313 std::array<uint8_t, simd_size> immediate_;
314};
315
318
319template <const int simd_size>
323 return (lhs.immediate() == rhs.immediate());
324}
325
326template <const int simd_size>
329 return !(lhs == rhs);
330}
331
332template <const int simd_size>
334 return base::hash_range(p.immediate().begin(), p.immediate().end());
335}
336
337template <const int simd_size>
338V8_EXPORT_PRIVATE inline std::ostream& operator<<(
339 std::ostream& os, SimdImmediateParameter<simd_size> const& p) {
340 for (int i = 0; i < simd_size; i++) {
341 const char* separator = (i < simd_size - 1) ? "," : "";
342 os << static_cast<uint32_t>(p[i]) << separator;
343 }
344 return os;
345}
346
349
352
354
355// ShiftKind::kShiftOutZeros means that it is guaranteed that the bits shifted
356// out of the left operand are all zeros. If this is not the case, undefined
357// behavior (i.e., incorrect optimizations) will happen.
358// This is mostly useful for Smi untagging.
360
361size_t hash_value(ShiftKind);
362V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ShiftKind);
364
365// TruncateKind::kSetOverflowToMin sets the result of a saturating float-to-int
366// conversion to INT_MIN if the conversion returns INT_MAX due to overflow. This
367// makes it easier to detect an overflow. This parameter is ignored on platforms
368// like x64 and ia32 where a range overflow does not result in INT_MAX.
370std::ostream& operator<<(std::ostream& os, TruncateKind kind);
372
373// Interface for building machine-level operators. These operators are
374// machine-level but machine-independent and thus define a language suitable
375// for generating code to run on architectures such as ia32, x64, arm, etc.
377 : public NON_EXPORTED_BASE(ZoneObject) {
378 public:
379 // Flags that specify which operations are available. This is useful
380 // for operations that are unsupported by some back-ends.
381 enum Flag : unsigned {
383 kFloat32RoundDown = 1u << 0,
384 kFloat64RoundDown = 1u << 1,
385 kFloat32RoundUp = 1u << 2,
386 kFloat64RoundUp = 1u << 3,
387 kFloat32RoundTruncate = 1u << 4,
388 kFloat64RoundTruncate = 1u << 5,
389 kFloat32RoundTiesEven = 1u << 6,
390 kFloat64RoundTiesEven = 1u << 7,
391 kFloat64RoundTiesAway = 1u << 8,
392 kInt32DivIsSafe = 1u << 9,
393 kUint32DivIsSafe = 1u << 10,
394 kWord32ShiftIsSafe = 1u << 11,
395 kWord32Ctz = 1u << 12,
396 kWord64Ctz = 1u << 13,
397 kWord64CtzLowerable = 1u << 14,
398 kWord32Popcnt = 1u << 15,
399 kWord64Popcnt = 1u << 16,
400 kWord32ReverseBits = 1u << 17,
401 kWord64ReverseBits = 1u << 18,
402 kFloat32Select = 1u << 19,
403 kFloat64Select = 1u << 20,
404 kInt32AbsWithOverflow = 1u << 21,
405 kInt64AbsWithOverflow = 1u << 22,
406 kWord32Rol = 1u << 23,
407 kWord64Rol = 1u << 24,
408 kWord64RolLowerable = 1u << 25,
409 kSatConversionIsSafe = 1u << 26,
410 kWord32Select = 1u << 27,
411 kWord64Select = 1u << 28,
412 kLoadStorePairs = 1u << 29,
413 kFloat16 = 1u << 30,
414 kFloat16RawBitsConversion = 1u << 31,
415 kAllOptionalOps =
416 kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
417 kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
418 kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
419 kWord32Ctz | kWord64Ctz | kWord64CtzLowerable | kWord32Popcnt |
420 kWord64Popcnt | kWord32ReverseBits | kWord64ReverseBits |
421 kInt32AbsWithOverflow | kInt64AbsWithOverflow | kWord32Rol |
422 kWord64Rol | kWord64RolLowerable | kSatConversionIsSafe |
423 kFloat32Select | kFloat64Select | kWord32Select | kWord64Select |
424 kLoadStorePairs | kFloat16 | kFloat16RawBitsConversion
425 };
427
429 public:
430 enum UnalignedAccessSupport { kNoSupport, kSomeSupport, kFullSupport };
431
433 return IsUnalignedSupported(unalignedLoadUnsupportedTypes_, rep);
434 }
435
437 return IsUnalignedSupported(unalignedStoreUnsupportedTypes_, rep);
438 }
439
447 base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes,
448 base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes) {
449 return AlignmentRequirements(kSomeSupport, unalignedLoadUnsupportedTypes,
450 unalignedStoreUnsupportedTypes);
451 }
452
453 private:
456 base::EnumSet<MachineRepresentation> unalignedLoadUnsupportedTypes =
458 base::EnumSet<MachineRepresentation> unalignedStoreUnsupportedTypes =
460 : unalignedSupport_(unalignedAccessSupport),
461 unalignedLoadUnsupportedTypes_(unalignedLoadUnsupportedTypes),
462 unalignedStoreUnsupportedTypes_(unalignedStoreUnsupportedTypes) {}
463
465 MachineRepresentation rep) const {
466 // All accesses of bytes in memory are aligned.
467 DCHECK_NE(MachineRepresentation::kWord8, rep);
468 switch (unalignedSupport_) {
469 case kFullSupport:
470 return true;
471 case kNoSupport:
472 return false;
473 case kSomeSupport:
474 return !unsupported.contains(rep);
475 }
476 UNREACHABLE();
477 }
478
482 };
483
484 explicit MachineOperatorBuilder(
485 Zone* zone,
486 MachineRepresentation word = MachineType::PointerRepresentation(),
487 Flags supportedOperators = kNoFlags,
488 AlignmentRequirements alignmentRequirements =
489 AlignmentRequirements::FullUnalignedAccessSupport());
490
493
494 const Operator* Comment(const char* msg);
495 const Operator* AbortCSADcheck();
496 const Operator* DebugBreak();
497
503 const Operator* Word32Sar(ShiftKind kind);
504 const Operator* Word32Sar() { return Word32Sar(ShiftKind::kNormal); }
506 return Word32Sar(ShiftKind::kShiftOutZeros);
507 }
522
523 // Return true if the target's Word32 shift implementation is directly
524 // compatible with JavaScript's specification. Otherwise, we have to manually
525 // generate a mask with 0x1f on the amount ahead of generating the shift.
526 bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
527
528 // Return true if the target's implementation of float-to-int-conversions is a
529 // saturating conversion rounding towards 0. Otherwise, we have to manually
530 // generate the correct value if a saturating conversion is requested.
531 bool SatConversionIsSafe() const { return flags_ & kSatConversionIsSafe; }
532
533 // Return true if the target supports performing a pair of loads/stores in
534 // a single operation.
536 return !v8_flags.enable_unconditional_write_barriers &&
537 (flags_ & kLoadStorePairs);
538 }
539
545 const Operator* Word64Sar(ShiftKind kind);
546 const Operator* Word64Sar() { return Word64Sar(ShiftKind::kNormal); }
548 return Word64Sar(ShiftKind::kShiftOutZeros);
549 }
550
551 // 64-bit rol, ror, clz and ctz operators have two versions: the non-suffixed
552 // ones are meant to be used in 64-bit systems and have no control input. The
553 // "Lowerable"-suffixed ones are meant to be temporary operators in 32-bit
554 // systems and will be lowered to 32-bit operators. They have a control input
555 // to enable the lowering.
564
566
573
590 bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
591 bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
592
609
610 // This operator reinterprets the bits of a tagged pointer as a word.
611 const Operator* BitcastTaggedToWord();
612
613 // This operator reinterprets the bits of a tagged value as a word preserving
614 // non-pointer bits (all the bits that are not modified by GC):
615 // 1) smi tag
616 // 2) weak tag
617 // 3) smi payload if the tagged value is a smi.
618 // Note, that it's illegal to "look" at the pointer bits of non-smi values.
620
621 // This operator reinterprets the bits of a tagged Tagged<MaybeObject> pointer
622 // as word.
623 const Operator* BitcastMaybeObjectToWord();
624
625 // This operator reinterprets the bits of a word as tagged pointer.
626 const Operator* BitcastWordToTagged();
627
628 // This operator reinterprets the bits of a word as a Smi.
630
631 // JavaScript float64 to int32/uint32 truncation.
633
634 // These operators change the representation of numbers while preserving the
635 // value of the number. Narrowing operators assume the input is representable
636 // in the target type and are *not* defined for other inputs.
637 // Use narrowing change operators only when there is a static guarantee that
638 // the input value is representable in the target value.
639 //
640 // Some operators can have the behaviour on overflow change through specifying
641 // TruncateKind. The exact semantics are documented in the tests in
642 // test/cctest/compiler/test-run-machops.cc .
644 const Operator* ChangeFloat64ToInt32(); // narrowing
646 const Operator* ChangeFloat64ToUint32(); // narrowing
648 const Operator* TruncateFloat64ToInt64(TruncateKind kind);
650 const Operator* TruncateFloat32ToInt32(TruncateKind kind);
651 const Operator* TruncateFloat32ToUint32(TruncateKind kind);
664
665 // These operators truncate or round numbers, both changing the representation
666 // of the number and mapping multiple input values onto the same output value.
668 const OptionalOperator ChangeFloat16RawBitsToFloat64();
669 const OptionalOperator TruncateFloat64ToFloat16RawBits();
678
679 // These operators reinterpret the bits of a floating point number as an
680 // integer and vice versa.
685
686 // These operators sign-extend to Int32/Int64
692
693 // Floating point operators always operate with IEEE 754 round-to-nearest
694 // (single-precision).
700
701 // Floating point operators always operate with IEEE 754 round-to-nearest
702 // (double-precision).
709
710 // Floating point comparisons complying to IEEE 754 (single-precision).
714
715 // Floating point comparisons complying to IEEE 754 (double-precision).
719
720 // Floating point min/max complying to ECMAScript 6 (double-precision).
723 // Floating point min/max complying to WebAssembly (single-precision).
726
727 // Floating point abs complying to IEEE 754 (single-precision).
729
730 // Floating point abs complying to IEEE 754 (double-precision).
732
733 // Floating point rounding.
743
744 // Conditional selects. Input 1 is the condition, Input 2 is the result value
745 // if the condition is {true}, Input 3 is the result value if the condition is
746 // false.
751
752 // Floating point neg.
755
756 // Floating point trigonometric functions (double-precision).
770
771 // Floating point exponential functions (double-precision).
775
776 // Floating point logarithm (double-precision).
781
782 // Floating point cube root (double-precision).
784
785 // Floating point bit representation.
790
791 // Change signalling NaN to quiet NaN.
792 // Identity for any input that is not signalling NaN.
794
795 // SIMD operators also used outside of Wasm (e.g. swisstable).
799
800#if V8_ENABLE_WEBASSEMBLY
801 // SIMD operators.
802 const Operator* F64x2Splat();
803 const Operator* F64x2Abs();
804 const Operator* F64x2Neg();
805 const Operator* F64x2Sqrt();
806 const Operator* F64x2Add();
807 const Operator* F64x2Sub();
808 const Operator* F64x2Mul();
809 const Operator* F64x2Div();
810 const Operator* F64x2ExtractLane(int32_t);
811 const Operator* F64x2Min();
812 const Operator* F64x2Max();
813 const Operator* F64x2ReplaceLane(int32_t);
814 const Operator* F64x2Eq();
815 const Operator* F64x2Ne();
816 const Operator* F64x2Lt();
817 const Operator* F64x2Le();
818 const Operator* F64x2Qfma();
819 const Operator* F64x2Qfms();
820 const Operator* F64x2Pmin();
821 const Operator* F64x2Pmax();
822 const Operator* F64x2Ceil();
823 const Operator* F64x2Floor();
824 const Operator* F64x2Trunc();
825 const Operator* F64x2NearestInt();
826 const Operator* F64x2ConvertLowI32x4S();
827 const Operator* F64x2ConvertLowI32x4U();
828 const Operator* F64x2PromoteLowF32x4();
829
830 const Operator* F32x4Splat();
831 const Operator* F32x4ExtractLane(int32_t);
832 const Operator* F32x4ReplaceLane(int32_t);
833 const Operator* F32x4SConvertI32x4();
834 const Operator* F32x4UConvertI32x4();
835 const Operator* F32x4Abs();
836 const Operator* F32x4Neg();
837 const Operator* F32x4Sqrt();
838 const Operator* F32x4Add();
839 const Operator* F32x4Sub();
840 const Operator* F32x4Mul();
841 const Operator* F32x4Div();
842 const Operator* F32x4Min();
843 const Operator* F32x4Max();
844 const Operator* F32x4Eq();
845 const Operator* F32x4Ne();
846 const Operator* F32x4Lt();
847 const Operator* F32x4Le();
848 const Operator* F32x4Qfma();
849 const Operator* F32x4Qfms();
850 const Operator* F32x4Pmin();
851 const Operator* F32x4Pmax();
852 const Operator* F32x4Ceil();
853 const Operator* F32x4Floor();
854 const Operator* F32x4Trunc();
855 const Operator* F32x4NearestInt();
856 const Operator* F32x4DemoteF64x2Zero();
857
858 const Operator* F16x8Splat();
859 const Operator* F16x8ExtractLane(int32_t);
860 const Operator* F16x8ReplaceLane(int32_t);
861 const Operator* F16x8Abs();
862 const Operator* F16x8Neg();
863 const Operator* F16x8Sqrt();
864 const Operator* F16x8Ceil();
865 const Operator* F16x8Floor();
866 const Operator* F16x8Trunc();
867 const Operator* F16x8NearestInt();
868 const Operator* F16x8Add();
869 const Operator* F16x8Sub();
870 const Operator* F16x8Mul();
871 const Operator* F16x8Div();
872 const Operator* F16x8Min();
873 const Operator* F16x8Max();
874 const Operator* F16x8Pmin();
875 const Operator* F16x8Pmax();
876 const Operator* F16x8Eq();
877 const Operator* F16x8Ne();
878 const Operator* F16x8Lt();
879 const Operator* F16x8Le();
880 const Operator* F16x8SConvertI16x8();
881 const Operator* F16x8UConvertI16x8();
882 const Operator* I16x8SConvertF16x8();
883 const Operator* I16x8UConvertF16x8();
884 const Operator* F32x4PromoteLowF16x8();
885 const Operator* F16x8DemoteF32x4Zero();
886 const Operator* F16x8DemoteF64x2Zero();
887 const Operator* F16x8Qfma();
888 const Operator* F16x8Qfms();
889
890 const Operator* I64x2Splat();
891 const Operator* I64x2SplatI32Pair();
892 const Operator* I64x2ExtractLane(int32_t);
893 const Operator* I64x2ReplaceLane(int32_t);
894 const Operator* I64x2ReplaceLaneI32Pair(int32_t);
895 const Operator* I64x2Abs();
896 const Operator* I64x2Neg();
897 const Operator* I64x2SConvertI32x4Low();
898 const Operator* I64x2SConvertI32x4High();
899 const Operator* I64x2UConvertI32x4Low();
900 const Operator* I64x2UConvertI32x4High();
901 const Operator* I64x2BitMask();
902 const Operator* I64x2Shl();
903 const Operator* I64x2ShrS();
904 const Operator* I64x2Add();
905 const Operator* I64x2Sub();
906 const Operator* I64x2Mul();
907 const Operator* I64x2Eq();
908 const Operator* I64x2Ne();
909 const Operator* I64x2GtS();
910 const Operator* I64x2GeS();
911 const Operator* I64x2ShrU();
912 const Operator* I64x2ExtMulLowI32x4S();
913 const Operator* I64x2ExtMulHighI32x4S();
914 const Operator* I64x2ExtMulLowI32x4U();
915 const Operator* I64x2ExtMulHighI32x4U();
916
917 const Operator* I32x4Splat();
918 const Operator* I32x4ExtractLane(int32_t);
919 const Operator* I32x4ReplaceLane(int32_t);
920 const Operator* I32x4SConvertF32x4();
921 const Operator* I32x4SConvertI16x8Low();
922 const Operator* I32x4SConvertI16x8High();
923 const Operator* I32x4Neg();
924 const Operator* I32x4Shl();
925 const Operator* I32x4ShrS();
926 const Operator* I32x4Add();
927 const Operator* I32x4Sub();
928 const Operator* I32x4Mul();
929 const Operator* I32x4MinS();
930 const Operator* I32x4MaxS();
931 const Operator* I32x4Eq();
932 const Operator* I32x4Ne();
933 const Operator* I32x4GtS();
934 const Operator* I32x4GeS();
935
936 const Operator* I32x4UConvertF32x4();
937 const Operator* I32x4UConvertI16x8Low();
938 const Operator* I32x4UConvertI16x8High();
939 const Operator* I32x4ShrU();
940 const Operator* I32x4MinU();
941 const Operator* I32x4MaxU();
942 const Operator* I32x4GtU();
943 const Operator* I32x4GeU();
944 const Operator* I32x4Abs();
945 const Operator* I32x4BitMask();
946 const Operator* I32x4DotI16x8S();
947 const Operator* I32x4ExtMulLowI16x8S();
948 const Operator* I32x4ExtMulHighI16x8S();
949 const Operator* I32x4ExtMulLowI16x8U();
950 const Operator* I32x4ExtMulHighI16x8U();
951 const Operator* I32x4ExtAddPairwiseI16x8S();
952 const Operator* I32x4ExtAddPairwiseI16x8U();
953 const Operator* I32x4TruncSatF64x2SZero();
954 const Operator* I32x4TruncSatF64x2UZero();
955
956 const Operator* I16x8Splat();
957 const Operator* I16x8ExtractLaneU(int32_t);
958 const Operator* I16x8ExtractLaneS(int32_t);
959 const Operator* I16x8ReplaceLane(int32_t);
960 const Operator* I16x8SConvertI8x16Low();
961 const Operator* I16x8SConvertI8x16High();
962 const Operator* I16x8Neg();
963 const Operator* I16x8Shl();
964 const Operator* I16x8ShrS();
965 const Operator* I16x8SConvertI32x4();
966 const Operator* I16x8Add();
967 const Operator* I16x8AddSatS();
968 const Operator* I16x8Sub();
969 const Operator* I16x8SubSatS();
970 const Operator* I16x8Mul();
971 const Operator* I16x8MinS();
972 const Operator* I16x8MaxS();
973 const Operator* I16x8Eq();
974 const Operator* I16x8Ne();
975 const Operator* I16x8GtS();
976 const Operator* I16x8GeS();
977
978 const Operator* I16x8UConvertI8x16Low();
979 const Operator* I16x8UConvertI8x16High();
980 const Operator* I16x8ShrU();
981 const Operator* I16x8UConvertI32x4();
982 const Operator* I16x8AddSatU();
983 const Operator* I16x8SubSatU();
984 const Operator* I16x8MinU();
985 const Operator* I16x8MaxU();
986 const Operator* I16x8GtU();
987 const Operator* I16x8GeU();
988 const Operator* I16x8RoundingAverageU();
989 const Operator* I16x8Q15MulRSatS();
990 const Operator* I16x8Abs();
991 const Operator* I16x8BitMask();
992 const Operator* I16x8ExtMulLowI8x16S();
993 const Operator* I16x8ExtMulHighI8x16S();
994 const Operator* I16x8ExtMulLowI8x16U();
995 const Operator* I16x8ExtMulHighI8x16U();
996 const Operator* I16x8ExtAddPairwiseI8x16S();
997 const Operator* I16x8ExtAddPairwiseI8x16U();
998
999 const Operator* I8x16ExtractLaneU(int32_t);
1000 const Operator* I8x16ExtractLaneS(int32_t);
1001 const Operator* I8x16ReplaceLane(int32_t);
1002 const Operator* I8x16Neg();
1003 const Operator* I8x16Shl();
1004 const Operator* I8x16ShrS();
1005 const Operator* I8x16SConvertI16x8();
1006 const Operator* I8x16Add();
1007 const Operator* I8x16AddSatS();
1008 const Operator* I8x16Sub();
1009 const Operator* I8x16SubSatS();
1010 const Operator* I8x16MinS();
1011 const Operator* I8x16MaxS();
1012 const Operator* I8x16Ne();
1013 const Operator* I8x16GtS();
1014 const Operator* I8x16GeS();
1015
1016 const Operator* I8x16ShrU();
1017 const Operator* I8x16UConvertI16x8();
1018 const Operator* I8x16AddSatU();
1019 const Operator* I8x16SubSatU();
1020 const Operator* I8x16MinU();
1021 const Operator* I8x16MaxU();
1022 const Operator* I8x16GtU();
1023 const Operator* I8x16GeU();
1024 const Operator* I8x16RoundingAverageU();
1025 const Operator* I8x16Popcnt();
1026 const Operator* I8x16Abs();
1027
1028 const Operator* S128Const(const uint8_t value[16]);
1029
1030 const Operator* S128Zero();
1031 const Operator* S128And();
1032 const Operator* S128Or();
1033 const Operator* S128Xor();
1034 const Operator* S128Not();
1035 const Operator* S128Select();
1036 const Operator* S128AndNot();
1037
1038 const Operator* I8x16Swizzle(bool relaxed = false);
1039 // Helper for turboshaft/recreate-schedule.cc.
1040 const Operator* I8x16RelaxedSwizzle() { return I8x16Swizzle(true); }
1041 const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
1042
1043 const Operator* V128AnyTrue();
1044 const Operator* I64x2AllTrue();
1045 const Operator* I32x4AllTrue();
1046 const Operator* I16x8AllTrue();
1047 const Operator* I8x16AllTrue();
1048
1049 // Relaxed SIMD operators.
1050 const Operator* I8x16RelaxedLaneSelect();
1051 const Operator* I16x8RelaxedLaneSelect();
1052 const Operator* I32x4RelaxedLaneSelect();
1053 const Operator* I64x2RelaxedLaneSelect();
1054 const Operator* F32x4RelaxedMin();
1055 const Operator* F32x4RelaxedMax();
1056 const Operator* F64x2RelaxedMin();
1057 const Operator* F64x2RelaxedMax();
1058 const Operator* I32x4RelaxedTruncF32x4S();
1059 const Operator* I32x4RelaxedTruncF32x4U();
1060 const Operator* I32x4RelaxedTruncF64x2SZero();
1061 const Operator* I32x4RelaxedTruncF64x2UZero();
1062 const Operator* I16x8RelaxedQ15MulRS();
1063 const Operator* I16x8DotI8x16I7x16S();
1064 const Operator* I32x4DotI8x16I7x16AddS();
1065
1066 // SIMD256
1067 const Operator* F64x4Min();
1068 const Operator* F64x4Max();
1069 const Operator* F64x4Add();
1070 const Operator* F64x4Abs();
1071 const Operator* F64x4Neg();
1072 const Operator* F64x4Sqrt();
1073 const Operator* F32x8Abs();
1074 const Operator* F32x8Neg();
1075 const Operator* F32x8Sqrt();
1076 const Operator* F32x8Add();
1077 const Operator* I64x4Add();
1078 const Operator* I32x8Add();
1079 const Operator* I16x16Add();
1080 const Operator* I8x32Add();
1081 const Operator* F64x4Sub();
1082 const Operator* F32x8Sub();
1083 const Operator* I64x4Sub();
1084 const Operator* I32x8Sub();
1085 const Operator* I16x16Sub();
1086 const Operator* I8x32Sub();
1087 const Operator* F64x4Mul();
1088 const Operator* F32x8Mul();
1089 const Operator* I64x4Mul();
1090 const Operator* I32x8Mul();
1091 const Operator* I16x16Mul();
1092 const Operator* F64x4Div();
1093 const Operator* F32x8Div();
1094 const Operator* I16x16AddSatS();
1095 const Operator* I8x32AddSatS();
1096 const Operator* I16x16AddSatU();
1097 const Operator* I8x32AddSatU();
1098 const Operator* I16x16SubSatS();
1099 const Operator* I8x32SubSatS();
1100 const Operator* I16x16SubSatU();
1101 const Operator* I8x32SubSatU();
1102 const Operator* F32x8Min();
1103 const Operator* F32x8Max();
1104 const Operator* F32x8Pmin();
1105 const Operator* F32x8Pmax();
1106 const Operator* F32x8Eq();
1107 const Operator* F64x4Eq();
1108 const Operator* I64x4Eq();
1109 const Operator* I32x8Eq();
1110 const Operator* I16x16Eq();
1111 const Operator* I8x32Eq();
1112 const Operator* F32x8Ne();
1113 const Operator* F64x4Ne();
1114 const Operator* I64x4GtS();
1115 const Operator* I32x8GtS();
1116 const Operator* I16x16GtS();
1117 const Operator* I8x32GtS();
1118 const Operator* F64x4Lt();
1119 const Operator* F32x8Lt();
1120 const Operator* F64x4Le();
1121 const Operator* F32x8Le();
1122 const Operator* I32x8MinS();
1123 const Operator* I16x16MinS();
1124 const Operator* I8x32MinS();
1125 const Operator* I32x8MinU();
1126 const Operator* I16x16MinU();
1127 const Operator* I8x32MinU();
1128 const Operator* I32x8MaxS();
1129 const Operator* I16x16MaxS();
1130 const Operator* I8x32MaxS();
1131 const Operator* I32x8MaxU();
1132 const Operator* I16x16MaxU();
1133 const Operator* I8x32MaxU();
1134 const Operator* I64x4Ne();
1135 const Operator* I64x4GeS();
1136 const Operator* I32x8Ne();
1137 const Operator* I32x8GtU();
1138 const Operator* I32x8GeS();
1139 const Operator* I32x8GeU();
1140 const Operator* I16x16Ne();
1141 const Operator* I16x16GtU();
1142 const Operator* I16x16GeS();
1143 const Operator* I16x16GeU();
1144 const Operator* I8x32Ne();
1145 const Operator* I8x32GtU();
1146 const Operator* I8x32GeS();
1147 const Operator* I8x32GeU();
1148 const Operator* I32x8SConvertF32x8();
1149 const Operator* I32x8UConvertF32x8();
1150 const Operator* F64x4ConvertI32x4S();
1151 const Operator* F32x8SConvertI32x8();
1152 const Operator* F32x8UConvertI32x8();
1153 const Operator* F32x4DemoteF64x4();
1154 const Operator* I64x4SConvertI32x4();
1155 const Operator* I64x4UConvertI32x4();
1156 const Operator* I32x8SConvertI16x8();
1157 const Operator* I32x8UConvertI16x8();
1158 const Operator* I16x16SConvertI8x16();
1159 const Operator* I16x16UConvertI8x16();
1160 const Operator* I16x16SConvertI32x8();
1161 const Operator* I16x16UConvertI32x8();
1162 const Operator* I8x32SConvertI16x16();
1163 const Operator* I8x32UConvertI16x16();
1164 const Operator* I32x8Neg();
1165 const Operator* I32x8Abs();
1166 const Operator* I16x16Neg();
1167 const Operator* I16x16Abs();
1168 const Operator* I8x32Neg();
1169 const Operator* I8x32Abs();
1170 const Operator* I64x4Shl();
1171 const Operator* I64x4ShrU();
1172 const Operator* I32x8Shl();
1173 const Operator* I32x8ShrS();
1174 const Operator* I32x8ShrU();
1175 const Operator* I16x16Shl();
1176 const Operator* I16x16ShrS();
1177 const Operator* I16x16ShrU();
1178 const Operator* I32x8DotI16x16S();
1179 const Operator* I16x16RoundingAverageU();
1180 const Operator* I8x32RoundingAverageU();
1181 const Operator* I64x4ExtMulI32x4S();
1182 const Operator* I64x4ExtMulI32x4U();
1183 const Operator* I32x8ExtMulI16x8S();
1184 const Operator* I32x8ExtMulI16x8U();
1185 const Operator* I16x16ExtMulI8x16S();
1186 const Operator* I16x16ExtMulI8x16U();
1187 const Operator* I32x8ExtAddPairwiseI16x16S();
1188 const Operator* I32x8ExtAddPairwiseI16x16U();
1189 const Operator* I16x16ExtAddPairwiseI8x32S();
1190 const Operator* I16x16ExtAddPairwiseI8x32U();
1191 const Operator* ExtractF128(int32_t lane_index);
1192 const Operator* I64x4Splat();
1193 const Operator* I32x8Splat();
1194 const Operator* I16x16Splat();
1195 const Operator* I8x32Splat();
1196 const Operator* F64x4Pmin();
1197 const Operator* F64x4Pmax();
1198 const Operator* F64x4Splat();
1199 const Operator* F32x8Splat();
1200 const Operator* I8x32Shuffle(const uint8_t shuffle[32]);
1201
1202 const Operator* S256Const(const uint8_t value[32]);
1203 const Operator* S256Zero();
1204 const Operator* S256And();
1205 const Operator* S256Or();
1206 const Operator* S256Xor();
1207 const Operator* S256Not();
1208 const Operator* S256Select();
1209 const Operator* S256AndNot();
1210 // 256-bit relaxed SIMD
1211 const Operator* F32x8Qfma();
1212 const Operator* F32x8Qfms();
1213 const Operator* F64x4Qfma();
1214 const Operator* F64x4Qfms();
1215 const Operator* I64x4RelaxedLaneSelect();
1216 const Operator* I32x8RelaxedLaneSelect();
1217 const Operator* I16x16RelaxedLaneSelect();
1218 const Operator* I8x32RelaxedLaneSelect();
1219 const Operator* I32x8DotI8x32I7x32AddS();
1220 const Operator* I16x16DotI8x32I7x32S();
1221 const Operator* F32x8RelaxedMin();
1222 const Operator* F32x8RelaxedMax();
1223 const Operator* F64x4RelaxedMin();
1224 const Operator* F64x4RelaxedMax();
1225 const Operator* I32x8RelaxedTruncF32x8S();
1226 const Operator* I32x8RelaxedTruncF32x8U();
1227
1228 const Operator* LoadTransform(MemoryAccessKind kind,
1229 LoadTransformation transform);
1230
1231 // SIMD load: replace a specified lane with [base + index].
1232 const Operator* LoadLane(MemoryAccessKind kind, LoadRepresentation rep,
1233 uint8_t laneidx);
1234
1235 // SIMD store: store a specified lane of value into [base + index].
1237 uint8_t laneidx);
1238
1239#endif // V8_ENABLE_WEBASSEMBLY
1240
1241 const Operator* TraceInstruction(uint32_t markid);
1242
1243 // load [base + index]
1244 const Operator* Load(LoadRepresentation rep);
1245 const Operator* LoadImmutable(LoadRepresentation rep);
1246 const Operator* ProtectedLoad(LoadRepresentation rep);
1247 const Operator* LoadTrapOnNull(LoadRepresentation rep);
1248
1249 // store [base + index], value
1250 const Operator* Store(StoreRepresentation rep);
1251 std::optional<const Operator*> TryStorePair(StoreRepresentation rep1,
1252 StoreRepresentation rep2);
1253 const Operator* StoreIndirectPointer(WriteBarrierKind write_barrier_kind);
1254 const Operator* ProtectedStore(MachineRepresentation rep);
1255 const Operator* StoreTrapOnNull(StoreRepresentation rep);
1256
1257 // unaligned load [base + index]
1258 const Operator* UnalignedLoad(LoadRepresentation rep);
1259
1260 // unaligned store [base + index], value
1261 const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
1262
1263 const Operator* StackSlot(int size, int alignment = 0,
1264 bool is_tagged = false);
1265 const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
1266
1267 // Note: Only use this operator to:
1268 // - Load from a constant offset.
1269 // - Store to a constant offset with {kNoWriteBarrier}.
1270 // These are the only usages supported by the instruction selector.
1272
1273 // Access to the machine stack.
1276#if V8_ENABLE_WEBASSEMBLY
1277 const Operator* LoadStackPointer();
1278 const Operator* SetStackPointer();
1279#endif
1280
1281 // Compares: stack_pointer [- offset] > value. The offset is optionally
1282 // applied for kFunctionEntry stack checks.
1283 const Operator* StackPointerGreaterThan(StackCheckKind kind);
1284
1285 // Loads the offset that should be applied to the current stack
1286 // pointer before a stack check. Used as input to the
1287 // Runtime::kStackGuardWithGap call.
1289
1290 const Operator* MemoryBarrier(AtomicMemoryOrder order);
1291
1292 // atomic-load [base + index]
1293 const Operator* Word32AtomicLoad(AtomicLoadParameters params);
1294 // atomic-load [base + index]
1295 const Operator* Word64AtomicLoad(AtomicLoadParameters params);
1296 // atomic-store [base + index], value
1297 const Operator* Word32AtomicStore(AtomicStoreParameters params);
1298 // atomic-store [base + index], value
1299 const Operator* Word64AtomicStore(AtomicStoreParameters params);
1300 // atomic-exchange [base + index], value
1301 const Operator* Word32AtomicExchange(AtomicOpParameters params);
1302 // atomic-exchange [base + index], value
1303 const Operator* Word64AtomicExchange(AtomicOpParameters params);
1304 // atomic-compare-exchange [base + index], old_value, new_value
1305 const Operator* Word32AtomicCompareExchange(AtomicOpParameters params);
1306 // atomic-compare-exchange [base + index], old_value, new_value
1307 const Operator* Word64AtomicCompareExchange(AtomicOpParameters params);
1308 // atomic-add [base + index], value
1309 const Operator* Word32AtomicAdd(AtomicOpParameters params);
1310 // atomic-sub [base + index], value
1311 const Operator* Word32AtomicSub(AtomicOpParameters params);
1312 // atomic-and [base + index], value
1313 const Operator* Word32AtomicAnd(AtomicOpParameters params);
1314 // atomic-or [base + index], value
1315 const Operator* Word32AtomicOr(AtomicOpParameters params);
1316 // atomic-xor [base + index], value
1317 const Operator* Word32AtomicXor(AtomicOpParameters params);
1318 // atomic-add [base + index], value
1319 const Operator* Word64AtomicAdd(AtomicOpParameters params);
1320 // atomic-sub [base + index], value
1321 const Operator* Word64AtomicSub(AtomicOpParameters params);
1322 // atomic-and [base + index], value
1323 const Operator* Word64AtomicAnd(AtomicOpParameters params);
1324 // atomic-or [base + index], value
1325 const Operator* Word64AtomicOr(AtomicOpParameters params);
1326 // atomic-xor [base + index], value
1327 const Operator* Word64AtomicXor(AtomicOpParameters params);
 // The Word32AtomicPair* operators below take/produce 64-bit values as
 // (high, low) 32-bit halves.
1328 // atomic-pair-load [base + index]
1329 const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
1330 // atomic-pair-store [base + index], value_high, value_low
1331 const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
1332 // atomic-pair-add [base + index], value_high, value_low
1333 const Operator* Word32AtomicPairAdd();
1334 // atomic-pair-sub [base + index], value_high, value_low
1335 const Operator* Word32AtomicPairSub();
1336 // atomic-pair-and [base + index], value_high, value_low
1337 const Operator* Word32AtomicPairAnd();
1338 // atomic-pair-or [base + index], value_high, value_low
1339 const Operator* Word32AtomicPairOr();
1340 // atomic-pair-xor [base + index], value_high, value_low
1341 const Operator* Word32AtomicPairXor();
1342 // atomic-pair-exchange [base + index], value_high, value_low
1343 const Operator* Word32AtomicPairExchange();
1344 // atomic-pair-compare-exchange [base + index], old_value_high, old_value_low,
1345 // new_value_high, new_value_low
1346 const Operator* Word32AtomicPairCompareExchange();
1348 // Target machine word-size assumed by this builder.
1349 bool Is32() const { return word() == MachineRepresentation::kWord32; }
1350 bool Is64() const { return word() == MachineRepresentation::kWord64; }
1351 MachineRepresentation word() const { return word_; }
1352
 // NOTE(review): the signature line (original 1353) was lost in extraction;
 // per this page's member index it is
 // bool UnalignedLoadSupported(MachineRepresentation rep).
1354 return alignment_requirements_.IsUnalignedLoadSupported(rep);
1355 }
1356
 // NOTE(review): the signature line (original 1357) was lost in extraction;
 // per this page's member index it is
 // bool UnalignedStoreSupported(MachineRepresentation rep).
1358 return alignment_requirements_.IsUnalignedStoreSupported(rep);
1359 }
1360
1361// Pseudo operators that translate to 32/64-bit operators depending on the
1362// word-size of the target machine assumed by this builder.
1363#define PSEUDO_OP_LIST(V) \
1364 V(Word, And) \
1365 V(Word, Or) \
1366 V(Word, Xor) \
1367 V(Word, Shl) \
1368 V(Word, Shr) \
1369 V(Word, Ror) \
1370 V(Word, Clz) \
1371 V(Word, Equal) \
1372 V(Int, Add) \
1373 V(Int, Sub) \
1374 V(Int, Mul) \
1375 V(Int, Div) \
1376 V(Int, Mod) \
1377 V(Int, LessThan) \
1378 V(Int, LessThanOrEqual) \
1379 V(Uint, Div) \
1380 V(Uint, LessThan) \
1381 V(Uint, Mod)
 // Each PSEUDO_OP(Prefix, Suffix) expands to an inline method
 // Prefix##Suffix() that forwards to the 32-bit or 64-bit operator getter
 // depending on Is32().
1382#define PSEUDO_OP(Prefix, Suffix) \
1383 const Operator* Prefix##Suffix() { \
1384 return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
1385 }
 // NOTE(review): original line 1386 — presumably the
 // PSEUDO_OP_LIST(PSEUDO_OP) expansion that actually declares the pseudo
 // operators — is missing from this extraction; confirm against the
 // original header.
1387#undef PSEUDO_OP
1388#undef PSEUDO_OP_LIST
1389
 // Arithmetic shift right, dispatching on the builder's word size; {kind}
 // distinguishes normal shifts from shift-out-zeros shifts.
1390 const Operator* WordSar(ShiftKind kind = ShiftKind::kNormal) {
1391 return Is32() ? Word32Sar(kind) : Word64Sar(kind);
1392 }
 // NOTE(review): the signature line (original 1393) was lost in extraction —
 // presumably a WordSarShiftOutZeros() convenience wrapper; confirm against
 // the original header.
1394 return WordSar(ShiftKind::kShiftOutZeros);
1395 }
1396
 // NOTE(review): the signature line (original 1397) was lost in extraction —
 // the body selects 32-bit equality under pointer compression, so this is
 // presumably a TaggedEqual()-style helper; confirm against the original
 // header.
1398 return COMPRESS_POINTERS_BOOL ? Word32Equal() : WordEqual();
1399 }
1400
1401 private:
1407};
1408
1409
1411
1412} // namespace compiler
1413} // namespace internal
1414} // namespace v8
1415
1416#endif // V8_COMPILER_MACHINE_OPERATOR_H_
#define DEFINE_OPERATORS_FOR_FLAGS(Type)
Definition flags.h:100
Builtins::Kind kind
Definition builtins.cc:40
constexpr bool contains(E element) const
Definition enum-set.h:35
AtomicLoadParameters(LoadRepresentation representation, AtomicMemoryOrder order, MemoryAccessKind kind=MemoryAccessKind::kNormal)
AtomicOpParameters(MachineType type, MemoryAccessKind kind=MemoryAccessKind::kNormal)
AtomicStoreParameters(MachineRepresentation representation, WriteBarrierKind write_barrier_kind, AtomicMemoryOrder order, MemoryAccessKind kind=MemoryAccessKind::kNormal)
const base::EnumSet< MachineRepresentation > unalignedStoreUnsupportedTypes_
const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_
const base::EnumSet< MachineRepresentation > unalignedLoadUnsupportedTypes_
static AlignmentRequirements SomeUnalignedAccessUnsupported(base::EnumSet< MachineRepresentation > unalignedLoadUnsupportedTypes, base::EnumSet< MachineRepresentation > unalignedStoreUnsupportedTypes)
bool IsUnalignedSupported(base::EnumSet< MachineRepresentation > unsupported, MachineRepresentation rep) const
AlignmentRequirements(AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport, base::EnumSet< MachineRepresentation > unalignedLoadUnsupportedTypes=base::EnumSet< MachineRepresentation >(), base::EnumSet< MachineRepresentation > unalignedStoreUnsupportedTypes=base::EnumSet< MachineRepresentation >())
bool UnalignedLoadSupported(MachineRepresentation rep)
MachineOperatorBuilder & operator=(const MachineOperatorBuilder &)=delete
MachineOperatorBuilder(const MachineOperatorBuilder &)=delete
bool UnalignedStoreSupported(MachineRepresentation rep)
const Operator * WordSar(ShiftKind kind=ShiftKind::kNormal)
MachineOperatorGlobalCache const & cache_
OptionalOperator(bool supported, const Operator *op)
const std::array< uint8_t, simd_size > & immediate() const
SimdImmediateParameter(const uint8_t immediate[simd_size])
std::array< uint8_t, simd_size > immediate_
StackSlotRepresentation(int size, int alignment, bool is_tagged)
StoreRepresentation(MachineRepresentation representation, WriteBarrierKind write_barrier_kind)
MachineRepresentation representation() const
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
JSRegExp::Flags flags_
double second
int x
#define PSEUDO_OP(Prefix, Suffix)
#define PSEUDO_OP_LIST(V)
STL namespace.
V8_INLINE size_t hash_range(Iterator first, Iterator last)
Definition hashing.h:308
AtomicOpParameters AtomicOpParametersOf(Operator const *op)
V8_EXPORT_PRIVATE StoreLaneParameters const & StoreLaneParametersOf(Operator const *) V8_WARN_UNUSED_RESULT
StackCheckKind StackCheckKindOf(Operator const *op)
V8_EXPORT_PRIVATE LoadTransformParameters const & LoadTransformParametersOf(Operator const *) V8_WARN_UNUSED_RESULT
AtomicStoreParameters const & AtomicStoreParametersOf(Operator const *op)
V8_EXPORT_PRIVATE S128ImmediateParameter const & S128ImmediateParameterOf(Operator const *op) V8_WARN_UNUSED_RESULT
StoreRepresentation const & StoreRepresentationOf(Operator const *op)
V8_EXPORT_PRIVATE S256ImmediateParameter const & S256ImmediateParameterOf(Operator const *op) V8_WARN_UNUSED_RESULT
AtomicLoadParameters AtomicLoadParametersOf(Operator const *op)
StackSlotRepresentation const & StackSlotRepresentationOf(Operator const *op)
MachineType AtomicOpType(InstructionSelectorT *selector, OpIndex node)
StorePairRepresentation const & StorePairRepresentationOf(Operator const *op)
size_t hash_value(const BranchParameters &p)
LoadRepresentation LoadRepresentationOf(Operator const *op)
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs)
bool operator==(const BranchParameters &lhs, const BranchParameters &rhs)
V8_EXPORT_PRIVATE LoadLaneParameters const & LoadLaneParametersOf(Operator const *) V8_WARN_UNUSED_RESULT
UnalignedStoreRepresentation const & UnalignedStoreRepresentationOf(Operator const *op)
std::ostream & operator<<(std::ostream &os, AccessMode access_mode)
ShiftKind ShiftKindOf(Operator const *op)
constexpr int kSimd128Size
Definition globals.h:706
constexpr int kSimd256Size
Definition globals.h:709
V8_EXPORT_PRIVATE FlagValues v8_flags
i::Address Load(i::Address address)
Definition unwinder.cc:19
#define NON_EXPORTED_BASE(code)
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK(condition)
Definition logging.h:482
#define V8_EXPORT_PRIVATE
Definition macros.h:460
friend std::ostream & operator<<(std::ostream &out, const StorePairRepresentation rep)
StorePairRepresentation(StoreRepresentation first, StoreRepresentation second)
#define V8_WARN_UNUSED_RESULT
Definition v8config.h:671