v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction.h
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMPILER_BACKEND_INSTRUCTION_H_
6#define V8_COMPILER_BACKEND_INSTRUCTION_H_
7
8#include <iosfwd>
9#include <map>
10#include <optional>
11
17#include "src/common/globals.h"
21#include "src/compiler/frame.h"
24
25namespace v8 {
26namespace internal {
27
28class RegisterConfiguration;
29
30namespace compiler {
31
32class Schedule;
33class SourcePositionTable;
34
35namespace turboshaft {
36class Graph;
37}
38
39#if defined(V8_CC_MSVC) && defined(V8_TARGET_ARCH_IA32)
40// MSVC on x86 has issues with ALIGNAS(8) on InstructionOperand, but does
41// align the object to 8 bytes anyway (covered by a static assert below).
42// See crbug.com/v8/10796
43#define INSTRUCTION_OPERAND_ALIGN
44#else
45#define INSTRUCTION_OPERAND_ALIGN ALIGNAS(8)
46#endif
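// [Editorial sketch, not part of the original header] The 8-byte alignment
// matters because PendingOperand (below) stores the next pointer with its
// three low-order bits shifted off; a minimal illustration of the invariant
// the header later enforces with static_asserts:
//
//   static_assert(alignof(InstructionOperand) >= 8);
//   // => for any InstructionOperand* p, the low 3 bits are zero, so
//   //    (reinterpret_cast<uintptr_t>(p) >> 3) << 3 round-trips losslessly.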
47
48class V8_EXPORT_PRIVATE INSTRUCTION_OPERAND_ALIGN InstructionOperand {
49 public:
50 static const int kInvalidVirtualRegister = -1;
51
52 enum Kind {
53 INVALID,
54 UNALLOCATED,
55 CONSTANT,
56 IMMEDIATE,
57 PENDING,
58 // Location operand kinds.
59 ALLOCATED,
60 FIRST_LOCATION_OPERAND_KIND = ALLOCATED
61 // Location operand kinds must be last.
62 };
63
64 InstructionOperand() : InstructionOperand(INVALID) {}
65
66 Kind kind() const { return KindField::decode(value_); }
67
68#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
69 bool Is##name() const { return kind() == type; }
70 INSTRUCTION_OPERAND_PREDICATE(Invalid, INVALID)
71 // UnallocatedOperands are placeholder operands created before register
72 // allocation. They are later assigned registers and become AllocatedOperands.
73 INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
74 // Constant operands participate in register allocation. They are allocated to
75 // registers but have a special "spilling" behavior. When a ConstantOperand
76 // value must be rematerialized, it is loaded from an immediate constant
77 // rather than from a spill slot.
78 INSTRUCTION_OPERAND_PREDICATE(Constant, CONSTANT)
79 // ImmediateOperands do not participate in register allocation and are only
80 // embedded directly in instructions, e.g. small integers and on some
81 // platforms Objects.
82 INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
83 // PendingOperands are pending allocation during register allocation and
84 // shouldn't be seen elsewhere. They chain together multiple operands that
85 // will be replaced together with the same value when finalized.
86 INSTRUCTION_OPERAND_PREDICATE(Pending, PENDING)
87 // AllocatedOperands are registers or stack slots that are assigned by the
88 // register allocator and are always associated with a virtual register.
89 INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
90#undef INSTRUCTION_OPERAND_PREDICATE
91
92 inline bool IsAnyLocationOperand() const;
93 inline bool IsLocationOperand() const;
94 inline bool IsFPLocationOperand() const;
95 inline bool IsAnyRegister() const;
96 inline bool IsRegister() const;
97 inline bool IsFPRegister() const;
98 inline bool IsFloatRegister() const;
99 inline bool IsDoubleRegister() const;
100 inline bool IsSimd128Register() const;
101 inline bool IsSimd256Register() const;
102 inline bool IsAnyStackSlot() const;
103 inline bool IsStackSlot() const;
104 inline bool IsFPStackSlot() const;
105 inline bool IsFloatStackSlot() const;
106 inline bool IsDoubleStackSlot() const;
107 inline bool IsSimd128StackSlot() const;
108 inline bool IsSimd256StackSlot() const;
109
110 template <typename SubKindOperand>
111 static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
112 return zone->New<SubKindOperand>(op);
113 }
114
115 static void ReplaceWith(InstructionOperand* dest,
116 const InstructionOperand* src) {
117 *dest = *src;
118 }
119
120 bool Equals(const InstructionOperand& that) const {
121 if (IsPending()) {
122 // Pending operands are only equal if they are the same operand.
123 return this == &that;
124 }
125 return this->value_ == that.value_;
126 }
127
128 bool Compare(const InstructionOperand& that) const {
129 return this->value_ < that.value_;
130 }
131
132 bool EqualsCanonicalized(const InstructionOperand& that) const {
133 if (IsPending()) {
134 // Pending operands can't be canonicalized, so just compare for equality.
135 return Equals(that);
136 }
137 return this->GetCanonicalizedValue() == that.GetCanonicalizedValue();
138 }
139
140 bool CompareCanonicalized(const InstructionOperand& that) const {
141 DCHECK(!IsPending());
142 return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
143 }
144
145 bool InterferesWith(const InstructionOperand& other) const;
146
147 // APIs to aid debugging. For general-stream APIs, use operator<<.
148 void Print() const;
149
150 bool operator==(const InstructionOperand& other) const {
151 return Equals(other);
152 }
153 bool operator!=(const InstructionOperand& other) const {
154 return !Equals(other);
155 }
156
157 protected:
158 explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
159
160 inline uint64_t GetCanonicalizedValue() const;
161
162 using KindField = base::BitField64<Kind, 0, 3>;
163
164 uint64_t value_;
165};
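// [Editorial sketch, not in the original source] How the two equality
// flavours differ, assuming simple FP aliasing (AliasingKind::kOverlap) and
// hypothetical operands: Equals() compares the raw encoded bits, while
// EqualsCanonicalized() ignores the machine representation of FP registers:
//
//   AllocatedOperand f32(LocationOperand::REGISTER,
//                        MachineRepresentation::kFloat32, 0);
//   AllocatedOperand f64(LocationOperand::REGISTER,
//                        MachineRepresentation::kFloat64, 0);
//   CHECK(!f32.Equals(f64));              // representation bits differ
//   CHECK(f32.EqualsCanonicalized(f64));  // same canonical FP register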
166
168
169std::ostream& operator<<(std::ostream&, const InstructionOperand&);
170
171#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind) \
172 \
173 static OperandType* cast(InstructionOperand* op) { \
174 DCHECK_EQ(OperandKind, op->kind()); \
175 return static_cast<OperandType*>(op); \
176 } \
177 \
178 static const OperandType* cast(const InstructionOperand* op) { \
179 DCHECK_EQ(OperandKind, op->kind()); \
180 return static_cast<const OperandType*>(op); \
181 } \
182 \
183 static OperandType cast(const InstructionOperand& op) { \
184 DCHECK_EQ(OperandKind, op.kind()); \
185 return *static_cast<const OperandType*>(&op); \
186 }
187
188class UnallocatedOperand final : public InstructionOperand {
189 public:
190 enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
191
192 enum ExtendedPolicy {
193 NONE,
194 REGISTER_OR_SLOT,
195 REGISTER_OR_SLOT_OR_CONSTANT,
196 FIXED_REGISTER,
197 FIXED_FP_REGISTER,
198 MUST_HAVE_REGISTER,
199 MUST_HAVE_SLOT,
200 SAME_AS_INPUT
201 };
202
203 // Lifetime of operand inside the instruction.
204 enum Lifetime {
205 // USED_AT_START operand is guaranteed to be live only at instruction start.
206 // The register allocator is free to assign the same register to some other
207 // operand used inside instruction (i.e. temporary or output).
208 USED_AT_START,
209
210 // USED_AT_END operand is treated as live until the end of instruction.
211 // This means that register allocator will not reuse its register for any
212 // other operand inside instruction.
213 USED_AT_END
214 };
215
222
230
231 UnallocatedOperand(BasicPolicy policy, int index, int virtual_register)
232 : UnallocatedOperand(virtual_register) {
233 DCHECK(policy == FIXED_SLOT);
234 value_ |= BasicPolicyField::encode(policy);
235 value_ |= static_cast<uint64_t>(static_cast<int64_t>(index))
236 << FixedSlotIndexField::kShift;
237 DCHECK(this->fixed_slot_index() == index);
238 }
239
248
256
262
268
269 // Predicates for the operand policy.
278 bool HasFixedPolicy() const {
279 return basic_policy() == FIXED_SLOT ||
280 extended_policy() == FIXED_REGISTER ||
281 extended_policy() == FIXED_FP_REGISTER;
282 }
283 bool HasRegisterPolicy() const {
284 return basic_policy() == EXTENDED_POLICY &&
285 extended_policy() == MUST_HAVE_REGISTER;
286 }
287 bool HasSlotPolicy() const {
288 return basic_policy() == EXTENDED_POLICY &&
289 extended_policy() == MUST_HAVE_SLOT;
290 }
291 bool HasSameAsInputPolicy() const {
292 return basic_policy() == EXTENDED_POLICY &&
293 extended_policy() == SAME_AS_INPUT;
294 }
295 bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
296 bool HasFixedRegisterPolicy() const {
297 return basic_policy() == EXTENDED_POLICY &&
298 extended_policy() == FIXED_REGISTER;
299 }
313
314 // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
315 BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
316
317 // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
318 ExtendedPolicy extended_policy() const {
319 DCHECK(basic_policy() == EXTENDED_POLICY);
320 return ExtendedPolicyField::decode(value_);
321 }
322
323 int input_index() const {
326 }
327
328 // [fixed_slot_index]: Only for FIXED_SLOT.
329 int fixed_slot_index() const {
330 DCHECK(HasFixedSlotPolicy());
331 return static_cast<int>(static_cast<int64_t>(value_) >>
332 FixedSlotIndexField::kShift);
333 }
334
335 // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_FP_REGISTER.
340
341 // [virtual_register]: The virtual register ID for this operand.
342 int32_t virtual_register() const {
343 return static_cast<int32_t>(VirtualRegisterField::decode(value_));
344 }
345
346 // [lifetime]: Only for non-FIXED_SLOT.
347 bool IsUsedAtStart() const {
348 return basic_policy() == EXTENDED_POLICY &&
349 LifetimeField::decode(value_) == USED_AT_START;
350 }
351
352 INSTRUCTION_OPERAND_CASTS(UnallocatedOperand, UNALLOCATED)
353
354 // The encoding used for UnallocatedOperand operands depends on the policy
355 // stored within the operand. The FIXED_SLOT policy uses its own compact
356 // encoding because it has to accommodate a larger payload (a full signed
357 // slot index).
358 //
359 // For FIXED_SLOT policy:
360 // +------------------------------------------------+
361 // | slot_index | 0 | virtual_register | 001 |
362 // +------------------------------------------------+
363 //
364 // For all other (extended) policies:
365 // +-----------------------------------------------------+
366 // | reg_index | L | PPP | 1 | virtual_register | 001 |
367 // +-----------------------------------------------------+
368 // L ... Lifetime
369 // P ... Policy
370 //
371 // The slot index is a signed value which requires us to decode it manually
372 // instead of using the base::BitField utility class.
373
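// [Editorial worked example, not in the original source] Encoding a
// FIXED_SLOT operand for virtual register 5 at (signed) slot index -2,
// following the first layout above:
//
//   UnallocatedOperand op(UnallocatedOperand::FIXED_SLOT, -2, 5);
//   CHECK_EQ(UnallocatedOperand::FIXED_SLOT, op.basic_policy());
//   CHECK_EQ(-2, op.fixed_slot_index());  // decoded with a sign-preserving
//                                         // arithmetic shift, see above
//   CHECK_EQ(5, op.virtual_register());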
374 using VirtualRegisterField = KindField::Next<uint32_t, 32>;
375
376 // base::BitFields for all unallocated operands.
377 using BasicPolicyField = VirtualRegisterField::Next<BasicPolicy, 1>;
378
379 // BitFields specific to BasicPolicy::FIXED_SLOT.
380 using FixedSlotIndexField = BasicPolicyField::Next<int, 28>;
381 static_assert(FixedSlotIndexField::kLastUsedBit == 63);
382
383 // BitFields specific to BasicPolicy::EXTENDED_POLICY.
390
391 private:
397};
398
399class ConstantOperand : public InstructionOperand {
400 public:
406
407 int32_t virtual_register() const {
408 return static_cast<int32_t>(VirtualRegisterField::decode(value_));
409 }
410
414
416
417 using VirtualRegisterField = KindField::Next<uint32_t, 32>;
418};
419
420class ImmediateOperand : public InstructionOperand {
421 public:
422 enum ImmediateType { INLINE_INT32, INLINE_INT64, INDEXED_RPO, INDEXED_IMM };
423
424 explicit ImmediateOperand(ImmediateType type, int32_t value)
425 : InstructionOperand(IMMEDIATE) {
426 value_ |= TypeField::encode(type);
427 value_ |= static_cast<uint64_t>(static_cast<int64_t>(value))
428 << ValueField::kShift;
429 }
430
431 ImmediateType type() const { return TypeField::decode(value_); }
432
433 int32_t inline_int32_value() const {
434 DCHECK_EQ(INLINE_INT32, type());
435 return static_cast<int64_t>(value_) >> ValueField::kShift;
436 }
437
438 int64_t inline_int64_value() const {
439 DCHECK_EQ(INLINE_INT64, type());
440 return static_cast<int64_t>(value_) >> ValueField::kShift;
441 }
442
443 int32_t indexed_value() const {
444 DCHECK(type() == INDEXED_IMM || type() == INDEXED_RPO);
445 return static_cast<int64_t>(value_) >> ValueField::kShift;
446 }
447
448 static ImmediateOperand* New(Zone* zone, ImmediateType type, int32_t value) {
449 return InstructionOperand::New(zone, ImmediateOperand(type, value));
450 }
451
453
455 static_assert(TypeField::kLastUsedBit < 32);
456 using ValueField = base::BitField64<int32_t, 32, 32>;
457};
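// [Editorial sketch, not in the original source] Small values are stored
// inline in the operand's upper 32 bits; everything else is an index into a
// side table (see InstructionSequence::AddImmediate below):
//
//   ImmediateOperand small(ImmediateOperand::INLINE_INT32, -7);
//   CHECK_EQ(-7, small.inline_int32_value());
//   ImmediateOperand indexed(ImmediateOperand::INDEXED_IMM, /*index=*/0);
//   CHECK_EQ(0, indexed.indexed_value());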
458
459class PendingOperand : public InstructionOperand {
460 public:
461 PendingOperand() : InstructionOperand(PENDING) {}
462 explicit PendingOperand(PendingOperand* next_operand) : PendingOperand() {
463 set_next(next_operand);
464 }
465
466 void set_next(PendingOperand* next) {
467 DCHECK_NULL(this->next());
468 uintptr_t shifted_value =
469 reinterpret_cast<uintptr_t>(next) >> kPointerShift;
470 DCHECK_EQ(reinterpret_cast<uintptr_t>(next),
471 shifted_value << kPointerShift);
472 value_ |= NextOperandField::encode(static_cast<uint64_t>(shifted_value));
473 }
474
475 PendingOperand* next() const {
476 uintptr_t shifted_value =
477 static_cast<uint64_t>(NextOperandField::decode(value_));
478 return reinterpret_cast<PendingOperand*>(shifted_value << kPointerShift);
479 }
480
481 static PendingOperand* New(Zone* zone, PendingOperand* previous_operand) {
482 return InstructionOperand::New(zone, PendingOperand(previous_operand));
483 }
484
486
487 private:
488 // Operands are uint64_t values and so are aligned to 8-byte boundaries,
489 // so we can shift off the bottom three zero bits without losing data.
490 static const uint64_t kPointerShift = 3;
491 static_assert(alignof(InstructionOperand) >= (1 << kPointerShift));
492
493 using NextOperandField = KindField::Next<uint64_t, 61>;
494 static_assert(NextOperandField::kLastUsedBit == 63);
495};
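// [Editorial sketch, not in the original source] PendingOperand threads a
// singly-linked chain through operands awaiting allocation; the pointer
// survives the 3-bit shift because of the alignment noted above (`zone` is
// an assumed Zone*):
//
//   PendingOperand* head = PendingOperand::New(zone, nullptr);
//   PendingOperand* second = PendingOperand::New(zone, head);
//   CHECK_EQ(head, second->next());  // shifted pointer round-trips exactly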
496
497class LocationOperand : public InstructionOperand {
498 public:
499 enum LocationKind { REGISTER, STACK_SLOT };
500
501 LocationOperand(InstructionOperand::Kind operand_kind,
502 LocationOperand::LocationKind location_kind,
503 MachineRepresentation rep, int index)
504 : InstructionOperand(operand_kind) {
505 DCHECK_IMPLIES(location_kind == REGISTER, index >= 0);
506 DCHECK(IsSupportedRepresentation(rep));
507 value_ |= LocationKindField::encode(location_kind);
508 value_ |= RepresentationField::encode(rep);
509 value_ |= static_cast<uint64_t>(static_cast<int64_t>(index))
510 << IndexField::kShift;
511 }
512
513 int index() const {
514 DCHECK(IsStackSlot() || IsFPStackSlot());
515 return static_cast<int64_t>(value_) >> IndexField::kShift;
516 }
517
518 int register_code() const {
519 DCHECK(IsRegister() || IsFPRegister());
520 return static_cast<int64_t>(value_) >> IndexField::kShift;
521 }
522
523 Register GetRegister() const {
524 DCHECK(IsRegister());
525 return Register::from_code(register_code());
526 }
527
528 FloatRegister GetFloatRegister() const {
529 DCHECK(IsFloatRegister());
530 return FloatRegister::from_code(register_code());
531 }
532
533 DoubleRegister GetDoubleRegister() const {
534 // On platforms where FloatRegister, DoubleRegister, and Simd128Register
535 // are all the same type, it's convenient to treat everything as a
536 // DoubleRegister, so be lax about type checking here.
537 DCHECK(IsFPRegister());
538 return DoubleRegister::from_code(register_code());
539 }
540
541 Simd128Register GetSimd128Register() const {
542 DCHECK(IsSimd128Register());
543 return Simd128Register::from_code(register_code());
544 }
545
546#if defined(V8_TARGET_ARCH_X64)
547 // On x64, Simd256 and Simd128 registers with the same code alias each other.
548 Simd128Register GetSimd256RegisterAsSimd128() const {
549 DCHECK(IsSimd256Register());
550 return Simd128Register::from_code(register_code());
551 }
552
553 Simd256Register GetSimd256Register() const {
554 DCHECK(IsSimd256Register());
555 return Simd256Register::from_code(register_code());
556 }
557#endif
558
559 LocationKind location_kind() const {
560 return LocationKindField::decode(value_);
561 }
562
563 MachineRepresentation representation() const {
564 return RepresentationField::decode(value_);
565 }
566
567 static bool IsSupportedRepresentation(MachineRepresentation rep) {
568 switch (rep) {
569 case MachineRepresentation::kWord32:
570 case MachineRepresentation::kWord64:
571 case MachineRepresentation::kFloat32:
572 case MachineRepresentation::kFloat64:
573 case MachineRepresentation::kSimd128:
574 case MachineRepresentation::kSimd256:
575 case MachineRepresentation::kTaggedSigned:
576 case MachineRepresentation::kTaggedPointer:
577 case MachineRepresentation::kTagged:
578 case MachineRepresentation::kCompressedPointer:
579 case MachineRepresentation::kCompressed:
580 case MachineRepresentation::kProtectedPointer:
581 case MachineRepresentation::kSandboxedPointer:
582 return true;
583 case MachineRepresentation::kBit:
584 case MachineRepresentation::kWord8:
585 case MachineRepresentation::kWord16:
586 case MachineRepresentation::kFloat16:
587 case MachineRepresentation::kNone:
588 return false;
589 case MachineRepresentation::kMapWord:
590 case MachineRepresentation::kIndirectPointer:
591 case MachineRepresentation::kFloat16RawBits:
592 UNREACHABLE();
593 }
594 }
595
596 // Return true if the locations can be moved to one another.
597 bool IsCompatible(LocationOperand* op);
598
601 return static_cast<LocationOperand*>(op);
602 }
603
604 static const LocationOperand* cast(const InstructionOperand* op) {
606 return static_cast<const LocationOperand*>(op);
607 }
608
611 return *static_cast<const LocationOperand*>(&op);
612 }
613
616 static_assert(RepresentationField::kLastUsedBit < 32);
618};
619
620class AllocatedOperand : public LocationOperand {
621 public:
622 AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
623 : LocationOperand(ALLOCATED, kind, rep, index) {}
624
625 static AllocatedOperand* New(Zone* zone, LocationKind kind,
626 MachineRepresentation rep, int index) {
627 return InstructionOperand::New(zone, AllocatedOperand(kind, rep, index));
628 }
629
630 INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED)
631};
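// [Editorial sketch, not in the original source] AllocatedOperand is the
// post-register-allocation form; hypothetical examples of both location
// kinds (register code 3, stack slot index -1):
//
//   AllocatedOperand reg(LocationOperand::REGISTER,
//                        MachineRepresentation::kWord64, 3);
//   AllocatedOperand slot(LocationOperand::STACK_SLOT,
//                         MachineRepresentation::kTagged, -1);
//   CHECK(reg.IsRegister());
//   CHECK(slot.IsStackSlot());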
632
633#undef INSTRUCTION_OPERAND_CASTS
634
635bool InstructionOperand::IsAnyLocationOperand() const {
636 return this->kind() >= FIRST_LOCATION_OPERAND_KIND;
637}
638
639bool InstructionOperand::IsLocationOperand() const {
640 return IsAnyLocationOperand() &&
641 !IsFloatingPoint(LocationOperand::cast(this)->representation());
642}
643
644bool InstructionOperand::IsFPLocationOperand() const {
645 return IsAnyLocationOperand() &&
646 IsFloatingPoint(LocationOperand::cast(this)->representation());
647}
648
649bool InstructionOperand::IsAnyRegister() const {
650 return IsAnyLocationOperand() &&
651 LocationOperand::cast(this)->location_kind() ==
652 LocationOperand::REGISTER;
653}
654
655bool InstructionOperand::IsRegister() const {
656 return IsAnyRegister() &&
657 !IsFloatingPoint(LocationOperand::cast(this)->representation());
658}
659
660bool InstructionOperand::IsFPRegister() const {
661 return IsAnyRegister() &&
662 IsFloatingPoint(LocationOperand::cast(this)->representation());
663}
664
669
674
679
684
685bool InstructionOperand::IsAnyStackSlot() const {
686 return IsAnyLocationOperand() &&
687 LocationOperand::cast(this)->location_kind() ==
688 LocationOperand::STACK_SLOT;
689}
690
691bool InstructionOperand::IsStackSlot() const {
692 return IsAnyStackSlot() &&
693 !IsFloatingPoint(LocationOperand::cast(this)->representation());
694}
695
696bool InstructionOperand::IsFPStackSlot() const {
697 return IsAnyStackSlot() &&
698 IsFloatingPoint(LocationOperand::cast(this)->representation());
699}
700
708
716
724
732
733uint64_t InstructionOperand::GetCanonicalizedValue() const {
734 if (IsAnyLocationOperand()) {
735 MachineRepresentation canonical = MachineRepresentation::kNone;
736 if (IsFPRegister()) {
737 if (kFPAliasing == AliasingKind::kOverlap) {
738 // We treat all FP register operands the same for simple aliasing.
739 canonical = MachineRepresentation::kFloat64;
740 } else if (kFPAliasing == AliasingKind::kIndependent) {
741 if (IsSimd128Register()) {
742 canonical = MachineRepresentation::kSimd128;
743 } else {
744 canonical = MachineRepresentation::kFloat64;
745 }
746 } else {
747 // We need to distinguish FP register operands of different reps when
748 // aliasing is AliasingKind::kCombine (e.g. ARM).
749 DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
750 canonical = LocationOperand::cast(this)->representation();
751 }
752 }
753 return InstructionOperand::KindField::update(
754 LocationOperand::RepresentationField::update(this->value_, canonical),
755 LocationOperand::ALLOCATED);
756 }
757 return this->value_;
758}
759
760// Required for maps that don't care about machine type.
761struct CompareOperandModuloType {
762 bool operator()(const InstructionOperand& a,
763 const InstructionOperand& b) const {
764 return a.CompareCanonicalized(b);
765 }
766};
767
768class V8_EXPORT_PRIVATE MoveOperands final
769 : public NON_EXPORTED_BASE(ZoneObject) {
770 public:
771 MoveOperands(const InstructionOperand& source,
772 const InstructionOperand& destination)
773 : source_(source), destination_(destination) {
774 DCHECK(!source.IsInvalid() && !destination.IsInvalid());
775 CheckPointerCompressionConsistency();
776 }
777
778 MoveOperands(const MoveOperands&) = delete;
779 MoveOperands& operator=(const MoveOperands&) = delete;
780
781 void CheckPointerCompressionConsistency() {
782#if DEBUG && V8_COMPRESS_POINTERS
783 if (!source_.IsLocationOperand()) return;
784 if (!destination_.IsLocationOperand()) return;
785 using MR = MachineRepresentation;
786 MR dest_rep = LocationOperand::cast(&destination_)->representation();
787 if (dest_rep == MR::kTagged || dest_rep == MR::kTaggedPointer) {
788 MR src_rep = LocationOperand::cast(&source_)->representation();
789 DCHECK_NE(src_rep, MR::kCompressedPointer);
790 // TODO(dmercadier): it would be nice to insert a DEBUG runtime check here
791 // to make sure that if `src_rep` is kCompressed, then the value is a Smi.
792 }
793#endif
794 }
795
796 const InstructionOperand& source() const { return source_; }
797 InstructionOperand& source() { return source_; }
798 void set_source(const InstructionOperand& operand) {
799 source_ = operand;
800 CheckPointerCompressionConsistency();
801 }
802
803 const InstructionOperand& destination() const { return destination_; }
804 InstructionOperand& destination() { return destination_; }
805 void set_destination(const InstructionOperand& operand) {
806 destination_ = operand;
807 CheckPointerCompressionConsistency();
808 }
809
810 // The gap resolver marks moves as "in-progress" by clearing the
811 // destination (but not the source).
812 bool IsPending() const {
813 return destination_.IsInvalid() && !source_.IsInvalid();
814 }
815 void SetPending() { destination_ = InstructionOperand(); }
816
817 // A move is redundant if it's been eliminated or if its source and
818 // destination are the same.
819 bool IsRedundant() const {
820 DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
821 return IsEliminated() || source_.EqualsCanonicalized(destination_);
822 }
823
824 // We clear both operands to indicate move that's been eliminated.
825 void Eliminate() { source_ = destination_ = InstructionOperand(); }
826 bool IsEliminated() const {
827 DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
828 return source_.IsInvalid();
829 }
830
831 // APIs to aid debugging. For general-stream APIs, use operator<<.
832 void Print() const;
833
834 bool Equals(const MoveOperands& that) const {
835 if (IsRedundant() && that.IsRedundant()) return true;
836 return source_.Equals(that.source_) &&
837 destination_.Equals(that.destination_);
838 }
839
840 private:
841 InstructionOperand source_;
842 InstructionOperand destination_;
843};
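// [Editorial sketch, not in the original source] The gap resolver's
// three-state lifecycle for a move, with hypothetical valid operands
// `a` and `b`:
//
//   MoveOperands move(a, b);   // live: source a, destination b
//   move.SetPending();         // in progress: destination cleared
//   CHECK(move.IsPending());
//   move.Eliminate();          // done: both operands cleared
//   CHECK(move.IsEliminated() && move.IsRedundant());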
844
845V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const MoveOperands&);
846
847class V8_EXPORT_PRIVATE ParallelMove final
848 : public NON_EXPORTED_BASE(ZoneVector<MoveOperands*>),
849 public NON_EXPORTED_BASE(ZoneObject) {
850 public:
851 explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {}
852 ParallelMove(const ParallelMove&) = delete;
853 ParallelMove& operator=(const ParallelMove&) = delete;
854
855 MoveOperands* AddMove(const InstructionOperand& from,
856 const InstructionOperand& to) {
857 return AddMove(from, to, zone());
858 }
859
860 MoveOperands* AddMove(const InstructionOperand& from,
861 const InstructionOperand& to,
862 Zone* operand_allocation_zone) {
863 if (from.EqualsCanonicalized(to)) return nullptr;
864 MoveOperands* move = operand_allocation_zone->New<MoveOperands>(from, to);
865 if (empty()) reserve(4);
866 push_back(move);
867 return move;
868 }
869
870 bool IsRedundant() const;
871
872 // Prepare this ParallelMove to insert move as if it happened in a subsequent
873 // ParallelMove. move->source() may be changed. Any MoveOperands added to
874 // to_eliminate must be Eliminated.
875 void PrepareInsertAfter(MoveOperands* move,
876 ZoneVector<MoveOperands*>* to_eliminate) const;
877
878 bool Equals(const ParallelMove& that) const;
879
880 // Eliminate all the MoveOperands in this ParallelMove.
881 void Eliminate();
882};
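// [Editorial sketch, not in the original source] ParallelMove filters out
// self-moves at insertion time; `zone`, `reg_op` and `slot_op` are assumed:
//
//   ParallelMove* moves = zone->New<ParallelMove>(zone);
//   CHECK_NULL(moves->AddMove(reg_op, reg_op));  // canonically redundant
//   MoveOperands* m = moves->AddMove(reg_op, slot_op);
//   CHECK_EQ(1u, moves->size());                 // it is a ZoneVector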
883
884std::ostream& operator<<(std::ostream&, const ParallelMove&);
885
886// TODOC(dmercadier): what is a ReferenceMap exactly, what does it contain,
887// when is it created, and what is it used for?
888class ReferenceMap final : public ZoneObject {
889 public:
890 explicit ReferenceMap(Zone* zone)
891 : reference_operands_(zone), instruction_position_(-1) {}
892
894 return reference_operands_;
895 }
896 int instruction_position() const { return instruction_position_; }
897
898 void set_instruction_position(int pos) {
899 DCHECK_EQ(-1, instruction_position_);
900 instruction_position_ = pos;
901 }
902
903 void RecordReference(const AllocatedOperand& op);
904
905 private:
906 friend std::ostream& operator<<(std::ostream&, const ReferenceMap&);
907
910};
911
912std::ostream& operator<<(std::ostream&, const ReferenceMap&);
913
914class InstructionBlock;
915
916class V8_EXPORT_PRIVATE Instruction final {
917 public:
918 Instruction(const Instruction&) = delete;
920
921 size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
922 const InstructionOperand* OutputAt(size_t i) const {
923 DCHECK_LT(i, OutputCount());
924 return &operands_[i];
925 }
926 InstructionOperand* OutputAt(size_t i) {
927 DCHECK_LT(i, OutputCount());
928 return &operands_[i];
929 }
930
931 bool HasOutput() const { return OutputCount() > 0; }
932 const InstructionOperand* Output() const { return OutputAt(0); }
933 InstructionOperand* Output() { return OutputAt(0); }
934
935 size_t InputCount() const { return InputCountField::decode(bit_field_); }
936 const InstructionOperand* InputAt(size_t i) const {
937 DCHECK_LT(i, InputCount());
938 return &operands_[OutputCount() + i];
939 }
940 InstructionOperand* InputAt(size_t i) {
941 DCHECK_LT(i, InputCount());
942 return &operands_[OutputCount() + i];
943 }
944
945 size_t TempCount() const { return TempCountField::decode(bit_field_); }
946 const InstructionOperand* TempAt(size_t i) const {
947 DCHECK_LT(i, TempCount());
948 return &operands_[OutputCount() + InputCount() + i];
949 }
950 InstructionOperand* TempAt(size_t i) {
951 DCHECK_LT(i, TempCount());
952 return &operands_[OutputCount() + InputCount() + i];
953 }
954
955 InstructionCode opcode() const { return opcode_; }
956 ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
957 AddressingMode addressing_mode() const {
958 return AddressingModeField::decode(opcode());
959 }
960 FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
961 FlagsCondition flags_condition() const {
962 return FlagsConditionField::decode(opcode());
963 }
964 int misc() const { return MiscField::decode(opcode()); }
965 bool HasMemoryAccessMode() const {
966 return compiler::HasMemoryAccessMode(arch_opcode());
967 }
968 MemoryAccessMode memory_access_mode() const {
969 DCHECK(HasMemoryAccessMode());
970 return AccessModeField::decode(opcode());
971 }
972
973 static Instruction* New(Zone* zone, InstructionCode opcode) {
974 return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
975 }
976
977 static Instruction* New(Zone* zone, InstructionCode opcode,
978 size_t output_count, InstructionOperand* outputs,
979 size_t input_count, InstructionOperand* inputs,
980 size_t temp_count, InstructionOperand* temps) {
981 DCHECK(output_count == 0 || outputs != nullptr);
982 DCHECK(input_count == 0 || inputs != nullptr);
983 DCHECK(temp_count == 0 || temps != nullptr);
984 // TODO(turbofan): Handle this gracefully. See crbug.com/582702.
985 CHECK(InputCountField::is_valid(input_count));
986
987 size_t total_extra_ops = output_count + input_count + temp_count;
988 if (total_extra_ops != 0) total_extra_ops--;
989 int size = static_cast<int>(
990 RoundUp(sizeof(Instruction), sizeof(InstructionOperand)) +
991 total_extra_ops * sizeof(InstructionOperand));
992 return new (zone->Allocate<Instruction>(size)) Instruction(
993 opcode, output_count, outputs, input_count, inputs, temp_count, temps);
994 }
995
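// [Editorial sketch, not in the original source] Assembling a two-input,
// one-output instruction; `zone` and the operands are assumed to exist, and
// kArchNop stands in for a real target opcode:
//
//   InstructionOperand outs[] = {out};
//   InstructionOperand ins[] = {lhs, rhs};
//   Instruction* instr =
//       Instruction::New(zone, kArchNop, 1, outs, 2, ins, 0, nullptr);
//   CHECK_EQ(1u, instr->OutputCount());
//   CHECK_EQ(2u, instr->InputCount());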
996 Instruction* MarkAsCall() {
997 bit_field_ = IsCallField::update(bit_field_, true);
998 return this;
999 }
1000 bool IsCall() const { return IsCallField::decode(bit_field_); }
1001 bool NeedsReferenceMap() const { return IsCall(); }
1002 bool HasReferenceMap() const { return reference_map_ != nullptr; }
1003
1004 bool ClobbersRegisters() const { return IsCall(); }
1005 bool ClobbersTemps() const { return IsCall(); }
1006 bool ClobbersDoubleRegisters() const { return IsCall(); }
1007 ReferenceMap* reference_map() const { return reference_map_; }
1008
1009 void set_reference_map(ReferenceMap* map) {
1010 DCHECK(NeedsReferenceMap());
1011 DCHECK(!reference_map_);
1012 reference_map_ = map;
1013 }
1014
1016 opcode_ = ArchOpcodeField::encode(kArchNop);
1017 bit_field_ = 0;
1018 reference_map_ = nullptr;
1019 }
1020
1021 bool IsNop() const { return arch_opcode() == kArchNop; }
1022
1023 bool IsDeoptimizeCall() const {
1024 return arch_opcode() == ArchOpcode::kArchDeoptimize ||
1025 FlagsModeField::decode(opcode()) == kFlags_deoptimize;
1026 }
1027
1028 bool IsTrap() const {
1029 return FlagsModeField::decode(opcode()) == kFlags_trap;
1030 }
1031
1032 bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
1033 bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
1034 bool IsTailCall() const {
1035#if V8_ENABLE_WEBASSEMBLY
1036 return arch_opcode() <= ArchOpcode::kArchTailCallWasmIndirect;
1037#else
1038 return arch_opcode() <= ArchOpcode::kArchTailCallAddress;
1039#endif // V8_ENABLE_WEBASSEMBLY
1040 }
1041 bool IsThrow() const {
1042 return arch_opcode() == ArchOpcode::kArchThrowTerminator;
1043 }
1044
1045 static constexpr bool IsCallWithDescriptorFlags(InstructionCode arch_opcode) {
1046 return arch_opcode <= ArchOpcode::kArchCallBuiltinPointer;
1047 }
1048 bool IsCallWithDescriptorFlags() const {
1049 return IsCallWithDescriptorFlags(arch_opcode());
1050 }
1051 bool HasCallDescriptorFlag(CallDescriptor::Flag flag) const {
1052 DCHECK(IsCallWithDescriptorFlags());
1053 static_assert(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
1054#ifdef DEBUG
1055 static constexpr int kInstructionCodeFlagsMask =
1056 ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
1057 DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
1058#endif
1059 return MiscField::decode(opcode()) & flag;
1060 }
1061
1062#ifdef V8_ENABLE_WEBASSEMBLY
1063 size_t WasmSignatureHashInputIndex() const {
1064 // Keep in sync with instruction-selector.cc where the inputs are assembled.
1065 switch (arch_opcode()) {
1066 case kArchCallWasmFunctionIndirect:
1067 return InputCount() -
1068 (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)
1069 ? 2
1070 : 1);
1071 case kArchTailCallWasmIndirect:
1072 return InputCount() - 3;
1073 default:
1074 UNREACHABLE();
1075 }
1076 }
1077#endif
1078
1079 // For call instructions, computes the index of the CodeEntrypointTag input.
1081 // Keep in sync with instruction-selector.cc where the inputs are assembled.
1082 switch (arch_opcode()) {
1083 case kArchCallCodeObject:
1084 return InputCount() -
1085 (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)
1086 ? 2
1087 : 1);
1088 case kArchTailCallCodeObject:
1089 return InputCount() - 3;
1090 default:
1091 UNREACHABLE();
1092 }
1093 }
1094
1095 // For JS call instructions, computes the index of the argument count input.
1097 // Keep in sync with instruction-selector.cc where the inputs are assembled.
1098 if (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
1099 return InputCount() - 2;
1100 } else {
1101 return InputCount() - 1;
1102 }
1103 }
1104
1105 enum GapPosition {
1106 START,
1107 END,
1108 FIRST_GAP_POSITION = START,
1109 LAST_GAP_POSITION = END
1110 };
1111
1112 ParallelMove* GetOrCreateParallelMove(GapPosition pos, Zone* zone) {
1113 if (parallel_moves_[pos] == nullptr) {
1114 parallel_moves_[pos] = zone->New<ParallelMove>(zone);
1115 }
1116 return parallel_moves_[pos];
1117 }
1118
1119 ParallelMove* GetParallelMove(GapPosition pos) {
1120 return parallel_moves_[pos];
1121 }
1122
1123 const ParallelMove* GetParallelMove(GapPosition pos) const {
1124 return parallel_moves_[pos];
1125 }
1126
1127 bool AreMovesRedundant() const;
1128
1129 ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
1130 ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
1131
1132 // The block_id may be invalidated in JumpThreading. It is only important for
1133 // register allocation, to avoid searching for blocks from instruction
1134 // indexes.
1135 InstructionBlock* block() const { return block_; }
1136 void set_block(InstructionBlock* block) {
1137 DCHECK_NOT_NULL(block);
1138 block_ = block;
1139 }
1140
1141 // APIs to aid debugging. For general-stream APIs, use operator<<.
1142 void Print() const;
1143
1147
1148 static const size_t kMaxOutputCount = OutputCountField::kMax;
1149 static const size_t kMaxInputCount = InputCountField::kMax;
1150 static const size_t kMaxTempCount = TempCountField::kMax;
1151
1152 private:
1153 explicit Instruction(InstructionCode opcode);
1154
1155 Instruction(InstructionCode opcode, size_t output_count,
1156 InstructionOperand* outputs, size_t input_count,
1157 InstructionOperand* inputs, size_t temp_count,
1158 InstructionOperand* temps);
1159
1161
1163 uint32_t bit_field_;
1164 ParallelMove* parallel_moves_[2];
1168};
1169
1170std::ostream& operator<<(std::ostream&, const Instruction&);
1171
1172class RpoNumber final {
1173 public:
1174 static const int kInvalidRpoNumber = -1;
1175 RpoNumber() : index_(kInvalidRpoNumber) {}
1176
1177 int ToInt() const {
1178 DCHECK(IsValid());
1179 return index_;
1180 }
1181 size_t ToSize() const {
1182 DCHECK(IsValid());
1183 return static_cast<size_t>(index_);
1184 }
1185 bool IsValid() const { return index_ >= 0; }
1186 static RpoNumber FromInt(int index) { return RpoNumber(index); }
1187 static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
1188
1189 bool IsNext(const RpoNumber other) const {
1190 DCHECK(IsValid());
1191 return other.index_ == this->index_ + 1;
1192 }
1193
1194 RpoNumber Next() const {
1195 DCHECK(IsValid());
1196 return RpoNumber(index_ + 1);
1197 }
1198
1199 // Comparison operators.
1200 bool operator==(RpoNumber other) const { return index_ == other.index_; }
1201 bool operator!=(RpoNumber other) const { return index_ != other.index_; }
1202 bool operator>(RpoNumber other) const { return index_ > other.index_; }
1203 bool operator<(RpoNumber other) const { return index_ < other.index_; }
1204 bool operator<=(RpoNumber other) const { return index_ <= other.index_; }
1205 bool operator>=(RpoNumber other) const { return index_ >= other.index_; }
1206
1207 private:
1208 explicit RpoNumber(int32_t index) : index_(index) {}
1209 int32_t index_;
1210};
1211
1212V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const RpoNumber&);
1213
1214class V8_EXPORT_PRIVATE Constant final {
1215 public:
1226
1227 explicit Constant(int32_t v);
1228 explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
1229 explicit Constant(float v)
1230 : type_(kFloat32), value_(base::bit_cast<int32_t>(v)) {}
1231 explicit Constant(Float32 v) : type_(kFloat32), value_(v.get_bits()) {}
1232 explicit Constant(double v)
1233 : type_(kFloat64), value_(base::bit_cast<int64_t>(v)) {}
1234 explicit Constant(Float64 v) : type_(kFloat64), value_(v.get_bits()) {}
1236 : type_(kExternalReference),
1237 value_(base::bit_cast<intptr_t>(ref.raw())) {}
1238 explicit Constant(IndirectHandle<HeapObject> obj, bool is_compressed = false)
1239 : type_(is_compressed ? kCompressedHeapObject : kHeapObject),
1240 value_(base::bit_cast<intptr_t>(obj)) {}
1241 explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
1242 explicit Constant(RelocatablePtrConstantInfo info);
1243
1244 Type type() const { return type_; }
1245
1246 RelocInfo::Mode rmode() const { return rmode_; }
1247
1248 bool FitsInInt32() const {
1249 if (type() == kInt32) return true;
1250 DCHECK(type() == kInt64);
1251 return value_ >= std::numeric_limits<int32_t>::min() &&
1252 value_ <= std::numeric_limits<int32_t>::max();
1253 }
1254
1255 int32_t ToInt32() const {
1256 DCHECK(FitsInInt32());
1257 const int32_t value = static_cast<int32_t>(value_);
1258 DCHECK_EQ(value_, static_cast<int64_t>(value));
1259 return value;
1260 }
1261
1262 int64_t ToInt64() const {
1263 if (type() == kInt32) return ToInt32();
1264 DCHECK_EQ(kInt64, type());
1265 return value_;
1266 }
1267
1268 float ToFloat32() const {
1269 // TODO(ahaas): We should remove this function. If value_ has the bit
1270 // representation of a signalling NaN, then returning it as float can cause
1271 // the signalling bit to flip, and value_ is returned as a quiet NaN.
1272 DCHECK_EQ(kFloat32, type());
1273 return base::bit_cast<float>(static_cast<int32_t>(value_));
1274 }
1275
1276 // TODO(ahaas): All callers of ToFloat32() should call this function instead
1277 // to preserve signaling NaNs.
1278 Float32 ToFloat32Safe() const {
1279 DCHECK_EQ(kFloat32, type());
1280 return Float32::FromBits(static_cast<uint32_t>(value_));
1281 }
1282
1283 uint32_t ToFloat32AsInt() const {
1284 DCHECK_EQ(kFloat32, type());
1285 return base::bit_cast<uint32_t>(static_cast<int32_t>(value_));
1286 }
1287
1288 base::Double ToFloat64() const {
1289 DCHECK_EQ(kFloat64, type());
1290 return base::Double(base::bit_cast<uint64_t>(value_));
1291 }
1292
1293 ExternalReference ToExternalReference() const {
1294 DCHECK_EQ(kExternalReference, type());
1295 return ExternalReference::FromRawAddress(static_cast<Address>(value_));
1296 }
1297
1298 RpoNumber ToRpoNumber() const {
1299 DCHECK_EQ(kRpoNumber, type());
1300 return RpoNumber::FromInt(static_cast<int>(value_));
1301 }
1302
1303 IndirectHandle<HeapObject> ToHeapObject() const;
1304 IndirectHandle<Code> ToCode() const;
1305
1306 private:
1307 Type type_;
1308 RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
1309 int64_t value_;
1310};
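// [Editorial sketch, not in the original source] Constant normalizes several
// payload types behind one tagged representation; a 64-bit value that fits
// in 32 bits can be read back either way:
//
//   Constant c(int64_t{42});
//   CHECK_EQ(Constant::kInt64, c.type());
//   CHECK(c.FitsInInt32());
//   CHECK_EQ(42, c.ToInt32());
//   CHECK_EQ(42, c.ToInt64());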
1311
1312std::ostream& operator<<(std::ostream&, const Constant&);
1313
1314// Forward declarations.
1315class FrameStateDescriptor;
1316
1317enum class StateValueKind : uint8_t {
1318 kArgumentsElements,
1319 kArgumentsLength,
1320 kRestLength,
1321 kPlain,
1322 kOptimizedOut,
1323 kNestedObject,
1324 kDuplicate,
1325 kStringConcat
1326};
1327
1328std::ostream& operator<<(std::ostream& os, StateValueKind kind);
1329
1330class StateValueDescriptor {
1331 public:
1334
1335 static StateValueDescriptor ArgumentsElements(ArgumentsStateType type) {
1336 StateValueDescriptor descr(StateValueKind::kArgumentsElements,
1337 MachineType::AnyTagged());
1338 descr.args_type_ = type;
1339 return descr;
1340 }
1341 static StateValueDescriptor ArgumentsLength() {
1342 return StateValueDescriptor(StateValueKind::kArgumentsLength,
1343 MachineType::AnyTagged());
1344 }
1345 static StateValueDescriptor RestLength() {
1346 return StateValueDescriptor(StateValueKind::kRestLength,
1347 MachineType::AnyTagged());
1348 }
1349 static StateValueDescriptor Plain(MachineType type) {
1350 return StateValueDescriptor(StateValueKind::kPlain, type);
1351 }
1352 static StateValueDescriptor OptimizedOut() {
1353 return StateValueDescriptor(StateValueKind::kOptimizedOut,
1354 MachineType::AnyTagged());
1355 }
1356 static StateValueDescriptor Recursive(size_t id) {
1357 StateValueDescriptor descr(StateValueKind::kNestedObject,
1358 MachineType::AnyTagged());
1359 descr.id_ = id;
1360 return descr;
1361 }
1362 static StateValueDescriptor Duplicate(size_t id) {
1363 StateValueDescriptor descr(StateValueKind::kDuplicate,
1364 MachineType::AnyTagged());
1365 descr.id_ = id;
1366 return descr;
1367 }
1368 static StateValueDescriptor StringConcat(size_t id) {
1369 StateValueDescriptor descr(StateValueKind::kStringConcat,
1370 MachineType::AnyTagged());
1371 descr.id_ = id;
1372 return descr;
1373 }
1374
1375 bool IsArgumentsElements() const {
1376 return kind_ == StateValueKind::kArgumentsElements;
1377 }
1378 bool IsArgumentsLength() const {
1379 return kind_ == StateValueKind::kArgumentsLength;
1380 }
1381 bool IsRestLength() const { return kind_ == StateValueKind::kRestLength; }
1382 bool IsPlain() const { return kind_ == StateValueKind::kPlain; }
1383 bool IsOptimizedOut() const { return kind_ == StateValueKind::kOptimizedOut; }
1384 bool IsNestedObject() const { return kind_ == StateValueKind::kNestedObject; }
1385 bool IsNested() const {
1386 return kind_ == StateValueKind::kNestedObject ||
1387 kind_ == StateValueKind::kStringConcat;
1388 }
1389 bool IsDuplicate() const { return kind_ == StateValueKind::kDuplicate; }
1390 bool IsStringConcat() const { return kind_ == StateValueKind::kStringConcat; }
1391 MachineType type() const { return type_; }
1392 size_t id() const {
1393 DCHECK(kind_ == StateValueKind::kDuplicate ||
1394 kind_ == StateValueKind::kNestedObject ||
1395 kind_ == StateValueKind::kStringConcat);
1396 return id_;
1397 }
1398 ArgumentsStateType arguments_type() const {
1399 DCHECK(kind_ == StateValueKind::kArgumentsElements);
1400 return args_type_;
1401 }
1402
1403 void Print(std::ostream& os) const;
1404
1405 private:
1408
1411 union {
1412 size_t id_;
1414 };
1415};
1416
1417class StateValueList {
1418 public:
1419 explicit StateValueList(Zone* zone) : fields_(zone), nested_(zone) {}
1420
1421 size_t size() { return fields_.size(); }
1422
1423 size_t nested_count() { return nested_.size(); }
1424
1425 struct Value {
1426 StateValueDescriptor* desc;
1427 StateValueList* nested;
1428
1429 Value(StateValueDescriptor* desc, StateValueList* nested)
1430 : desc(desc), nested(nested) {}
1431 };
1432
1433 class iterator {
1434 public:
1435 // Bare minimum of operators needed for range iteration.
1436 bool operator!=(const iterator& other) const {
1437 return field_iterator != other.field_iterator;
1438 }
1439 bool operator==(const iterator& other) const {
1440 return field_iterator == other.field_iterator;
1441 }
1442 iterator& operator++() {
1443 if (field_iterator->IsNested()) {
1444 nested_iterator++;
1445 }
1446 ++field_iterator;
1447 return *this;
1448 }
1449 Value operator*() {
1450 StateValueDescriptor* desc = &(*field_iterator);
1451 StateValueList* nested = desc->IsNested() ? *nested_iterator : nullptr;
1452 return Value(desc, nested);
1453 }
1454
1455 private:
1456 friend class StateValueList;
1457
1460 : field_iterator(it), nested_iterator(nested) {}
1461
1464 };
1465
1466 struct Slice {
1468 : start_position(start), fields_count(fields) {}
1469
1472 };
1473
1474 void ReserveSize(size_t size) { fields_.reserve(size); }
1475
1476 StateValueList* PushRecursiveField(Zone* zone, size_t id) {
1477 fields_.push_back(StateValueDescriptor::Recursive(id));
1478 StateValueList* nested = zone->New<StateValueList>(zone);
1479 nested_.push_back(nested);
1480 return nested;
1481 }
1482 StateValueList* PushStringConcat(Zone* zone, size_t id) {
1483 fields_.push_back(StateValueDescriptor::StringConcat(id));
1484 StateValueList* nested = zone->New<StateValueList>(zone);
1485 nested_.push_back(nested);
1486 return nested;
1487 }
1488 void PushArgumentsElements(ArgumentsStateType type) {
1489 fields_.push_back(StateValueDescriptor::ArgumentsElements(type));
1490 }
1491 void PushArgumentsLength() {
1492 fields_.push_back(StateValueDescriptor::ArgumentsLength());
1493 }
1494 void PushRestLength() {
1495 fields_.push_back(StateValueDescriptor::RestLength());
1496 }
1497 void PushDuplicate(size_t id) {
1498 fields_.push_back(StateValueDescriptor::Duplicate(id));
1499 }
1500 void PushPlain(MachineType type) {
1501 fields_.push_back(StateValueDescriptor::Plain(type));
1502 }
1503 void PushOptimizedOut(size_t num = 1) {
1504 fields_.insert(fields_.end(), num, StateValueDescriptor::OptimizedOut());
1505 }
1506 void PushCachedSlice(const Slice& cached) {
1507 fields_.insert(fields_.end(), cached.start_position,
1508 cached.start_position + cached.fields_count);
1509 }
1510
1511 // Returns a Slice representing the (non-nested) fields in StateValueList from
1512 // values_start to the current end position.
1513 Slice MakeSlice(size_t values_start) {
1514 DCHECK(!HasNestedFieldsAfter(values_start));
1515 size_t fields_count = fields_.size() - values_start;
1516 return Slice(fields_.begin() + values_start, fields_count);
1517 }
1518
1519 iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
1520 iterator end() { return iterator(fields_.end(), nested_.end()); }
1521
1522 private:
1523 bool HasNestedFieldsAfter(size_t values_start) {
1524 auto it = fields_.begin() + values_start;
1525 for (; it != fields_.end(); it++) {
1526 if (it->IsNested()) return true;
1527 }
1528 return false;
1529 }
1530
1533};
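// [Editorial sketch, not in the original source] Building a small
// deoptimization value tree: two plain values plus a nested object whose
// fields were optimized out (`zone` is an assumed Zone*):
//
//   StateValueList values(zone);
//   values.ReserveSize(3);
//   values.PushPlain(MachineType::Int32());
//   StateValueList* obj = values.PushRecursiveField(zone, /*id=*/0);
//   obj->PushOptimizedOut(2);
//   values.PushPlain(MachineType::AnyTagged());
//   CHECK_EQ(3u, values.size());           // nested fields count separately
//   CHECK_EQ(1u, values.nested_count());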
1534
1535class FrameStateDescriptor : public ZoneObject {
1536 public:
1537 FrameStateDescriptor(
1538 Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
1539 OutputFrameStateCombine state_combine, uint16_t parameters_count,
1540 uint16_t max_arguments, size_t locals_count, size_t stack_count,
1543 FrameStateDescriptor* outer_state = nullptr,
1544 uint32_t wasm_liftoff_frame_size = std::numeric_limits<uint32_t>::max(),
1545 uint32_t wasm_function_index = std::numeric_limits<uint32_t>::max());
1546
1547 FrameStateType type() const { return type_; }
1548 BytecodeOffset bailout_id() const { return bailout_id_; }
1549 OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
1550 uint16_t parameters_count() const { return parameters_count_; }
1551 uint16_t max_arguments() const { return max_arguments_; }
1552 size_t locals_count() const { return locals_count_; }
1553 size_t stack_count() const { return stack_count_; }
1554 MaybeIndirectHandle<SharedFunctionInfo> shared_info() const {
1555 return shared_info_;
1556 }
1557 MaybeIndirectHandle<BytecodeArray> bytecode_array() const {
1558 return bytecode_array_;
1559 }
1560 FrameStateDescriptor* outer_state() const { return outer_state_; }
1561 bool HasClosure() const {
1562 return
1563#if V8_ENABLE_WEBASSEMBLY
1564 type_ != FrameStateType::kLiftoffFunction &&
1565#endif
1566 type_ != FrameStateType::kConstructInvokeStub;
1567 }
1568 bool HasContext() const {
1569 return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
1570 type_ == FrameStateType::kBuiltinContinuation ||
1571#if V8_ENABLE_WEBASSEMBLY
1572 type_ == FrameStateType::kJSToWasmBuiltinContinuation ||
1573 // TODO(mliedtke): Should we skip the context for the FrameState of
1574 // inlined wasm functions?
1575 type_ == FrameStateType::kWasmInlinedIntoJS ||
1576#endif // V8_ENABLE_WEBASSEMBLY
1577 type_ == FrameStateType::kConstructCreateStub ||
1578 type_ == FrameStateType::kConstructInvokeStub;
1579 }
1580
1581 // The frame height on the stack, in number of slots, as serialized into a
1582 // Translation and later used by the deoptimizer. Does *not* include
1583 // information from the chain of outer states. Unlike |GetSize| this does not
1584 // always include parameters, locals, and stack slots; instead, the returned
1585 // slot kinds depend on the frame type.
1586 size_t GetHeight() const;
1587
1588 // Returns an overapproximation of the unoptimized stack frame size in bytes,
1589 // as later produced by the deoptimizer. Considers both this and the chain of
1590 // outer states.
1592 return total_conservative_frame_size_in_bytes_;
1593 }
1594
1595 size_t GetSize() const;
1596 size_t GetTotalSize() const;
1597 size_t GetFrameCount() const;
1598 size_t GetJSFrameCount() const;
1599
1600 uint32_t GetWasmFunctionIndex() const {
1601 DCHECK(wasm_function_index_ != std::numeric_limits<uint32_t>::max());
1602 return wasm_function_index_;
1603 }
1604
1606
1607 static const int kImpossibleValue = 0xdead;
1608
1609 private:
1613 const uint16_t parameters_count_;
1614 const uint16_t max_arguments_;
1615 const size_t locals_count_;
1616 const size_t stack_count_;
1623};
1624
1625#if V8_ENABLE_WEBASSEMBLY
1626class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
1627 public:
1628 JSToWasmFrameStateDescriptor(
1629 Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
1630 OutputFrameStateCombine state_combine, uint16_t parameters_count,
1631 size_t locals_count, size_t stack_count,
1633 FrameStateDescriptor* outer_state,
1634 const wasm::CanonicalSig* wasm_signature);
1635
1636 std::optional<wasm::ValueKind> return_kind() const { return return_kind_; }
1637
1638 private:
1639 std::optional<wasm::ValueKind> return_kind_;
1640};
1641#endif // V8_ENABLE_WEBASSEMBLY
1642
1643// A deoptimization entry is a pair of the reason why we deoptimize and the
1644// frame state descriptor that we have to go back to.
1645class DeoptimizationEntry final {
1646 public:
1647 DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
1648 DeoptimizeReason reason, NodeId node_id,
1649 FeedbackSource const& feedback)
1650 : descriptor_(descriptor),
1651 kind_(kind),
1652 reason_(reason),
1653#ifdef DEBUG
1654 node_id_(node_id),
1655#endif // DEBUG
1656 feedback_(feedback) {
1657 USE(node_id);
1658 }
1659
1660 FrameStateDescriptor* descriptor() const { return descriptor_; }
1661 DeoptimizeKind kind() const { return kind_; }
1662 DeoptimizeReason reason() const { return reason_; }
1663#ifdef DEBUG
1664 NodeId node_id() const { return node_id_; }
1665#endif // DEBUG
1666 FeedbackSource const& feedback() const { return feedback_; }
1667
1668 private:
1672#ifdef DEBUG
1673 const NodeId node_id_;
1674#endif // DEBUG
1676};
1677
1679
1680class V8_EXPORT_PRIVATE PhiInstruction final
1681 : public NON_EXPORTED_BASE(ZoneObject) {
1682 public:
1684
1685 PhiInstruction(Zone* zone, int virtual_register, size_t input_count);
1686
1687 void SetInput(size_t offset, int virtual_register);
1688 void RenameInput(size_t offset, int virtual_register);
1689
1690 int virtual_register() const { return virtual_register_; }
1691 const IntVector& operands() const { return operands_; }
1692
1693 // TODO(dcarney): this has no real business being here, since it's internal to
1694 // the register allocator, but putting it here was convenient.
1695 const InstructionOperand& output() const { return output_; }
1697
1698 private:
1702};
1703
1704// Analogue of BasicBlock for Instructions instead of Nodes.
1705class V8_EXPORT_PRIVATE InstructionBlock final
1706 : public NON_EXPORTED_BASE(ZoneObject) {
1707 public:
1708 InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
1709 RpoNumber loop_end, RpoNumber dominator, bool deferred,
1710 bool handler);
1711
1712 // Instruction indexes (used by the register allocator).
1713 int32_t first_instruction_index() const {
1714 DCHECK_LE(0, code_start_);
1715 DCHECK_LT(0, code_end_);
1716 DCHECK_GE(code_end_, code_start_);
1717 return code_start_;
1718 }
1719 int32_t last_instruction_index() const {
1720 DCHECK_LE(0, code_start_);
1721 DCHECK_LT(0, code_end_);
1722 DCHECK_GE(code_end_, code_start_);
1723 return code_end_ - 1;
1724 }
1725
1726 int32_t code_start() const { return code_start_; }
1727 void set_code_start(int32_t start) { code_start_ = start; }
1728
1729 int32_t code_end() const { return code_end_; }
1730 void set_code_end(int32_t end) { code_end_ = end; }
1731
1732 bool IsDeferred() const { return deferred_; }
1733 bool IsHandler() const { return handler_; }
1734 void MarkHandler() { handler_ = true; }
1735 void UnmarkHandler() { handler_ = false; }
1736
1737 RpoNumber ao_number() const { return ao_number_; }
1738 RpoNumber rpo_number() const { return rpo_number_; }
1739 RpoNumber loop_header() const { return loop_header_; }
1740 RpoNumber loop_end() const {
1741 DCHECK(IsLoopHeader());
1742 return loop_end_;
1743 }
1744 inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
1745 inline bool IsSwitchTarget() const { return switch_target_; }
1746 inline bool ShouldAlignCodeTarget() const { return code_target_alignment_; }
1747 inline bool ShouldAlignLoopHeader() const { return loop_header_alignment_; }
1748 inline bool IsLoopHeaderInAssemblyOrder() const {
1749 return loop_header_alignment_;
1750 }
1751 bool omitted_by_jump_threading() const { return omitted_by_jump_threading_; }
1752 void set_omitted_by_jump_threading() { omitted_by_jump_threading_ = true; }
1753
1755 Predecessors& predecessors() { return predecessors_; }
1756 const Predecessors& predecessors() const { return predecessors_; }
1757 size_t PredecessorCount() const { return predecessors_.size(); }
1758 size_t PredecessorIndexOf(RpoNumber rpo_number) const;
1759
1761 Successors& successors() { return successors_; }
1762 const Successors& successors() const { return successors_; }
1763 size_t SuccessorCount() const { return successors_.size(); }
1764
1765 RpoNumber dominator() const { return dominator_; }
1766 void set_dominator(RpoNumber dominator) { dominator_ = dominator; }
1767
1769 const PhiInstructions& phis() const { return phis_; }
1770 PhiInstruction* PhiAt(size_t i) const { return phis_[i]; }
1771 void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
1772
1773 void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
1774
1775 void set_code_target_alignment(bool val) { code_target_alignment_ = val; }
1776 void set_loop_header_alignment(bool val) { loop_header_alignment_ = val; }
1777
1778 void set_switch_target(bool val) { switch_target_ = val; }
1779
1780 bool needs_frame() const { return needs_frame_; }
1781 void mark_needs_frame() { needs_frame_ = true; }
1782
1783 bool must_construct_frame() const { return must_construct_frame_; }
1784 void mark_must_construct_frame() { must_construct_frame_ = true; }
1785
1786 bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
1787 void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
1788 void clear_must_deconstruct_frame() { must_deconstruct_frame_ = false; }
1789
1790 private:
1794 RpoNumber ao_number_; // Assembly order number.
1799 int32_t code_start_; // start index of arch-specific code.
1800 int32_t code_end_ = -1; // end index of arch-specific code.
1801 const bool deferred_ : 1; // Block contains deferred code.
1802 bool handler_ : 1; // Block is a handler entry point.
1804 bool code_target_alignment_ : 1; // insert code target alignment before this
1805 // block
1806 bool loop_header_alignment_ : 1; // insert loop header alignment before this
1807 // block
1811 bool omitted_by_jump_threading_ : 1; // Just for cleaner code comments.
1812};
1813
1815
1820
1821std::ostream& operator<<(std::ostream&, const PrintableInstructionBlock&);
1822
1823using ConstantMap = ZoneUnorderedMap</* virtual register */ int, Constant>;
1827
1828// Represents architecture-specific generated code before, during, and after
1829// register allocation.
1830class V8_EXPORT_PRIVATE InstructionSequence final
1831 : public NON_EXPORTED_BASE(ZoneObject) {
1832 public:
1833 static InstructionBlocks* InstructionBlocksFor(Zone* zone,
1834 const Schedule* schedule);
1835 static InstructionBlocks* InstructionBlocksFor(
1836 Zone* zone, const turboshaft::Graph& graph);
1837 InstructionSequence(Isolate* isolate, Zone* zone,
1838 InstructionBlocks* instruction_blocks);
1839 InstructionSequence(const InstructionSequence&) = delete;
1840 InstructionSequence& operator=(const InstructionSequence&) = delete;
1841
1842 int NextVirtualRegister();
1843 int VirtualRegisterCount() const { return next_virtual_register_; }
1844
1845 const InstructionBlocks& instruction_blocks() const {
1846 return *instruction_blocks_;
1847 }
1848
1849 const InstructionBlocks& ao_blocks() const { return *ao_blocks_; }
1850
1851 int InstructionBlockCount() const {
1852 return static_cast<int>(instruction_blocks_->size());
1853 }
1854
1855 InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) {
1856 return instruction_blocks_->at(rpo_number.ToSize());
1857 }
1858
1859 int LastLoopInstructionIndex(const InstructionBlock* block) {
1860 return instruction_blocks_->at(block->loop_end().ToSize() - 1)
1861 ->last_instruction_index();
1862 }
1863
1864 const InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) const {
1865 return instruction_blocks_->at(rpo_number.ToSize());
1866 }
1867
1868 InstructionBlock* GetInstructionBlock(int instruction_index) const {
1869 return instructions()[instruction_index]->block();
1870 }
1871
1872 static MachineRepresentation DefaultRepresentation() {
1873 return MachineType::PointerRepresentation();
1874 }
1875 MachineRepresentation GetRepresentation(int virtual_register) const;
1876 void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
1877
1878 bool IsReference(int virtual_register) const {
1879 return CanBeTaggedOrCompressedPointer(GetRepresentation(virtual_register));
1880 }
1881 bool IsFP(int virtual_register) const {
1882 return IsFloatingPoint(GetRepresentation(virtual_register));
1883 }
1884 int representation_mask() const { return representation_mask_; }
1886 constexpr int kFPRepMask =
1887 RepresentationBit(MachineRepresentation::kFloat16) |
1888 RepresentationBit(MachineRepresentation::kFloat32) |
1889 RepresentationBit(MachineRepresentation::kFloat64) |
1890 RepresentationBit(MachineRepresentation::kSimd128) |
1891 RepresentationBit(MachineRepresentation::kSimd256);
1892 return (representation_mask() & kFPRepMask) != 0;
1893 }
1894
1896 constexpr int kSimd128RepMask =
1897 RepresentationBit(MachineRepresentation::kSimd128);
1898 return (representation_mask() & kSimd128RepMask) != 0;
1899 }
1900
1901 Instruction* GetBlockStart(RpoNumber rpo) const;
1902
1904 const_iterator begin() const { return instructions_.begin(); }
1905 const_iterator end() const { return instructions_.end(); }
1906 const Instructions& instructions() const { return instructions_; }
1908 return static_cast<int>(instructions().size()) - 1;
1909 }
1910
1911 Instruction* InstructionAt(int index) const {
1912 DCHECK_LE(0, index);
1913 DCHECK_GT(instructions_.size(), index);
1914 return instructions_[index];
1915 }
1916
1917 Isolate* isolate() const { return isolate_; }
1918 const ReferenceMaps* reference_maps() const { return &reference_maps_; }
1919 Zone* zone() const { return zone_; }
1920
1921 // Used by the instruction selector while adding instructions.
1922 int AddInstruction(Instruction* instr);
1923 void StartBlock(RpoNumber rpo);
1924 void EndBlock(RpoNumber rpo);
1925
1926 void AddConstant(int virtual_register, Constant constant) {
1927 // TODO(titzer): allow RPO numbers as constants?
1928 DCHECK_NE(Constant::kRpoNumber, constant.type());
1929 DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
1930 DCHECK(constants_.find(virtual_register) == constants_.end());
1931 constants_.emplace(virtual_register, constant);
1932 }
1933 Constant GetConstant(int virtual_register) const {
1934 auto it = constants_.find(virtual_register);
1935 DCHECK(it != constants_.end());
1936 DCHECK_EQ(virtual_register, it->first);
1937 return it->second;
1938 }
1939
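// [Editorial sketch, not in the original source] Constants are keyed by
// virtual register; `seq` is an assumed InstructionSequence*:
//
//   int vreg = seq->NextVirtualRegister();
//   seq->AddConstant(vreg, Constant(int32_t{1}));
//   CHECK_EQ(1, seq->GetConstant(vreg).ToInt32());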
1941 Immediates& immediates() { return immediates_; }
1942
1944 RpoImmediates& rpo_immediates() { return rpo_immediates_; }
1945
1946 ImmediateOperand AddImmediate(const Constant& constant) {
1947 if (RelocInfo::IsNoInfo(constant.rmode())) {
1948 if (constant.type() == Constant::kRpoNumber) {
1949 // Ideally we would inline RPO numbers into the operand, however jump-
1950 // threading modifies RPO values and so we indirect through a vector
1951 // of rpo_immediates to enable rewriting. We keep this seperate from the
1952 // immediates vector so that we don't repeatedly push the same rpo
1953 // number.
1954 RpoNumber rpo_number = constant.ToRpoNumber();
1955 DCHECK(!rpo_immediates().at(rpo_number.ToSize()).IsValid() ||
1956 rpo_immediates().at(rpo_number.ToSize()) == rpo_number);
1957 rpo_immediates()[rpo_number.ToSize()] = rpo_number;
1958 return ImmediateOperand(ImmediateOperand::INDEXED_RPO,
1959 rpo_number.ToInt());
1960 } else if (constant.type() == Constant::kInt32) {
1961 return ImmediateOperand(ImmediateOperand::INLINE_INT32,
1962 constant.ToInt32());
1963 } else if (constant.type() == Constant::kInt64 &&
1964 constant.FitsInInt32()) {
1965 return ImmediateOperand(ImmediateOperand::INLINE_INT64,
1966 constant.ToInt32());
1967 }
1968 }
1969 int index = static_cast<int>(immediates_.size());
1970 immediates_.push_back(constant);
1971 return ImmediateOperand(ImmediateOperand::INDEXED_IMM, index);
1972 }
1973
1974 Constant GetImmediate(const ImmediateOperand* op) const {
1975 switch (op->type()) {
1976 case ImmediateOperand::INLINE_INT32:
1977 return Constant(op->inline_int32_value());
1978 case ImmediateOperand::INLINE_INT64:
1979 return Constant(op->inline_int64_value());
1980 case ImmediateOperand::INDEXED_RPO: {
1981 int index = op->indexed_value();
1982 DCHECK_LE(0, index);
1983 DCHECK_GT(rpo_immediates_.size(), index);
1984 return Constant(rpo_immediates_[index]);
1985 }
1986 case ImmediateOperand::INDEXED_IMM: {
1987 int index = op->indexed_value();
1988 DCHECK_LE(0, index);
1989 DCHECK_GT(immediates_.size(), index);
1990 return immediates_[index];
1991 }
1992 }
1993 UNREACHABLE();
1994 }
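// [Editorial sketch, not in the original source] Round-tripping an immediate
// through the encoding chosen by AddImmediate above; `seq` is assumed:
//
//   ImmediateOperand imm = seq->AddImmediate(Constant(int32_t{7}));
//   CHECK_EQ(ImmediateOperand::INLINE_INT32, imm.type());
//   CHECK_EQ(7, seq->GetImmediate(&imm).ToInt32());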
1995
1996 int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
1998 NodeId node_id, FeedbackSource const& feedback);
1999 DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
2001 return static_cast<int>(deoptimization_entries_.size());
2002 }
2003
2004 RpoNumber InputRpo(Instruction* instr, size_t index);
2005
2006 bool GetSourcePosition(const Instruction* instr,
2007 SourcePosition* result) const;
2008 void SetSourcePosition(const Instruction* instr, SourcePosition value);
2009
2010 bool ContainsCall() const {
2011 for (Instruction* instr : instructions_) {
2012 if (instr->IsCall()) return true;
2013 }
2014 return false;
2015 }
2016
2017 // APIs to aid debugging. For general-stream APIs, use operator<<.
2018 void Print() const;
2019
2020 void PrintBlock(int block_id) const;
2021
2022 void ValidateEdgeSplitForm() const;
2023 void ValidateDeferredBlockExitPaths() const;
2024 void ValidateDeferredBlockEntryPaths() const;
2025 void ValidateSSA() const;
2026
2027 static void SetRegisterConfigurationForTesting(
2028 const RegisterConfiguration* regConfig);
2030
2031 void RecomputeAssemblyOrderForTesting();
2032
2033 void IncreaseRpoForTesting(size_t rpo_count) {
2034 DCHECK_GE(rpo_count, rpo_immediates().size());
2035 rpo_immediates().resize(rpo_count);
2036 }
2037
2038 private:
2039 friend V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
2040 const InstructionSequence&);
2041
2044
2045 static const RegisterConfiguration* RegisterConfigurationForTesting();
2047
2048 // Puts the deferred blocks last and may rotate loops.
2049 void ComputeAssemblyOrder();
2050
2051 Isolate* isolate_;
2052 Zone* const zone_;
2065
2066 // Used at construction time
2068};
2069
2070V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
2071 const InstructionSequence&);
2072#undef INSTRUCTION_OPERAND_ALIGN
2073
2074// Constants for accessing ConditionalCompare data, shared between isel and
2075// codegen.
2076constexpr size_t kNumCcmpOperands = 5;
2077constexpr size_t kCcmpOffsetOfOpcode = 0;
2078constexpr size_t kCcmpOffsetOfLhs = 1;
2079constexpr size_t kCcmpOffsetOfRhs = 2;
2080constexpr size_t kCcmpOffsetOfDefaultFlags = 3;
2081constexpr size_t kCcmpOffsetOfCompareCondition = 4;
2082constexpr size_t kConditionalSetEndOffsetOfNumCcmps = 1;
2083constexpr size_t kConditionalSetEndOffsetOfCondition = 2;
2084constexpr size_t kBranchEndOffsetOfFalseBlock = 1;
2085constexpr size_t kBranchEndOffsetOfTrueBlock = 2;
2086constexpr size_t kConditionalBranchEndOffsetOfNumCcmps = 3;
2087constexpr size_t kConditionalBranchEndOffsetOfCondition = 4;
2088
2089} // namespace compiler
2090} // namespace internal
2091} // namespace v8
2092
2093#endif // V8_COMPILER_BACKEND_INSTRUCTION_H_
InstructionOperand * OutputAt(size_t i)
void set_block(InstructionBlock *block)
LocationOperand(InstructionOperand::Kind operand_kind, LocationOperand::LocationKind location_kind, MachineRepresentation rep, int index)
static LocationOperand cast(const InstructionOperand &op)
Simd128Register GetSimd128Register() const
MachineRepresentation representation() const
static LocationOperand * cast(InstructionOperand *op)
DoubleRegister GetDoubleRegister() const
static const LocationOperand * cast(const InstructionOperand *op)
static bool IsSupportedRepresentation(MachineRepresentation rep)
MoveOperands(const InstructionOperand &source, const InstructionOperand &destination)
MoveOperands(const MoveOperands &)=delete
const InstructionOperand & source() const
const InstructionOperand & destination() const
bool Equals(const MoveOperands &that) const
void set_destination(const InstructionOperand &operand)
MoveOperands & operator=(const MoveOperands &)=delete
InstructionOperand & destination()
void set_source(const InstructionOperand &operand)
ParallelMove(const ParallelMove &)=delete
MoveOperands * AddMove(const InstructionOperand &from, const InstructionOperand &to)
MoveOperands * AddMove(const InstructionOperand &from, const InstructionOperand &to, Zone *operand_allocation_zone)
ParallelMove & operator=(const ParallelMove &)=delete
static PendingOperand * New(Zone *zone, PendingOperand *previous_operand)
void set_next(PendingOperand *next)
PendingOperand(PendingOperand *next_operand)
const InstructionOperand & output() const
const IntVector & operands() const
ZoneVector< InstructionOperand > reference_operands_
const ZoneVector< InstructionOperand > & reference_operands() const
bool operator>(RpoNumber other) const
bool IsNext(const RpoNumber other) const
bool operator<=(RpoNumber other) const
static RpoNumber FromInt(int index)
bool operator>=(RpoNumber other) const
bool operator<(RpoNumber other) const
bool operator!=(RpoNumber other) const
bool operator==(RpoNumber other) const
static StateValueDescriptor StringConcat(size_t id)
static StateValueDescriptor RestLength()
static StateValueDescriptor OptimizedOut()
static StateValueDescriptor ArgumentsLength()
static StateValueDescriptor Recursive(size_t id)
StateValueDescriptor(StateValueKind kind, MachineType type)
ArgumentsStateType arguments_type() const
static StateValueDescriptor Plain(MachineType type)
static StateValueDescriptor ArgumentsElements(ArgumentsStateType type)
static StateValueDescriptor Duplicate(size_t id)
ZoneVector< StateValueDescriptor >::iterator field_iterator
iterator(ZoneVector< StateValueDescriptor >::iterator it, ZoneVector< StateValueList * >::iterator nested)
bool operator==(const iterator &other) const
bool operator!=(const iterator &other) const
ZoneVector< StateValueList * >::iterator nested_iterator
StateValueList * PushRecursiveField(Zone *zone, size_t id)
void PushArgumentsElements(ArgumentsStateType type)
void PushCachedSlice(const Slice &cached)
Slice MakeSlice(size_t values_start)
ZoneVector< StateValueDescriptor > fields_
StateValueList * PushStringConcat(Zone *zone, size_t id)
bool HasNestedFieldsAfter(size_t values_start)
ZoneVector< StateValueList * > nested_
UnallocatedOperand(ExtendedPolicy policy, int virtual_register)
UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime, int virtual_register)
UnallocatedOperand(int reg_id, int slot_id, int virtual_register)
UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
UnallocatedOperand(int virtual_register, int input_index)
UnallocatedOperand(const UnallocatedOperand &other, int virtual_register)
UnallocatedOperand(BasicPolicy policy, int index, int virtual_register)
Zone * zone_
Register const value_
Register const index_
const InternalIndex descriptor_
const ObjectRef type_
const PropertyKind kind_
Handle< String > source_
Definition compiler.cc:3791
int start
int end
ArchOpcode opcode_
OptionalOpIndex index
int32_t offset
#define INSTRUCTION_OPERAND_PREDICATE(name, type)
Definition instruction.h:68
#define INSTRUCTION_OPERAND_ALIGN
Definition instruction.h:45
#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind)
std::map< const std::string, const std::string > map
Instruction * instr
RpoNumber block
ZoneVector< RpoNumber > & result
Point from
Comparator::Output * output_
Point to
InstructionOperand destination
constexpr size_t kCcmpOffsetOfOpcode
constexpr size_t kNumCcmpOperands
constexpr size_t kConditionalSetEndOffsetOfNumCcmps
bool HasMemoryAccessMode(ArchOpcode opcode)
constexpr size_t kCcmpOffsetOfDefaultFlags
constexpr size_t kCcmpOffsetOfRhs
constexpr size_t kBranchEndOffsetOfTrueBlock
constexpr size_t kBranchEndOffsetOfFalseBlock
constexpr size_t kConditionalBranchEndOffsetOfCondition
constexpr size_t kCcmpOffsetOfCompareCondition
constexpr size_t kConditionalSetEndOffsetOfCondition
constexpr size_t kCcmpOffsetOfLhs
std::ostream & operator<<(std::ostream &os, AccessMode access_mode)
constexpr size_t kConditionalBranchEndOffsetOfNumCcmps
constexpr AliasingKind kFPAliasing
constexpr bool CanBeTaggedOrCompressedPointer(MachineRepresentation rep)
V8_EXPORT_PRIVATE constexpr int RepresentationBit(MachineRepresentation rep)
constexpr bool IsFloatingPoint(MachineRepresentation rep)
return value
Definition map-inl.h:893
std::ostream & operator<<(std::ostream &os, const Operation &operation)
Definition operation.h:49
#define NON_EXPORTED_BASE(code)
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define DCHECK_NULL(val)
Definition logging.h:491
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NOT_NULL(val)
Definition logging.h:492
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
#define V8_EXPORT_PRIVATE
Definition macros.h:460
bool operator()(const InstructionOperand &a, const InstructionOperand &b) const
Slice(ZoneVector< StateValueDescriptor >::iterator start, size_t fields)
ZoneVector< StateValueDescriptor >::iterator start_position
Value(StateValueDescriptor *desc, StateValueList *nested)
EmbedderRootsHandler * handler_
std::unique_ptr< ValueMirror > value
wasm::ValueType type