      return is_uint5(value);
      return is_uint6(value);
      return is_int12(value);
    case kRiscvStoreFloat:
    case kRiscvLoadDouble:
    case kRiscvStoreDouble:
      return is_int32(value);
      return is_int12(value);
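
// Matches the pattern Word64Sar(load, 32): when a 64-bit load is shifted
// right arithmetically by exactly 32 and the shift input has no other uses,
// the pair can be folded into a single 32-bit load, which sign-extends on
// RISC-V.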
struct ExtendingLoadMatcher {
    DCHECK(shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
           shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros);
        selector_->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
        constant_rhs == 32 && selector_->CanCover(node, shift.left())) {
      if (load.index().has_value()) {
        int64_t index_constant;
        if (selector_->MatchIntegralWord64Constant(load.index().value(),
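
// Emits the combined instruction for a successful ExtendingLoadMatcher
// match: the matched base register plus immediate addresses the word to
// load.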
  ExtendingLoadMatcher m(node, selector);
  RiscvOperandGeneratorT g(selector);
  InstructionOperand inputs[2];
  inputs[0] = g.UseRegister(m.base());
  DCHECK(is_int32(m.immediate()));
  inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
  InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
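
// Shared scalar-load helper. Addressing is chosen in order of preference: a
// root-register-relative immediate for external references, a plain
// base+immediate when the index fits the opcode's immediate field, and
// otherwise an add into a temporary address register.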
  RiscvOperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  const LoadOp& load = op.Cast<LoadOp>();
  OpIndex index = load.index().value();
  InstructionOperand inputs[3];
  InstructionOperand output_op;
  output_op = g.DefineAsRegister(output.valid() ? output : node);
  const Operation& base_op = selector->Get(base);
      selector->MatchSignedIntegralConstant(index, &index_value)) {
    const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
    if (selector->CanAddressRelativeToRootsRegister(
            constant_base.external_reference())) {
      ptrdiff_t const delta =
          index_value +
          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
              selector->isolate(), constant_base.external_reference());
      if (is_int32(delta)) {
        inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
        selector->Emit(opcode, 1, &output_op, input_count, inputs);
  if (base_op.Is<LoadRootRegisterOp>()) {
    selector->MatchSignedIntegralConstant(index, &index_value);
    inputs[0] = g.UseImmediate64(index_value);
    selector->Emit(opcode, 1, &output_op, input_count, inputs);
  if (g.CanBeImmediate(index, opcode)) {
                   g.DefineAsRegister(output.valid() ? output : node),
                   g.UseRegister(base), g.UseImmediate(index));
    InstructionOperand addr_reg = g.TempRegister();
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
                   g.DefineAsRegister(output.valid() ? output : node), addr_reg,
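
// SIMD counterpart of EmitLoad: RVV loads additionally carry the element
// width (sew) and register grouping (lmul) as immediate operands.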
  RiscvOperandGeneratorT g(selector);
  OpIndex index = selector->input_at(node, 1);
  if (g.CanBeImmediate(index, opcode)) {
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index), g.UseImmediate(sew),
                   g.UseImmediate(lmul));
    InstructionOperand addr_reg = g.TempRegister();
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
                   g.UseImmediate(sew), g.UseImmediate(lmul));
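
// Stores one lane of a 128-bit vector. The base+index address is always
// materialized into a temporary register first.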
void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();
  if (store.kind.with_trap_handler) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
  InstructionOperand inputs[4] = {
      g.UseImmediate(store.lane),
void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  const Simd128LaneMemoryOp& load = this->Get(node).Cast<Simd128LaneMemoryOp>();
  if (load.kind.with_trap_handler) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
       g.UseRegister(this->input_at(node, 2)), g.UseImmediate(load.lane),
       addr_reg, g.TempImmediate(0));
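
// Maps a turboshaft MemoryRepresentation (plus the requested result
// representation) to the RISC-V load opcode, including the
// pointer-compression, protected and sandboxed-pointer variants.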
ArchOpcode GetLoadOpcode(MemoryRepresentation loaded_rep,
                         RegisterRepresentation result_rep) {
  switch (loaded_rep) {
    case MemoryRepresentation::Uint64():
      return kRiscvLoadFloat;
      return kRiscvLoadDouble;
#ifdef V8_COMPRESS_POINTERS
    case MemoryRepresentation::TaggedPointer():
      if (result_rep == RegisterRepresentation::Compressed()) {
      return kRiscvLoadDecompressTagged;
      if (result_rep == RegisterRepresentation::Compressed()) {
      return kRiscvLoadDecompressTaggedSigned;
    case MemoryRepresentation::TaggedPointer():
    case MemoryRepresentation::TaggedSigned():
    case MemoryRepresentation::UncompressedTaggedPointer():
    case MemoryRepresentation::UncompressedTaggedSigned():
      return kRiscvLoadDecompressProtected;
      return kRiscvLoadDecodeSandboxedPointer;
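
// The store-side mapping: tagged stores compress on pointer-compression
// builds, and indirect/sandboxed pointers use dedicated encoding opcodes.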
ArchOpcode GetStoreOpcode(MemoryRepresentation stored_rep) {
  switch (stored_rep) {
    case MemoryRepresentation::Uint8():
    case MemoryRepresentation::Uint16():
    case MemoryRepresentation::Uint32():
    case MemoryRepresentation::Uint64():
      return kRiscvStoreFloat;
      return kRiscvStoreDouble;
    case MemoryRepresentation::TaggedPointer():
    case MemoryRepresentation::TaggedSigned():
      return kRiscvStoreCompressTagged;
    case MemoryRepresentation::UncompressedTaggedPointer():
    case MemoryRepresentation::UncompressedTaggedSigned():
      return kRiscvStoreIndirectPointer;
      return kRiscvStoreEncodeSandboxedPointer;
  opcode = GetLoadOpcode(load.ts_loaded_rep(), load.ts_result_rep());
  if (load.is_protected(&traps_on_null)) {

void InstructionSelectorT::VisitProtectedLoad(OpIndex node) { VisitLoad(node); }
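
// Stores that need a write barrier are emitted as a write-barrier stub call
// with unique registers and two temps; all other stores go through the usual
// immediate-vs-register addressing selection.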
void InstructionSelectorT::VisitStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[4];
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  size_t const temp_count = arraysize(temps);
    code = kArchStoreIndirectWithWriteBarrier;
    code = kArchStoreWithWriteBarrier;
  Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
       g.UseRegisterOrImmediateZero(value), g.UseImmediate(index));
  if (g.CanBeImmediate(index, code)) {
         g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
         g.UseImmediate(index));
    InstructionOperand addr_reg = g.TempRegister();
         g.UseRegister(index), g.UseRegister(base));
         g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0));
void InstructionSelectorT::VisitProtectedStore(OpIndex node) {

void InstructionSelectorT::VisitWord32And(OpIndex node) {

void InstructionSelectorT::VisitWord64And(OpIndex node) {

void InstructionSelectorT::VisitWord32Or(OpIndex node) {

void InstructionSelectorT::VisitWord64Or(OpIndex node) {

void InstructionSelectorT::VisitWord32Xor(OpIndex node) {

void InstructionSelectorT::VisitWord64Xor(OpIndex node) {

void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
  int64_t shift_by = rhs.Cast<ConstantOp>().signed_integral();
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvShl64, g.DefineSameAsFirst(node),
       g.UseRegister(lhs.Cast<ChangeOp>().input()),
       g.UseImmediate64(shift_by));

void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
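
// A 64-bit arithmetic shift right of a sign-extended 32-bit value by a
// constant in [0, 31] can be narrowed to a 32-bit sar on the un-extended
// input.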
void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
  int64_t constant_rhs;
      is_uint5(constant_rhs) && CanCover(node, shiftop.left())) {
    OpIndex input = lhs.Cast<ChangeOp>().input();
    RiscvOperandGeneratorT g(this);
    int right = static_cast<int>(constant_rhs);
    Emit(kRiscvSar32, g.DefineAsRegister(node), g.UseRegister(input),
         g.UseImmediate(right));
void InstructionSelectorT::VisitWord32Ror(OpIndex node) {

void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {

void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
  RiscvOperandGeneratorT g(this);
    Emit(kRiscvRev8, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));
    Emit(kRiscvByteSwap64, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  RiscvOperandGeneratorT g(this);
    InstructionOperand temp = g.TempRegister();
    Emit(kRiscvRev8, temp, g.UseRegister(this->input_at(node, 0)));
    Emit(kRiscvShr64, g.DefineAsRegister(node), temp, g.TempImmediate(32));
    Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));
void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {

void InstructionSelectorT::VisitWord32Ctz(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCtzw, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitWord64Ctz(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCtz, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCpopw, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitWord64Popcnt(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCpop, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitWord64Ror(OpIndex node) {

void InstructionSelectorT::VisitWord64Clz(OpIndex node) {
  VisitRR(this, kRiscvClz64, node);
void InstructionSelectorT::VisitInt32Add(OpIndex node) {

void InstructionSelectorT::VisitInt64Add(OpIndex node) {

void InstructionSelectorT::VisitInt32Sub(OpIndex node) {

void InstructionSelectorT::VisitInt64Sub(OpIndex node) {

void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  int64_t constant_left;
  int64_t constant_right;
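  // If both operands are Word64Sar(x, 32) (e.g. Smi untagging), the shifts
  // can be folded into a single 64-bit multiply-high.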
  if (constant_left == 32 && constant_right == 32) {
    Emit(kRiscvMulHigh64, g.DefineSameAsFirst(node),
         g.UseRegister(this->input_at(left, 0)),
         g.UseRegister(this->input_at(right, 0)));
void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHigh32, node);

void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
  return VisitRRR(this, kRiscvMulHigh64, node);

void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHighU32, node);

void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHighU64, node);
void InstructionSelectorT::VisitInt64Mul(OpIndex node) {

void InstructionSelectorT::VisitInt32Div(OpIndex node) {

void InstructionSelectorT::VisitUint32Div(OpIndex node) {

void InstructionSelectorT::VisitInt32Mod(OpIndex node) {

void InstructionSelectorT::VisitUint32Mod(OpIndex node) {

void InstructionSelectorT::VisitInt64Div(OpIndex node) {

void InstructionSelectorT::VisitUint64Div(OpIndex node) {

void InstructionSelectorT::VisitInt64Mod(OpIndex node) {

void InstructionSelectorT::VisitUint64Mod(OpIndex node) {

void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDS, node);

void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSW, node);

void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSUw, node);

void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDW, node);

void InstructionSelectorT::VisitChangeInt64ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDL, node);

void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDUw, node);
void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
       g.UseRegister(this->input_at(node, 0)));
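
// Float64 -> int32 conversions absorb an immediately preceding rounding op:
// kRoundDown/kRoundUp/kRoundToZero/kRoundTiesEven select the matching
// RISC-V conversion (floor/ceil/trunc/round). The same fold is applied
// through an intervening Float32 -> Float64 change, using the
// single-precision opcodes.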
void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  auto value = this->input_at(node, 0);
  if (const FloatUnaryOp* load = op.TryCast<FloatUnaryOp>()) {
    switch (load->kind) {
      case FloatUnaryOp::Kind::kRoundDown:
        Emit(kRiscvFloorWD, g.DefineAsRegister(node),
             g.UseRegister(this->input_at(value, 0)));
      case FloatUnaryOp::Kind::kRoundUp:
        Emit(kRiscvCeilWD, g.DefineAsRegister(node),
             g.UseRegister(this->input_at(value, 0)));
      case FloatUnaryOp::Kind::kRoundToZero:
        Emit(kRiscvTruncWD, g.DefineAsRegister(node),
             g.UseRegister(this->input_at(value, 0)));
      case FloatUnaryOp::Kind::kRoundTiesEven:
        Emit(kRiscvRoundWD, g.DefineAsRegister(node),
             g.UseRegister(this->input_at(value, 0)));
  if (op.Is<ChangeOp>()) {
    const ChangeOp& change = op.Cast<ChangeOp>();
    using Rep = turboshaft::RegisterRepresentation;
    if (change.from == Rep::Float32() && change.to == Rep::Float64()) {
      auto next = this->input_at(value, 0);
      if (const FloatUnaryOp* round = next_op.TryCast<FloatUnaryOp>()) {
        switch (round->kind) {
          case FloatUnaryOp::Kind::kRoundDown:
            Emit(kRiscvFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(this->input_at(next, 0)));
          case FloatUnaryOp::Kind::kRoundUp:
            Emit(kRiscvCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(this->input_at(next, 0)));
          case FloatUnaryOp::Kind::kRoundToZero:
            Emit(kRiscvTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(this->input_at(next, 0)));
          case FloatUnaryOp::Kind::kRoundTiesEven:
            Emit(kRiscvRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(this->input_at(next, 0)));
      Emit(kRiscvTruncWS, g.DefineAsRegister(node),
           g.UseRegister(this->input_at(value, 0)));
  VisitRR(this, kRiscvTruncWD, node);
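
// The TryTruncate* visitors define up to two outputs: the truncated value
// and, when its projection is used, a success flag set by the instruction.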
void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  this->Emit(kRiscvTruncWD, output_count, outputs, 1, inputs);
void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kRiscvTruncUwD, output_count, outputs, 1, inputs);
void InstructionSelectorT::VisitChangeFloat64ToInt64(OpIndex node) {
  VisitRR(this, kRiscvTruncLD, node);

void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
  VisitRR(this, kRiscvTruncUwD, node);

void InstructionSelectorT::VisitChangeFloat64ToUint64(OpIndex node) {
  VisitRR(this, kRiscvTruncUlD, node);

void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
  VisitRR(this, kRiscvTruncUwD, node);

void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  this->Emit(kRiscvTruncLS, output_count, outputs, 1, inputs);

void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kRiscvTruncLD, output_count, outputs, 1, inputs);

void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kRiscvTruncUlS, output_count, outputs, 1, inputs);

void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);
  if (success_output.valid()) {
    outputs[output_count++] = g.DefineAsRegister(success_output.value());
  Emit(kRiscvTruncUlD, output_count, outputs, 1, inputs);
void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  if (ZeroExtendsWord32ToWord64(value)) {
    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
  Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node), g.UseRegister(value));
bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
  if (op.opcode == Opcode::kLoad) {
    if (load_rep.IsUnsigned()) {
      switch (load_rep.representation()) {
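
// Truncation to 32 bits can fold a preceding 64-bit right shift: a constant
// shift amount in [32, 63] is emitted as a single sar64; otherwise the low
// word is sign-extended in place.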
void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  auto shift_value = input_at(value, 1);
  } else if (int64_t constant;
             MatchSignedIntegralConstant(shift_value, &constant)) {
    if (constant <= 63 && constant >= 32) {
      Emit(kRiscvSar64, g.DefineSameAsFirst(node),
           g.UseRegister(input_at(value, 0)), g.UseImmediate64(constant));
  Emit(kRiscvSignExtendWord, g.DefineAsRegister(node), g.UseRegister(value),
       g.TempImmediate(0));
void InstructionSelectorT::VisitRoundInt64ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSL, node);

void InstructionSelectorT::VisitRoundInt64ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDL, node);

void InstructionSelectorT::VisitRoundUint64ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSUl, node);

void InstructionSelectorT::VisitRoundUint64ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDUl, node);

void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
  VisitRR(this, kRiscvBitcastFloat32ToInt32, node);

void InstructionSelectorT::VisitBitcastFloat64ToInt64(OpIndex node) {
  VisitRR(this, kRiscvBitcastDL, node);

void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvBitcastInt32ToFloat32, node);

void InstructionSelectorT::VisitBitcastInt64ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvBitcastLD, node);
void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundDown, node);

void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundUp, node);

void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundUp, node);

void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundTruncate, node);

void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundTruncate, node);

void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {

void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundTiesEven, node);

void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundTiesEven, node);

void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
  VisitRR(this, kRiscvNegS, node);

void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
  VisitRR(this, kRiscvNegD, node);
void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
                                                    InstructionCode opcode) {
  RiscvOperandGeneratorT g(this);
       g.UseFixed(this->input_at(node, 0), fa0),
       g.UseFixed(this->input_at(node, 1), fa1))

void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
                                                   InstructionCode opcode) {
  RiscvOperandGeneratorT g(this);
       g.UseFixed(this->input_at(node, 0), fa1))
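
// The IEEE-754 helpers above pass operands in the fixed FP argument
// registers (fa0/fa1) since they expand to library calls. Argument
// preparation below follows the same split: C calls move parameters into
// argument registers/slots directly, while other calls claim the parameter
// slots on the stack and fill them with kRiscvStoreToStackSlot.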
void InstructionSelectorT::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    OpIndex node) {
  RiscvOperandGeneratorT g(this);
  if (call_descriptor->IsCFunctionCall()) {
    int gp_param_count = static_cast<int>(call_descriptor->GPParameterCount());
    int fp_param_count = static_cast<int>(call_descriptor->FPParameterCount());
         0, nullptr, 0, nullptr);
      Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
        if (input.node.valid()) {
          stack_size += input.location.GetSizeInPointers();
      Emit(kRiscvStackClaim, g.NoOutput(),
    for (size_t n = 0; n < arguments->size(); ++n) {
      if (input.node.valid()) {
        Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  switch (load_rep.representation()) {
      opcode = kRiscvULoadFloat;
      opcode = kRiscvULoadDouble;
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
  if (load.is_protected(&traps_on_null)) {
    if (traps_on_null) {
  if (g.CanBeImmediate(index, opcode)) {
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
    InstructionOperand addr_reg = g.TempRegister();
         g.UseRegister(index), g.UseRegister(base));
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
void InstructionSelectorT::VisitUnalignedStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);
      opcode = kRiscvUStoreFloat;
      opcode = kRiscvUStoreDouble;
  if (g.CanBeImmediate(index, opcode)) {
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
    InstructionOperand addr_reg = g.TempRegister();
         g.UseRegister(index), g.UseRegister(base));
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
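
// Word32 values live sign-extended in 64-bit registers, so a plain 64-bit
// compare is only valid when both operands agree in signedness provenance.
// IsNodeUnsigned classifies how a node's value was produced.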
bool IsNodeUnsigned(InstructionSelectorT* selector, OpIndex n) {
  if (op.Is<LoadOp>()) {
    const LoadOp& load = op.Cast<LoadOp>();
    return load.machine_type().IsUnsigned() ||
           load.machine_type().IsCompressed();
  } else if (op.Is<WordBinopOp>()) {
    const WordBinopOp& binop = op.Cast<WordBinopOp>();
    switch (binop.kind) {
      case WordBinopOp::Kind::kUnsignedDiv:
      case WordBinopOp::Kind::kUnsignedMod:
      case WordBinopOp::Kind::kUnsignedMulOverflownBits:
  } else if (op.Is<ChangeOrDeoptOp>()) {
    const ChangeOrDeoptOp& change = op.Cast<ChangeOrDeoptOp>();
    return change.kind == ChangeOrDeoptOp::Kind::kFloat64ToUint32;
  } else if (op.Is<ConvertJSPrimitiveToUntaggedOp>()) {
    const ConvertJSPrimitiveToUntaggedOp& convert =
        op.Cast<ConvertJSPrimitiveToUntaggedOp>();
    return convert.kind ==
           ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kUint32;
  } else if (op.Is<ConstantOp>()) {
    const ConstantOp& constant = op.Cast<ConstantOp>();
    return constant.kind == ConstantOp::Kind::kCompressedHeapObject;
bool CanUseOptimizedWord32Compare(InstructionSelectorT* selector,
  if (IsNodeUnsigned(selector, selector->input_at(node, 0)) ==
      IsNodeUnsigned(selector, selector->input_at(node, 1))) {
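
// The "full" compare shifts both operands left by 32 and compares the
// 64-bit results, which is always correct; the "optimized" variant compares
// the registers directly and asserts that it agrees with the full comparison
// via kRiscvAssertEqual.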
void VisitFullWord32Compare(InstructionSelectorT* selector, OpIndex node,
  RiscvOperandGeneratorT g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();
  selector->Emit(kRiscvShl64, leftOp,
                 g.UseRegister(selector->input_at(node, 0)),
                 g.TempImmediate(32));
  selector->Emit(kRiscvShl64, rightOp,
                 g.UseRegister(selector->input_at(node, 1)),
                 g.TempImmediate(32));
  selector->UpdateSourcePosition(instr, node);
void VisitOptimizedWord32Compare(InstructionSelectorT* selector, OpIndex node,
                                 FlagsContinuationT* cont) {
  RiscvOperandGeneratorT g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();
  InstructionOperand optimizedResult = g.TempRegister();
  InstructionOperand fullResult = g.TempRegister();
  selector->Emit(testOpcode, optimizedResult,
                 g.UseRegister(selector->input_at(node, 0)),
                 g.UseRegister(selector->input_at(node, 1)));
  selector->Emit(kRiscvShl64, leftOp,
                 g.UseRegister(selector->input_at(node, 0)),
                 g.TempImmediate(32));
  selector->Emit(kRiscvShl64, rightOp,
                 g.UseRegister(selector->input_at(node, 1)),
                 g.TempImmediate(32));
  selector->Emit(testOpcode, fullResult, leftOp, rightOp);
  selector->Emit(kRiscvAssertEqual, g.NoOutput(), optimizedResult, fullResult,
                 g.TempImmediate(static_cast<int>(
                     AbortReason::kUnsupportedNonPrimitiveCompare)));
  selector->UpdateSourcePosition(instr, node);
void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {
#ifdef USE_SIMULATOR
  const Operation& lhs = selector->Get(selector->input_at(node, 0));
  const Operation& rhs = selector->Get(selector->input_at(node, 1));
  if (lhs.Is<DidntThrowOp>() || rhs.Is<DidntThrowOp>()) {
    VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
  } else if (!CanUseOptimizedWord32Compare(selector, node)) {
#else
  if (!CanUseOptimizedWord32Compare(selector, node)) {
#endif
    VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont);
void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
                        FlagsContinuationT* cont) {
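
// Atomic loads pick a (possibly decompressing) atomic load opcode from the
// representation and attach the access width; protected accesses also record
// a trap-handler memory-access mode.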
void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
  RiscvOperandGeneratorT g(selector);
  auto load = selector->load_view(node);
  switch (load_rep.representation()) {
      code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      code = kAtomicLoadWord32;
      code = kRiscvWord64AtomicLoadUint64;
#ifdef V8_COMPRESS_POINTERS
      code = kRiscvAtomicLoadDecompressTaggedSigned;
      code = kRiscvAtomicLoadDecompressTagged;
      code = kRiscvWord64AtomicLoadUint64;
      code = kAtomicLoadWord32;
      code = kAtomicLoadWord32;
  if (load.is_protected(&traps_on_null)) {
  if (g.CanBeImmediate(index, code)) {
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index));
    InstructionOperand addr_reg = g.TempRegister();
                   addr_reg, g.UseRegister(base), g.UseRegister(index));
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  auto store = selector->store_view(node);
  return AtomicStoreParameters(store.stored_rep().representation(),
                               store.stored_rep().write_barrier_kind(),
                               store.memory_order().value(),
                               store.access_kind());
void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
  RiscvOperandGeneratorT g(selector);
  auto store = selector->store_view(node);
  OpIndex index = selector->value(store.index());
  OpIndex value = store.value();
  if (v8_flags.enable_unconditional_write_barriers &&
      !v8_flags.disable_write_barriers) {
    InstructionOperand inputs[3];
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    code = kArchAtomicStoreWithWriteBarrier;
    selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
        code = kAtomicStoreWord8;
        code = kAtomicStoreWord16;
        code = kAtomicStoreWord32;
        code = kRiscvWord64AtomicStoreWord64;
        code = kRiscvStoreCompressTagged;
    if (g.CanBeImmediate(index, code)) {
                     g.NoOutput(), g.UseRegisterOrImmediateZero(value),
                     g.UseRegister(base), g.UseImmediate(index));
      InstructionOperand addr_reg = g.TempRegister();
                     addr_reg, g.UseRegister(index), g.UseRegister(base));
                     g.NoOutput(), g.UseRegisterOrImmediateZero(value),
                     addr_reg, g.TempImmediate(0));
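
// Generic atomic RMW helper: base, index and value are kept in unique
// registers because the expansion reads them after outputs are defined, and
// four scratch temps are reserved for the expansion.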
  RiscvOperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();
  InstructionOperand inputs[3];
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temps[4];
  temps[0] = g.TempRegister();
  temps[1] = g.TempRegister();
  temps[2] = g.TempRegister();
  temps[3] = g.TempRegister();
  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
void InstructionSelectorT::VisitStackPointerGreaterThan(
    OpIndex node, FlagsContinuationT* cont) {
    value = op.stack_limit();
  RiscvOperandGeneratorT g(this);
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;
  InstructionOperand temps[] = {g.TempRegister()};
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
                       temp_count, temps, cont);
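
// Central flags-continuation dispatch: strips Word32Equal-with-zero wrappers
// from the condition, then fuses the comparison or overflow projection that
// feeds the branch/set with the instruction that computes it.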
void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
                                                FlagsContinuation* cont) {
  while (const ComparisonOp* equal =
    value = equal->left();
  if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
    switch (comparison->rep.value()) {
        cont->OverwriteAndNegateIfEqual(
        return VisitWord32Compare(this, value, cont);
        cont->OverwriteAndNegateIfEqual(
        return VisitWord64Compare(this, value, cont);
          case ComparisonOp::Kind::kEqual:
            cont->OverwriteAndNegateIfEqual(kEqual);
          case ComparisonOp::Kind::kSignedLessThan:
          case ComparisonOp::Kind::kSignedLessThanOrEqual:
          case ComparisonOp::Kind::kEqual:
            cont->OverwriteAndNegateIfEqual(kEqual);
          case ComparisonOp::Kind::kSignedLessThan:
          case ComparisonOp::Kind::kSignedLessThanOrEqual:
  } else if (const ProjectionOp* projection =
                 value_op.TryCast<ProjectionOp>()) {
    if (projection->index == 1u) {
      OpIndex node = projection->input();
      if (const OverflowCheckedBinopOp* binop =
          binop && CanDoBranchIfOverflowFusion(node)) {
        switch (binop->kind) {
          case OverflowCheckedBinopOp::Kind::kSignedAdd:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop<Int32BinopMatcher>(
                this, node, is64 ? kRiscvAddOvf64 : kRiscvAdd64, cont);
          case OverflowCheckedBinopOp::Kind::kSignedSub:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop<Int32BinopMatcher>(
                this, node, is64 ? kRiscvSubOvf64 : kRiscvSub64, cont);
          case OverflowCheckedBinopOp::Kind::kSignedMul:
            cont->OverwriteAndNegateIfEqual(kOverflow);
            return VisitBinop<Int32BinopMatcher>(
                this, node, is64 ? kRiscvMulOvf64 : kRiscvMulOvf32, cont);
  const ComparisonOp* comparison = this->Get(user).TryCast<ComparisonOp>();
#ifdef V8_COMPRESS_POINTERS
  return EmitWord32CompareZero(this, value, cont);
  return EmitWord32CompareZero(this, value, cont);
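
// Word32Equal against a read-only root can be lowered to a 32-bit immediate
// compare, since read-only roots have known compressed pointer values.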
void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  if (MatchZero(right)) {
    return VisitWordCompareZero(user, left, &cont);
  RiscvOperandGeneratorT g(this);
  const RootsTable& roots_table = isolate()->roots_table();
  Handle<HeapObject> right;
  if (MatchHeapConstant(node, &right) && !right.is_null() &&
      roots_table.IsRootHandle(right, &root_index)) {
    if (RootsTable::IsReadOnly(root_index)) {
          MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate());
      if (g.CanBeImmediate(ptr, kRiscvCmp32)) {
        VisitCompare(this, kRiscvCmp32, g.UseRegister(left),
                     g.TempImmediate(int32_t(ptr)), &cont);
  VisitWord32Compare(this, node, &cont);
void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvAdd64, &cont);
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvAdd64, &cont);

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvSub64, &cont);
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvSub64, &cont);

void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);

void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int64BinopMatcher>(this, node, kRiscvAddOvf64, &cont);
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kRiscvAddOvf64, &cont);

void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int64BinopMatcher>(this, node, kRiscvSubOvf64, &cont);
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kRiscvSubOvf64, &cont);

void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int64BinopMatcher>(this, node, kRiscvMulOvf64, &cont);
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kRiscvMulOvf64, &cont);
void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  const ComparisonOp& equal = this->Get(node).template Cast<ComparisonOp>();
  if (this->MatchIntegralZero(equal.right())) {
    return VisitWordCompareZero(node, equal.left(), &cont);
  VisitWord64Compare(this, node, &cont);

void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);

void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);

void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);

void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);

void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  VisitAtomicLoad(this, node, AtomicWidth::kWord32);

void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  VisitAtomicStore(this, node, AtomicWidth::kWord32);

void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
  VisitAtomicLoad(this, node, AtomicWidth::kWord64);

void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
  VisitAtomicStore(this, node, AtomicWidth::kWord64);
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  outputs[0] = g.UseUniqueRegister(node);
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRI) |
                         AtomicWidthField::encode(width);
  if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);

  size_t input_count = 0;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kRiscvWord64AtomicExchangeUint64;
                      atomic_op.memory_access_kind);
void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
                             atomic_op.memory_access_kind);
void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
    opcode = kRiscvWord64AtomicCompareExchangeUint64;
                             atomic_op.memory_access_kind);
void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
    OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
                   atomic_op.memory_access_kind);
#define VISIT_ATOMIC_BINOP(op)                                            \
  void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) {        \
    VisitWord32AtomicBinaryOperation(                                     \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16,  \
        kAtomic##op##Uint16, kAtomic##op##Word32);                        \
#undef VISIT_ATOMIC_BINOP
void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
    OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode uint32_op, ArchOpcode uint64_op) {
  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64,
                   atomic_op.memory_access_kind);

#define VISIT_ATOMIC_BINOP(op)                                                 \
  void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) {             \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8,                 \
                                     kAtomic##op##Uint16, kAtomic##op##Word32, \
                                     kRiscvWord64Atomic##op##Uint64);          \
#undef VISIT_ATOMIC_BINOP
void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {

void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));

void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
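
// F64x2 min/max need explicit NaN handling with RVV: vmfeq each input with
// itself and vand the results to build a mask of lanes where both inputs are
// ordered, splat the canonical NaN, then compute vfmin/vfmax only under the
// mask so unordered lanes keep the NaN.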
void InstructionSelectorT::VisitF64x2Min(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
             g.UseImmediate(m1));
  InstructionOperand NaN = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand result = g.TempFpRegister(kSimd128ScratchReg);
  this->Emit(kRiscvVmv, NaN, g.UseImmediate64(0x7ff8000000000000L),
             g.UseImmediate(E64), g.UseImmediate(m1));
  this->Emit(kRiscvVfminVv, result, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1), g.UseImmediate(MaskType::Mask));
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), result, g.UseImmediate(E64),
             g.UseImmediate(m1));
void InstructionSelectorT::VisitF64x2Max(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
             g.UseImmediate(m1));
  InstructionOperand NaN = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand result = g.TempFpRegister(kSimd128ScratchReg);
  this->Emit(kRiscvVmv, NaN, g.UseImmediate64(0x7ff8000000000000L),
             g.UseImmediate(E64), g.UseImmediate(m1));
  this->Emit(kRiscvVfmaxVv, result, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1), g.UseImmediate(MaskType::Mask));
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), result, g.UseImmediate(E64),
             g.UseImmediate(m1));
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  flags |= MachineOperatorBuilder::kWord32ShiftIsSafe |
           MachineOperatorBuilder::kInt32DivIsSafe |
           MachineOperatorBuilder::kUint32DivIsSafe |
           MachineOperatorBuilder::kFloat64RoundDown |
           MachineOperatorBuilder::kFloat32RoundDown |
           MachineOperatorBuilder::kFloat64RoundUp |
           MachineOperatorBuilder::kFloat32RoundUp |
           MachineOperatorBuilder::kFloat64RoundTruncate |
           MachineOperatorBuilder::kFloat32RoundTruncate |
           MachineOperatorBuilder::kFloat64RoundTiesEven |
           MachineOperatorBuilder::kFloat32RoundTiesEven;
  if (CpuFeatures::IsSupported(ZBB)) {
    flags |= MachineOperatorBuilder::kWord32Ctz |
             MachineOperatorBuilder::kWord64Ctz |
             MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;