v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-selector-s390.cc
1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <optional>
6
7#include "src/base/logging.h"
13
14namespace v8 {
15namespace internal {
16namespace compiler {
17
18using namespace turboshaft; // NOLINT(build/namespaces)
19
20enum class OperandMode : uint32_t {
21 kNone = 0u,
22 // Immediate mode
23 kShift32Imm = 1u << 0,
24 kShift64Imm = 1u << 1,
25 kInt32Imm = 1u << 2,
26 kInt32Imm_Negate = 1u << 3,
27 kUint32Imm = 1u << 4,
28 kInt20Imm = 1u << 5,
29 kUint12Imm = 1u << 6,
30 // Instr format
31 kAllowRRR = 1u << 7,
32 kAllowRM = 1u << 8,
33 kAllowRI = 1u << 9,
34 kAllowRRI = 1u << 10,
35 kAllowRRM = 1u << 11,
36 // Useful combination
37 kAllowImmediate = kAllowRI | kAllowRRI,
38 kAllowMemoryOperand = kAllowRM | kAllowRRM,
39 kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
40 kBitWiseCommonMode = kAllowRM,
41 kArithmeticCommonMode = kAllowRM | kAllowRI
42};
43
44typedef base::Flags<OperandMode, uint32_t> OperandModes;
45DEFINE_OPERATORS_FOR_FLAGS(OperandModes)
46OperandModes immediateModeMask =
47 OperandMode::kShift32Imm | OperandMode::kShift64Imm |
48 OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
49 OperandMode::kUint32Imm | OperandMode::kInt20Imm;
50
51#define AndCommonMode \
52 ((OperandMode::kAllowRM | \
53 (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
54 : OperandMode::kNone)))
55#define And64OperandMode AndCommonMode
56#define Or64OperandMode And64OperandMode
57#define Xor64OperandMode And64OperandMode
58
59#define And32OperandMode \
60 (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
61#define Or32OperandMode And32OperandMode
62#define Xor32OperandMode And32OperandMode
63
64#define Shift32OperandMode \
65 ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
66 (CpuFeatures::IsSupported(DISTINCT_OPS) \
67 ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
68 : OperandMode::kNone)))
69
70#define Shift64OperandMode \
71 ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
72 OperandMode::kAllowRRR | OperandMode::kAllowRRI))
73
74#define AddOperandMode \
75 ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
76 (CpuFeatures::IsSupported(DISTINCT_OPS) \
77 ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
78 : OperandMode::kArithmeticCommonMode)))
79#define SubOperandMode \
80 ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
81 (CpuFeatures::IsSupported(DISTINCT_OPS) \
82 ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
83 : OperandMode::kArithmeticCommonMode)))
84#define MulOperandMode \
85 (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
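// Editorial note, illustrative only (not part of the original file): with
// these definitions, AddOperandMode reads as "register-register,
// register-immediate (signed 32-bit), or register-memory", plus the
// three-operand RRR/RRI forms when the DISTINCT_OPS facility is available;
// without DISTINCT_OPS the result must overwrite the left operand.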
86
87struct BaseWithScaledIndexAndDisplacementMatch {
88 OpIndex base = {};
89 OpIndex index = {};
90 int scale = 0;
91 int64_t displacement = 0;
92 DisplacementMode displacement_mode = kPositiveDisplacement;
93};
94
95std::optional<BaseWithScaledIndexAndDisplacementMatch>
96TryMatchBaseWithScaledIndexAndDisplacement64(InstructionSelectorT* selector,
97 OpIndex node) {
98 // The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
99 // displacements and scale factors that are used as inputs, so instead of
100 // enumerating all possible patterns by brute force, checking for node
101 // clusters using the following templates in the following order suffices
102 // to find all of the interesting cases (S = index * scale, B = base
103 // input, D = displacement input):
104 //
105 // (S + (B + D))
106 // (S + (B + B))
107 // (S + D)
108 // (S + B)
109 // ((S + D) + B)
110 // ((S + B) + D)
111 // ((B + D) + B)
112 // ((B + B) + D)
113 // (B + D)
114 // (B + B)
115 BaseWithScaledIndexAndDisplacementMatch result;
116 result.displacement_mode = kPositiveDisplacement;
117
118 const Operation& op = selector->Get(node);
119 if (const LoadOp* load = op.TryCast<LoadOp>()) {
120 result.base = load->base();
121 result.index = load->index().value_or_invalid();
122 result.scale = load->element_size_log2;
123 result.displacement = load->offset;
124 if (load->kind.tagged_base) result.displacement -= kHeapObjectTag;
125 return result;
126 } else if (const StoreOp* store = op.TryCast<StoreOp>()) {
127 result.base = store->base();
128 result.index = store->index().value_or_invalid();
129 result.scale = store->element_size_log2;
130 result.displacement = store->offset;
131 if (store->kind.tagged_base) result.displacement -= kHeapObjectTag;
132 return result;
133 } else if (op.Is<WordBinopOp>()) {
134 UNIMPLEMENTED();
135#ifdef V8_ENABLE_WEBASSEMBLY
136 } else if (const Simd128LaneMemoryOp* lane_op =
137 op.TryCast<Simd128LaneMemoryOp>()) {
138 result.base = lane_op->base();
139 result.index = lane_op->index();
140 result.scale = 0;
141 result.displacement = 0;
142 if (lane_op->kind.tagged_base) result.displacement -= kHeapObjectTag;
143 return result;
144 } else if (const Simd128LoadTransformOp* load_transform =
145 op.TryCast<Simd128LoadTransformOp>()) {
146 result.base = load_transform->base();
147 result.index = load_transform->index();
148 DCHECK_EQ(load_transform->offset, 0);
149 result.scale = 0;
150 result.displacement = 0;
151 DCHECK(!load_transform->load_kind.tagged_base);
152 return result;
153#endif // V8_ENABLE_WEBASSEMBLY
154 }
155 return std::nullopt;
156}
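// Illustrative example (not from the original source): a tagged field load
// LoadOp{base = object, index = i, element_size_log2 = 3, offset = 16,
// kind.tagged_base = true} is matched above as base = object, index = i,
// scale = 3, displacement = 16 - kHeapObjectTag, ready to be folded into a
// single addressing mode by GenerateMemoryOperandInputs below.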
157
158// Adds S390-specific methods for generating operands.
159class S390OperandGeneratorT final : public OperandGeneratorT {
160 public:
161 explicit S390OperandGeneratorT(InstructionSelectorT* selector)
162 : OperandGeneratorT(selector) {}
163
164 InstructionOperand UseOperand(OpIndex node, OperandModes mode) {
165 if (CanBeImmediate(node, mode)) {
166 return UseImmediate(node);
167 }
168 return UseRegister(node);
169 }
170
171 InstructionOperand UseAnyExceptImmediate(OpIndex node) {
172 int64_t value;
173 if (MatchSignedIntegralConstant(node, &value))
174 return UseRegister(node);
175 else
176 return this->Use(node);
177 }
178
179 int64_t GetImmediate(OpIndex node) {
180 ConstantOp* op =
181 this->turboshaft_graph()->Get(node).template TryCast<ConstantOp>();
182 switch (op->kind) {
183 case ConstantOp::Kind::kWord32:
184 return op->word32();
185 case ConstantOp::Kind::kWord64:
186 return op->word64();
187 default:
188 UNREACHABLE();
189 }
190 }
191
192 bool CanBeImmediate(OpIndex node, OperandModes mode) {
193 int64_t value;
194 if (!selector()->MatchSignedIntegralConstant(node, &value)) return false;
195 return CanBeImmediate(value, mode);
196 }
197
198 bool CanBeImmediate(int64_t value, OperandModes mode) {
199 if (mode & OperandMode::kShift32Imm)
200 return 0 <= value && value < 32;
201 else if (mode & OperandMode::kShift64Imm)
202 return 0 <= value && value < 64;
203 else if (mode & OperandMode::kInt32Imm)
204 return is_int32(value);
205 else if (mode & OperandMode::kInt32Imm_Negate)
206 return is_int32(-value);
207 else if (mode & OperandMode::kUint32Imm)
208 return is_uint32(value);
209 else if (mode & OperandMode::kInt20Imm)
210 return is_int20(value);
211 else if (mode & OperandMode::kUint12Imm)
212 return is_uint12(value);
213 else
214 return false;
215 }
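// Example (illustrative, editorial): with mode = OperandMode::kInt20Imm,
// is_int20 accepts the signed 20-bit range [-524288, 524287], so a
// displacement of 4096 can be encoded as an immediate while 1 << 20 forces
// a register.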
216
217 bool CanBeMemoryOperand(InstructionCode opcode, OpIndex user, OpIndex input,
218 int effect_level) {
219 if (!this->IsLoadOrLoadImmutable(input)) return false;
220 if (!selector()->CanCover(user, input)) return false;
221 if (effect_level != selector()->GetEffectLevel(input)) {
222 return false;
223 }
224
225 MachineRepresentation rep =
226 this->load_view(input).loaded_rep().representation();
227 switch (opcode) {
228 case kS390_Cmp64:
229 case kS390_LoadAndTestWord64:
230 if (rep == MachineRepresentation::kWord64 ||
231 (!COMPRESS_POINTERS_BOOL && IsAnyTagged(rep))) {
232 DCHECK_EQ(ElementSizeInBits(rep), 64);
233 return true;
234 }
235 break;
236 case kS390_LoadAndTestWord32:
237 case kS390_Cmp32:
238 if (rep == MachineRepresentation::kWord32 ||
239 (COMPRESS_POINTERS_BOOL && IsAnyCompressed(rep))) {
240 DCHECK_EQ(ElementSizeInBits(rep), 32);
241 return true;
242 }
243 break;
244 default:
245 break;
246 }
247 return false;
248 }
249
250 AddressingMode GenerateMemoryOperandInputs(
251 OptionalOpIndex index, OpIndex base, int64_t displacement,
252 DisplacementMode displacement_mode, InstructionOperand inputs[],
253 size_t* input_count,
254 RegisterUseKind reg_kind = RegisterUseKind::kUseRegister) {
255 AddressingMode mode = kMode_MRI;
256 if (base.valid()) {
257 inputs[(*input_count)++] = UseRegister(base, reg_kind);
258 if (index.valid()) {
259 inputs[(*input_count)++] = UseRegister(this->value(index), reg_kind);
260 if (displacement != 0) {
261 inputs[(*input_count)++] = UseImmediate(
262 displacement_mode == kNegativeDisplacement ? -displacement
263 : displacement);
264 mode = kMode_MRRI;
265 } else {
266 mode = kMode_MRR;
267 }
268 } else {
269 if (displacement == 0) {
270 mode = kMode_MR;
271 } else {
272 inputs[(*input_count)++] = UseImmediate(
273 displacement_mode == kNegativeDisplacement ? -displacement
274 : displacement);
275 mode = kMode_MRI;
276 }
277 }
278 } else {
279 DCHECK(index.valid());
280 inputs[(*input_count)++] = UseRegister(this->value(index), reg_kind);
281 if (displacement != 0) {
282 inputs[(*input_count)++] = UseImmediate(
283 displacement_mode == kNegativeDisplacement ? -displacement
284 : displacement);
285 mode = kMode_MRI;
286 } else {
287 mode = kMode_MR;
288 }
289 }
290 return mode;
291 }
292
293 AddressingMode GetEffectiveAddressMemoryOperand(
294 OpIndex operand, InstructionOperand inputs[], size_t* input_count,
295 OperandModes immediate_mode = OperandMode::kInt20Imm) {
296 auto m = TryMatchBaseWithScaledIndexAndDisplacement64(selector(), operand);
297 DCHECK(m.has_value());
298 if (m->base.valid() &&
299 this->Get(m->base).template Is<LoadRootRegisterOp>()) {
300 DCHECK(!m->index.valid());
301 DCHECK_EQ(m->scale, 0);
302 inputs[(*input_count)++] =
303 UseImmediate(static_cast<int>(m->displacement));
304 return kMode_Root;
305 } else if (CanBeImmediate(m->displacement, immediate_mode)) {
306 DCHECK_EQ(m->scale, 0);
307 return GenerateMemoryOperandInputs(m->index, m->base, m->displacement,
308 m->displacement_mode, inputs,
309 input_count);
310 } else {
311 DCHECK_EQ(m->displacement, 0);
312 inputs[(*input_count)++] = UseRegister(m->base);
313 inputs[(*input_count)++] = UseRegister(m->index);
314 return kMode_MRR;
315 }
316 }
317
318 bool CanBeBetterLeftOperand(OpIndex node) const {
319 return !selector()->IsLive(node);
320 }
321};
322
323namespace {
324
325bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
326 switch (opcode) {
327 case kS390_AddFloat:
328 case kS390_AddDouble:
329 case kS390_CmpFloat:
330 case kS390_CmpDouble:
331 case kS390_Float32ToDouble:
332 return true;
333 default:
334 return false;
335 }
336}
337
338bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
339 ArchOpcode opcode = ArchOpcodeField::decode(op);
340 return S390OpcodeOnlySupport12BitDisp(opcode);
341}
342
343#define OpcodeImmMode(op) \
344 (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
345 : OperandMode::kInt20Imm)
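// Rationale (editorial, hedged): the opcodes listed in
// S390OpcodeOnlySupport12BitDisp map to memory forms that encode only an
// unsigned 12-bit displacement, so OpcodeImmMode narrows the immediate
// constraint to kUint12Imm for them and allows the signed 20-bit kInt20Imm
// otherwise.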
346
347ArchOpcode SelectLoadOpcode(MemoryRepresentation loaded_rep,
348 RegisterRepresentation result_rep) {
349 // NOTE: The meaning of `loaded_rep` = `MemoryRepresentation::AnyTagged()` is
350 // we are loading a compressed tagged field, while `result_rep` =
351 // `RegisterRepresentation::Tagged()` refers to an uncompressed tagged value.
352 switch (loaded_rep) {
353 case MemoryRepresentation::Int8():
354 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
355 return kS390_LoadWordS8;
356 case MemoryRepresentation::Uint8():
357 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
358 return kS390_LoadWordU8;
359 case MemoryRepresentation::Int16():
360 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
361 return kS390_LoadWordS16;
362 case MemoryRepresentation::Uint16():
363 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
364 return kS390_LoadWordU16;
365 case MemoryRepresentation::Int32():
366 case MemoryRepresentation::Uint32():
367 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
368 return kS390_LoadWordU32;
369 case MemoryRepresentation::Int64():
370 case MemoryRepresentation::Uint64():
371 DCHECK_EQ(result_rep, RegisterRepresentation::Word64());
372 return kS390_LoadWord64;
373 case MemoryRepresentation::Float16():
374 UNIMPLEMENTED();
375 case MemoryRepresentation::Float32():
376 DCHECK_EQ(result_rep, RegisterRepresentation::Float32());
377 return kS390_LoadFloat32;
378 case MemoryRepresentation::Float64():
379 DCHECK_EQ(result_rep, RegisterRepresentation::Float64());
380 return kS390_LoadDouble;
381#ifdef V8_COMPRESS_POINTERS
382 case MemoryRepresentation::AnyTagged():
383 case MemoryRepresentation::TaggedPointer():
384 if (result_rep == RegisterRepresentation::Compressed()) {
385 return kS390_LoadWordS32;
386 }
387 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
388 return kS390_LoadDecompressTagged;
389 case MemoryRepresentation::TaggedSigned():
390 if (result_rep == RegisterRepresentation::Compressed()) {
391 return kS390_LoadWordS32;
392 }
393 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
394 return kS390_LoadDecompressTaggedSigned;
395#else
396 case MemoryRepresentation::AnyTagged():
397 case MemoryRepresentation::TaggedPointer():
398 case MemoryRepresentation::TaggedSigned():
399 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
400 return kS390_LoadWord64;
401#endif
402 case MemoryRepresentation::AnyUncompressedTagged():
403 case MemoryRepresentation::UncompressedTaggedPointer():
404 case MemoryRepresentation::UncompressedTaggedSigned():
405 DCHECK_EQ(result_rep, RegisterRepresentation::Word64());
406 return kS390_LoadWord64;
407 case MemoryRepresentation::Simd128():
408 DCHECK_EQ(result_rep, RegisterRepresentation::Simd128());
409 return kS390_LoadSimd128;
410 case MemoryRepresentation::ProtectedPointer():
411 case MemoryRepresentation::IndirectPointer():
412 case MemoryRepresentation::SandboxedPointer():
413 case MemoryRepresentation::Simd256():
414 UNREACHABLE();
415 }
416}
417
418ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
419 ArchOpcode opcode;
420 switch (load_rep.representation()) {
421 case MachineRepresentation::kFloat32:
422 opcode = kS390_LoadFloat32;
423 break;
424 case MachineRepresentation::kFloat64:
425 opcode = kS390_LoadDouble;
426 break;
427 case MachineRepresentation::kBit: // Fall through.
428 case MachineRepresentation::kWord8:
429 opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
430 break;
431 case MachineRepresentation::kWord16:
432 opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
433 break;
434 case MachineRepresentation::kWord32:
435 opcode = kS390_LoadWordU32;
436 break;
437 case MachineRepresentation::kCompressedPointer: // Fall through.
438 case MachineRepresentation::kCompressed: // Fall through.
439 case MachineRepresentation::kIndirectPointer: // Fall through.
440 case MachineRepresentation::kSandboxedPointer: // Fall through.
441#ifdef V8_COMPRESS_POINTERS
442 opcode = kS390_LoadWordS32;
443 break;
444#else
445 UNREACHABLE();
446#endif
447#ifdef V8_COMPRESS_POINTERS
448 case MachineRepresentation::kTaggedSigned:
449 opcode = kS390_LoadDecompressTaggedSigned;
450 break;
451 case MachineRepresentation::kTaggedPointer:
452 opcode = kS390_LoadDecompressTagged;
453 break;
454 case MachineRepresentation::kTagged:
455 opcode = kS390_LoadDecompressTagged;
456 break;
457#else
458 case MachineRepresentation::kTaggedSigned: // Fall through.
459 case MachineRepresentation::kTaggedPointer: // Fall through.
460 case MachineRepresentation::kTagged: // Fall through.
461#endif
462 case MachineRepresentation::kWord64:
463 opcode = kS390_LoadWord64;
464 break;
465 case MachineRepresentation::kSimd128:
466 opcode = kS390_LoadSimd128;
467 break;
470 case MachineRepresentation::kProtectedPointer: // Fall through.
471 case MachineRepresentation::kSimd256: // Fall through.
472 case MachineRepresentation::kMapWord: // Fall through.
473 case MachineRepresentation::kFloat16RawBits: // Fall through.
475 default:
476 UNREACHABLE();
477 }
478 return opcode;
479}
480
481#define RESULT_IS_WORD32_LIST(V) \
482 /* Float unary op*/ \
483 V(BitcastFloat32ToInt32) \
484 /* V(TruncateFloat64ToWord32) */ \
485 V(RoundFloat64ToInt32) \
486 V(TruncateFloat32ToInt32) \
487 V(TruncateFloat32ToUint32) \
488 V(TruncateFloat64ToUint32) \
489 V(ChangeFloat64ToInt32) \
490 V(ChangeFloat64ToUint32) \
491 /* Word32 unary op */ \
492 V(Word32Clz) \
493 V(Word32Popcnt) \
494 V(Float64ExtractLowWord32) \
495 V(Float64ExtractHighWord32) \
496 V(SignExtendWord8ToInt32) \
497 V(SignExtendWord16ToInt32) \
498 /* Word32 bin op */ \
499 V(Int32Add) \
500 V(Int32Sub) \
501 V(Int32Mul) \
502 V(Int32AddWithOverflow) \
503 V(Int32SubWithOverflow) \
504 V(Int32MulWithOverflow) \
505 V(Int32MulHigh) \
506 V(Uint32MulHigh) \
507 V(Int32Div) \
508 V(Uint32Div) \
509 V(Int32Mod) \
510 V(Uint32Mod) \
511 V(Word32Ror) \
512 V(Word32And) \
513 V(Word32Or) \
514 V(Word32Xor) \
515 V(Word32Shl) \
516 V(Word32Shr) \
517 V(Word32Sar)
518
519bool ProduceWord32Result(InstructionSelectorT* selector, OpIndex node) {
520 const Operation& op = selector->Get(node);
521 switch (op.opcode) {
522 case Opcode::kWordBinop: {
523 const auto& binop = op.Cast<WordBinopOp>();
524 if (binop.rep != WordRepresentation::Word32()) return false;
525 return binop.kind == WordBinopOp::Kind::kAdd ||
526 binop.kind == WordBinopOp::Kind::kSub ||
527 binop.kind == WordBinopOp::Kind::kMul ||
528 binop.kind == WordBinopOp::Kind::kSignedDiv ||
529 binop.kind == WordBinopOp::Kind::kUnsignedDiv ||
530 binop.kind == WordBinopOp::Kind::kSignedMod ||
531 binop.kind == WordBinopOp::Kind::kUnsignedMod ||
532 binop.kind == WordBinopOp::Kind::kBitwiseAnd ||
533 binop.kind == WordBinopOp::Kind::kBitwiseOr ||
534 binop.kind == WordBinopOp::Kind::kBitwiseXor ||
535 binop.kind == WordBinopOp::Kind::kSignedMulOverflownBits ||
536 binop.kind == WordBinopOp::Kind::kUnsignedMulOverflownBits;
537 }
538 case Opcode::kWordUnary: {
539 const auto& unop = op.Cast<WordUnaryOp>();
540 if (unop.rep != WordRepresentation::Word32()) return false;
541 return unop.kind == WordUnaryOp::Kind::kCountLeadingZeros ||
542 unop.kind == WordUnaryOp::Kind::kPopCount ||
543 unop.kind == WordUnaryOp::Kind::kSignExtend8 ||
544 unop.kind == WordUnaryOp::Kind::kSignExtend16;
545 }
546 case Opcode::kChange: {
547 const auto& changeop = op.Cast<ChangeOp>();
548 switch (changeop.kind) {
549 // Float64ExtractLowWord32
550 // Float64ExtractHighWord32
551 case ChangeOp::Kind::kExtractLowHalf:
552 case ChangeOp::Kind::kExtractHighHalf:
553 CHECK_EQ(changeop.from, FloatRepresentation::Float64());
554 CHECK_EQ(changeop.to, WordRepresentation::Word32());
555 return true;
556 // BitcastFloat32ToInt32
557 case ChangeOp::Kind::kBitcast:
558 return changeop.from == FloatRepresentation::Float32() &&
559 changeop.to == WordRepresentation::Word32();
560 case ChangeOp::Kind::kSignedFloatTruncateOverflowToMin:
561 case ChangeOp::Kind::kUnsignedFloatTruncateOverflowToMin:
562 // RoundFloat64ToInt32
563 // ChangeFloat64ToInt32
564 // TruncateFloat64ToUint32
565 // ChangeFloat64ToUint32
566 if (changeop.from == FloatRepresentation::Float64() &&
567 changeop.to == WordRepresentation::Word32()) {
568 return true;
569 }
570 // TruncateFloat32ToInt32
571 // TruncateFloat32ToUint32
572 if (changeop.from == FloatRepresentation::Float32() &&
573 changeop.to == WordRepresentation::Word32()) {
574 return true;
575 }
576 return false;
577 default:
578 return false;
579 }
580 }
581 case Opcode::kShift: {
582 const auto& shift = op.Cast<ShiftOp>();
583 if (shift.rep != WordRepresentation::Word32()) return false;
584 return shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
585 shift.kind == ShiftOp::Kind::kShiftRightLogical ||
586 shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros ||
587 shift.kind == ShiftOp::Kind::kShiftLeft ||
588 shift.kind == ShiftOp::Kind::kRotateRight;
589 }
590 case Opcode::kOverflowCheckedBinop: {
591 const auto& ovfbinop = op.Cast<OverflowCheckedBinopOp>();
592 if (ovfbinop.rep != WordRepresentation::Word32()) return false;
593 return ovfbinop.kind == OverflowCheckedBinopOp::Kind::kSignedAdd ||
594 ovfbinop.kind == OverflowCheckedBinopOp::Kind::kSignedSub ||
595 ovfbinop.kind == OverflowCheckedBinopOp::Kind::kSignedMul;
596 }
597 case Opcode::kLoad: {
598 LoadRepresentation load_rep = selector->load_view(node).loaded_rep();
599 MachineRepresentation rep = load_rep.representation();
600 switch (rep) {
601 case MachineRepresentation::kWord32:
602 return true;
603 case MachineRepresentation::kWord8:
604 if (load_rep.IsSigned())
605 return false;
606 else
607 return true;
608 default:
609 return false;
610 }
611 }
612 default:
613 return false;
614 }
615}
616
617static inline bool DoZeroExtForResult(InstructionSelectorT* selector,
618 OpIndex node) {
619 return ProduceWord32Result(selector, node);
620}
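// Editorial note (illustrative): on s390x most 32-bit instructions leave
// the upper 32 bits of a 64-bit register unchanged rather than zeroing
// them, so a Word32 result normally needs an explicit zero-extension before
// 64-bit use. ProduceWord32Result/DoZeroExtForResult let the visitors below
// skip that zero-extension when the producer (e.g. an unsigned 32-bit load)
// already guarantees zeroed upper bits.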
621
622// TODO(john.yan): Create VisiteShift to match dst = src shift (R+I)
623#if 0
624void VisitShift() { }
625#endif
626
627void VisitTryTruncateDouble(InstructionSelectorT* selector, ArchOpcode opcode,
628 OpIndex node) {
629 S390OperandGeneratorT g(selector);
630 InstructionOperand inputs[] = {g.UseRegister(selector->input_at(node, 0))};
631 InstructionOperand outputs[2];
632 size_t output_count = 0;
633 outputs[output_count++] = g.DefineAsRegister(node);
634
635 OptionalOpIndex success_output = selector->FindProjection(node, 1);
636 if (success_output.valid()) {
637 outputs[output_count++] = g.DefineAsRegister(success_output.value());
638 }
639
640 selector->Emit(opcode, output_count, outputs, 1, inputs);
641}
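// Usage note (editorial, hedged): the TryTruncate* visitors below route
// here; the optional second projection, when present, is defined as a
// register that the instruction sets to indicate whether the conversion
// succeeded, and it is simply omitted when unused.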
642
643template <class CanCombineWithLoad>
644void GenerateRightOperands(InstructionSelectorT* selector, OpIndex node,
645 OpIndex right, InstructionCode* opcode,
646 OperandModes* operand_mode,
647 InstructionOperand* inputs, size_t* input_count,
648 CanCombineWithLoad canCombineWithLoad) {
649 S390OperandGeneratorT g(selector);
650
651 if ((*operand_mode & OperandMode::kAllowImmediate) &&
652 g.CanBeImmediate(right, *operand_mode)) {
653 inputs[(*input_count)++] = g.UseImmediate(right);
654 // Can only be RI or RRI
655 *operand_mode &= OperandMode::kAllowImmediate;
656 } else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
657 const Operation& right_op = selector->Get(right);
658 if (right_op.Is<LoadOp>() && selector->CanCover(node, right) &&
659 canCombineWithLoad(
660 SelectLoadOpcode(selector->load_view(right).ts_loaded_rep(),
661 selector->load_view(right).ts_result_rep()))) {
662 AddressingMode mode =
663 g.GetEffectiveAddressMemoryOperand(right, inputs, input_count);
664 *opcode |= AddressingModeField::encode(mode);
665 *operand_mode &= ~OperandMode::kAllowImmediate;
666 if (*operand_mode & OperandMode::kAllowRM)
667 *operand_mode &= ~OperandMode::kAllowDistinctOps;
668 } else if (*operand_mode & OperandMode::kAllowRM) {
669 DCHECK(!(*operand_mode & OperandMode::kAllowRRM));
670 inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
671 // Can not be Immediate
672 *operand_mode &=
673 ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
674 } else if (*operand_mode & OperandMode::kAllowRRM) {
675 DCHECK(!(*operand_mode & OperandMode::kAllowRM));
676 inputs[(*input_count)++] = g.UseAnyExceptImmediate(right);
677 // Can not be Immediate
678 *operand_mode &= ~OperandMode::kAllowImmediate;
679 } else {
680 UNREACHABLE();
681 }
682 } else {
683 inputs[(*input_count)++] = g.UseRegister(right);
684 // Can only be RR or RRR
685 *operand_mode &= OperandMode::kAllowRRR;
686 }
687}
688
689template <class CanCombineWithLoad>
690void GenerateBinOpOperands(InstructionSelectorT* selector, OpIndex node,
691 OpIndex left, OpIndex right, InstructionCode* opcode,
692 OperandModes* operand_mode,
693 InstructionOperand* inputs, size_t* input_count,
694 CanCombineWithLoad canCombineWithLoad) {
695 S390OperandGeneratorT g(selector);
696 // left is always register
697 InstructionOperand const left_input = g.UseRegister(left);
698 inputs[(*input_count)++] = left_input;
699
700 if (left == right) {
701 inputs[(*input_count)++] = left_input;
702 // Can only be RR or RRR
703 *operand_mode &= OperandMode::kAllowRRR;
704 } else {
705 GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
706 input_count, canCombineWithLoad);
707 }
708}
709
710template <class CanCombineWithLoad>
711void VisitUnaryOp(InstructionSelectorT* selector, OpIndex node,
712 InstructionCode opcode, OperandModes operand_mode,
713 FlagsContinuationT* cont,
714 CanCombineWithLoad canCombineWithLoad);
715
716template <class CanCombineWithLoad>
717void VisitBinOp(InstructionSelectorT* selector, OpIndex node,
718 InstructionCode opcode, OperandModes operand_mode,
719 FlagsContinuationT* cont,
720 CanCombineWithLoad canCombineWithLoad);
721
722// Generate The following variations:
723// VisitWord32UnaryOp, VisitWord32BinOp,
724// VisitWord64UnaryOp, VisitWord64BinOp,
725// VisitFloat32UnaryOp, VisitFloat32BinOp,
726// VisitFloat64UnaryOp, VisitFloat64BinOp
727#define VISIT_OP_LIST_32(V) \
728 V(Word32, Unary, [](ArchOpcode opcode) { \
729 return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
730 }) \
731 V(Word64, Unary, \
732 [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; }) \
733 V(Float32, Unary, \
734 [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
735 V(Float64, Unary, \
736 [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; }) \
737 V(Word32, Bin, [](ArchOpcode opcode) { \
738 return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
739 }) \
740 V(Float32, Bin, \
741 [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
742 V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })
743
744#define VISIT_OP_LIST(V) \
745 VISIT_OP_LIST_32(V) \
746 V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
747
748#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \
749 static inline void Visit##type1##type2##Op( \
750 InstructionSelectorT* selector, OpIndex node, InstructionCode opcode, \
751 OperandModes operand_mode, FlagsContinuationT* cont) { \
752 Visit##type2##Op(selector, node, opcode, operand_mode, cont, \
753 canCombineWithLoad); \
754 } \
755 static inline void Visit##type1##type2##Op( \
756 InstructionSelectorT* selector, OpIndex node, InstructionCode opcode, \
757 OperandModes operand_mode) { \
758 FlagsContinuationT cont; \
759 Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
760 }
761VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS)
762#undef DECLARE_VISIT_HELPER_FUNCTIONS
763#undef VISIT_OP_LIST_32
764#undef VISIT_OP_LIST
765
766template <class CanCombineWithLoad>
767void VisitUnaryOp(InstructionSelectorT* selector, OpIndex node,
768 InstructionCode opcode, OperandModes operand_mode,
769 FlagsContinuationT* cont,
770 CanCombineWithLoad canCombineWithLoad) {
771 S390OperandGeneratorT g(selector);
772 InstructionOperand inputs[8];
773 size_t input_count = 0;
774 InstructionOperand outputs[2];
775 size_t output_count = 0;
776 OpIndex input = selector->input_at(node, 0);
777
778 GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs,
779 &input_count, canCombineWithLoad);
780
781 bool input_is_word32 = ProduceWord32Result(selector, input);
782
783 bool doZeroExt = DoZeroExtForResult(selector, node);
784 bool canEliminateZeroExt = input_is_word32;
785
786 if (doZeroExt) {
787 // Add zero-ext indication
788 inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
789 }
790
791 if (!cont->IsDeoptimize()) {
792 // If we can deoptimize as a result of the binop, we need to make sure
793 // that the deopt inputs are not overwritten by the binop result. One way
794 // to achieve that is to declare the output register as same-as-first.
795 if (doZeroExt && canEliminateZeroExt) {
796 // we have to make sure result and left use the same register
797 outputs[output_count++] = g.DefineSameAsFirst(node);
798 } else {
799 outputs[output_count++] = g.DefineAsRegister(node);
800 }
801 } else {
802 outputs[output_count++] = g.DefineSameAsFirst(node);
803 }
804
805 DCHECK_NE(0u, input_count);
806 DCHECK_NE(0u, output_count);
807 DCHECK_GE(arraysize(inputs), input_count);
808 DCHECK_GE(arraysize(outputs), output_count);
809
810 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
811 inputs, cont);
812}
813
814template <class CanCombineWithLoad>
815void VisitBinOp(InstructionSelectorT* selector, OpIndex node,
816 InstructionCode opcode, OperandModes operand_mode,
817 FlagsContinuationT* cont,
818 CanCombineWithLoad canCombineWithLoad) {
819 S390OperandGeneratorT g(selector);
820 OpIndex left = selector->input_at(node, 0);
821 OpIndex right = selector->input_at(node, 1);
822 InstructionOperand inputs[8];
823 size_t input_count = 0;
824 InstructionOperand outputs[2];
825 size_t output_count = 0;
826
827 const Operation& op = selector->Get(node);
828 if (op.TryCast<WordBinopOp>() &&
829 WordBinopOp::IsCommutative(
830 selector->Get(node).template Cast<WordBinopOp>().kind) &&
831 !g.CanBeImmediate(right, operand_mode) &&
832 (g.CanBeBetterLeftOperand(right))) {
833 std::swap(left, right);
834 }
835
836 GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode,
837 inputs, &input_count, canCombineWithLoad);
838
839 bool left_is_word32 = ProduceWord32Result(selector, left);
840
841 bool doZeroExt = DoZeroExtForResult(selector, node);
842 bool canEliminateZeroExt = left_is_word32;
843
844 if (doZeroExt) {
845 // Add zero-ext indication
846 inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
847 }
848
849 if ((operand_mode & OperandMode::kAllowDistinctOps) &&
850 // If we can deoptimize as a result of the binop, we need to make sure
851 // that the deopt inputs are not overwritten by the binop result. One way
852 // to achieve that is to declare the output register as same-as-first.
853 !cont->IsDeoptimize()) {
854 if (doZeroExt && canEliminateZeroExt) {
855 // we have to make sure result and left use the same register
856 outputs[output_count++] = g.DefineSameAsFirst(node);
857 } else {
858 outputs[output_count++] = g.DefineAsRegister(node);
859 }
860 } else {
861 outputs[output_count++] = g.DefineSameAsFirst(node);
862 }
863
864 DCHECK_NE(0u, input_count);
865 DCHECK_NE(0u, output_count);
866 DCHECK_GE(arraysize(inputs), input_count);
867 DCHECK_GE(arraysize(outputs), output_count);
868
869 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
870 inputs, cont);
871}
872
873} // namespace
874
875void InstructionSelectorT::VisitStackSlot(OpIndex node) {
876 const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
877 int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
878 stack_slot.is_tagged);
879 OperandGenerator g(this);
880
881 Emit(kArchStackSlot, g.DefineAsRegister(node),
882 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
883}
884
885void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
886 S390OperandGeneratorT g(this);
887 Emit(kArchAbortCSADcheck, g.NoOutput(),
888 g.UseFixed(this->input_at(node, 0), r3));
889}
890
891void InstructionSelectorT::VisitLoad(OpIndex node, OpIndex value,
892 InstructionCode opcode) {
893 S390OperandGeneratorT g(this);
894 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
895 InstructionOperand inputs[3];
896 size_t input_count = 0;
897 AddressingMode mode =
898 g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
899 opcode |= AddressingModeField::encode(mode);
900 Emit(opcode, 1, outputs, input_count, inputs);
901}
902
903void InstructionSelectorT::VisitLoad(OpIndex node) {
904 TurboshaftAdapter::LoadView view = this->load_view(node);
905 VisitLoad(node, node,
906 SelectLoadOpcode(view.ts_loaded_rep(), view.ts_result_rep()));
907}
908
909void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
910 // TODO(eholk)
911 UNIMPLEMENTED();
912}
913
914static void VisitGeneralStore(
915 InstructionSelectorT* selector, OpIndex node, MachineRepresentation rep,
916 WriteBarrierKind write_barrier_kind = kNoWriteBarrier) {
917 S390OperandGeneratorT g(selector);
918
919 auto store_view = selector->store_view(node);
920 DCHECK_EQ(store_view.element_size_log2(), 0);
921
922 OpIndex base = store_view.base();
923 OptionalOpIndex index = store_view.index();
924 OpIndex value = store_view.value();
925 int32_t displacement = store_view.displacement();
926
927 if (write_barrier_kind != kNoWriteBarrier &&
928 !v8_flags.disable_write_barriers) {
929 DCHECK(CanBeTaggedOrCompressedPointer(rep));
930 // Uncompressed stores should not happen if we need a write barrier.
931 CHECK((store_view.ts_stored_rep() !=
932 MemoryRepresentation::AnyUncompressedTagged()) &&
933 (store_view.ts_stored_rep() !=
934 MemoryRepresentation::UncompressedTaggedPointer()) &&
935 (store_view.ts_stored_rep() !=
936 MemoryRepresentation::UncompressedTaggedSigned()));
937 AddressingMode addressing_mode;
938 InstructionOperand inputs[4];
939 size_t input_count = 0;
940 addressing_mode = g.GenerateMemoryOperandInputs(
941 index, base, displacement, kPositiveDisplacement,
942 inputs, &input_count,
943 RegisterUseKind::kUseUniqueRegister);
944 DCHECK_LT(input_count, 4);
945 inputs[input_count++] = g.UseUniqueRegister(value);
946 RecordWriteMode record_write_mode =
947 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
948 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
949 size_t const temp_count = arraysize(temps);
950 InstructionCode code = kArchStoreWithWriteBarrier;
951 code |= AddressingModeField::encode(addressing_mode);
952 code |= RecordWriteModeField::encode(record_write_mode);
953 selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
954 } else {
955 ArchOpcode opcode;
956
957 switch (store_view.ts_stored_rep()) {
958 case MemoryRepresentation::Int8():
959 case MemoryRepresentation::Uint8():
960 opcode = kS390_StoreWord8;
961 break;
962 case MemoryRepresentation::Int16():
963 case MemoryRepresentation::Uint16():
964 opcode = kS390_StoreWord16;
965 break;
966 case MemoryRepresentation::Int32():
967 case MemoryRepresentation::Uint32(): {
968 opcode = kS390_StoreWord32;
969 const Operation& reverse_op = selector->Get(value);
970 if (reverse_op.Is<Opmask::kWord32ReverseBytes>()) {
971 opcode = kS390_StoreReverse32;
972 value = selector->input_at(value, 0);
973 }
974 break;
975 }
976 case MemoryRepresentation::Int64():
977 case MemoryRepresentation::Uint64(): {
978 opcode = kS390_StoreWord64;
979 const Operation& reverse_op = selector->Get(value);
980 if (reverse_op.Is<Opmask::kWord64ReverseBytes>()) {
981 opcode = kS390_StoreReverse64;
982 value = selector->input_at(value, 0);
983 }
984 break;
985 }
986 case MemoryRepresentation::Float16():
987 UNIMPLEMENTED();
988 case MemoryRepresentation::Float32():
989 opcode = kS390_StoreFloat32;
990 break;
991 case MemoryRepresentation::Float64():
992 opcode = kS390_StoreDouble;
993 break;
994 case MemoryRepresentation::AnyTagged():
995 case MemoryRepresentation::TaggedPointer():
996 case MemoryRepresentation::TaggedSigned():
997 opcode = kS390_StoreCompressTagged;
998 break;
999 case MemoryRepresentation::AnyUncompressedTagged():
1000 case MemoryRepresentation::UncompressedTaggedPointer():
1001 case MemoryRepresentation::UncompressedTaggedSigned():
1002 opcode = kS390_StoreWord64;
1003 break;
1004 case MemoryRepresentation::Simd128(): {
1005 opcode = kS390_StoreSimd128;
1006 const Operation& reverse_op = selector->Get(value);
1007 // TODO(miladfarca): Rename this to `Opmask::kSimd128ReverseBytes` once
1008 // Turboshaft naming is decoupled from Turbofan naming.
1009 if (reverse_op.Is<Opmask::kSimd128Simd128ReverseBytes>()) {
1010 opcode = kS390_StoreReverseSimd128;
1011 value = selector->input_at(value, 0);
1012 }
1013 break;
1014 }
1015 case MemoryRepresentation::ProtectedPointer():
1016 // We never store directly to protected pointers from generated code.
1017 UNREACHABLE();
1018 case MemoryRepresentation::IndirectPointer():
1019 case MemoryRepresentation::SandboxedPointer():
1020 case MemoryRepresentation::Simd256():
1021 UNREACHABLE();
1022 }
1023
1024 InstructionOperand inputs[4];
1025 size_t input_count = 0;
1026 AddressingMode addressing_mode =
1027 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
1028 InstructionCode code =
1029 opcode | AddressingModeField::encode(addressing_mode);
1030 InstructionOperand value_operand = g.UseRegister(value);
1031 inputs[input_count++] = value_operand;
1032 selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
1033 input_count, inputs);
1034 }
1035}
1036
1037void InstructionSelectorT::VisitStorePair(OpIndex node) { UNREACHABLE(); }
1038
1039void InstructionSelectorT::VisitStore(OpIndex node) {
1040 StoreRepresentation store_rep = this->store_view(node).stored_rep();
1041 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
1042 MachineRepresentation rep = store_rep.representation();
1043
1044 if (v8_flags.enable_unconditional_write_barriers &&
1045 CanBeTaggedOrCompressedPointer(rep)) {
1046 write_barrier_kind = kFullWriteBarrier;
1047 }
1048
1049 VisitGeneralStore(this, node, rep, write_barrier_kind);
1050}
1051
1052void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
1053 // TODO(eholk)
1054 UNIMPLEMENTED();
1055}
1056
1057// Architecture supports unaligned access, therefore VisitLoad is used instead
1058void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) { UNREACHABLE(); }
1059
1060// Architecture supports unaligned access, therefore VisitStore is used instead
1061void InstructionSelectorT::VisitUnalignedStore(OpIndex node) { UNREACHABLE(); }
1062
1063void InstructionSelectorT::VisitStackPointerGreaterThan(
1064 OpIndex node, FlagsContinuation* cont) {
1065 StackCheckKind kind;
1066 OpIndex value;
1067 const auto& op = this->turboshaft_graph()
1068 ->Get(node)
1069 .template Cast<StackPointerGreaterThanOp>();
1070 kind = op.kind;
1071 value = op.stack_limit();
1072 InstructionCode opcode =
1073 kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
1074
1075 S390OperandGeneratorT g(this);
1076
1077 // No outputs.
1078 InstructionOperand* const outputs = nullptr;
1079 const int output_count = 0;
1080
1081 // Applying an offset to this stack check requires a temp register. Offsets
1082 // are only applied to the first stack check. If applying an offset, we must
1083 // ensure the input and temp registers do not alias, thus kUniqueRegister.
1084 InstructionOperand temps[] = {g.TempRegister()};
1085 const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
1086 const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
1087 ? OperandGenerator::kUniqueRegister
1088 : OperandGenerator::kRegister;
1089
1090 InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1091 static constexpr int input_count = arraysize(inputs);
1092
1093 EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
1094 temp_count, temps, cont);
1095}
1096
1097#if 0
1098static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
1099 int mask_width = base::bits::CountPopulation(value);
1100 int mask_msb = base::bits::CountLeadingZeros32(value);
1101 int mask_lsb = base::bits::CountTrailingZeros32(value);
1102 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
1103 return false;
1104 *mb = mask_lsb + mask_width - 1;
1105 *me = mask_lsb;
1106 return true;
1107}
1108#endif
1109
1110static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
1111 int mask_width = base::bits::CountPopulation(value);
1112 int mask_msb = base::bits::CountLeadingZeros64(value);
1113 int mask_lsb = base::bits::CountTrailingZeros64(value);
1114 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
1115 return false;
1116 *mb = mask_lsb + mask_width - 1;
1117 *me = mask_lsb;
1118 return true;
1119}
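// Worked example (editorial): value = 0x0000000000FF0000 has popcount 8,
// 40 leading and 16 trailing zeros; 8 + 40 + 16 == 64, so the mask is
// contiguous and *mb = 23, *me = 16. A value such as 0xF0F0 fails the
// check (8 + 48 + 4 != 64).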
1120
1121void InstructionSelectorT::VisitWord64And(OpIndex node) {
1122 S390OperandGeneratorT g(this);
1123
1124 const WordBinopOp& bitwise_and = Get(node).Cast<WordBinopOp>();
1125 int mb = 0;
1126 int me = 0;
1127 int64_t value;
1128 if (MatchSignedIntegralConstant(bitwise_and.right(), &value) &&
1129 IsContiguousMask64(value, &mb, &me)) {
1130 int sh = 0;
1131 OpIndex left = bitwise_and.left();
1132 const Operation& lhs = Get(left);
1133 if ((lhs.Is<Opmask::kWord64ShiftRightLogical>() ||
1134 lhs.Is<Opmask::kWord64ShiftLeft>()) &&
1135 CanCover(node, left)) {
1136 // Try to absorb left/right shift into rldic
1137 int64_t shift_by;
1138 const ShiftOp& shift_op = lhs.Cast<ShiftOp>();
1139 if (MatchIntegralWord64Constant(shift_op.right(), &shift_by) &&
1140 base::IsInRange(shift_by, 0, 63)) {
1141 left = shift_op.left();
1142 sh = shift_by;
1143 if (lhs.Is<Opmask::kWord64ShiftRightLogical>()) {
1144 // Adjust the mask such that it doesn't include any rotated bits.
1145 if (mb > 63 - sh) mb = 63 - sh;
1146 sh = (64 - sh) & 0x3F;
1147 } else {
1148 // Adjust the mask such that it doesn't include any rotated bits.
1149 if (me < sh) me = sh;
1150 }
1151 }
1152 }
1153 if (mb >= me) {
1154 bool match = false;
1155 ArchOpcode opcode;
1156 int mask;
1157 if (me == 0) {
1158 match = true;
1159 opcode = kS390_RotLeftAndClearLeft64;
1160 mask = mb;
1161 } else if (mb == 63) {
1162 match = true;
1163 opcode = kS390_RotLeftAndClearRight64;
1164 mask = me;
1165 } else if (sh && me <= sh && lhs.Is<Opmask::kWord64ShiftLeft>()) {
1166 match = true;
1167 opcode = kS390_RotLeftAndClear64;
1168 mask = mb;
1169 }
1170 if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1171 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
1172 g.TempImmediate(sh), g.TempImmediate(mask));
1173 return;
1174 }
1175 }
1176 }
1177 VisitWord64BinOp(this, node, kS390_And64, And64OperandMode);
1178}
1179
1180void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
1181 S390OperandGeneratorT g(this);
1182 const ShiftOp& shl = this->Get(node).template Cast<ShiftOp>();
1183 const Operation& lhs = this->Get(shl.left());
1184 int64_t value;
1185 if (lhs.Is<Opmask::kWord64BitwiseAnd>() &&
1186 MatchSignedIntegralConstant(shl.right(), &value) &&
1187 base::IsInRange(value, 0, 63)) {
1188 int sh = value;
1189 int mb;
1190 int me;
1191 const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
1192 int64_t right_value;
1193 if (MatchSignedIntegralConstant(bitwise_and.right(), &right_value) &&
1194 IsContiguousMask64(right_value << sh, &mb, &me)) {
1195 // Adjust the mask such that it doesn't include any rotated bits.
1196 if (me < sh) me = sh;
1197 if (mb >= me) {
1198 bool match = false;
1199 ArchOpcode opcode;
1200 int mask;
1201 if (me == 0) {
1202 match = true;
1203 opcode = kS390_RotLeftAndClearLeft64;
1204 mask = mb;
1205 } else if (mb == 63) {
1206 match = true;
1207 opcode = kS390_RotLeftAndClearRight64;
1208 mask = me;
1209 } else if (sh && me <= sh) {
1210 match = true;
1211 opcode = kS390_RotLeftAndClear64;
1212 mask = mb;
1213 }
1214 if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1215 Emit(opcode, g.DefineAsRegister(node),
1216 g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
1217 g.TempImmediate(mask));
1218 return;
1219 }
1220 }
1221 }
1222 }
1223 VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode);
1224}
1225
1226void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
1227 S390OperandGeneratorT g(this);
1228 const ShiftOp& shr = this->Get(node).template Cast<ShiftOp>();
1229 const Operation& lhs = this->Get(shr.left());
1230 int64_t value;
1231 if (lhs.Is<Opmask::kWord64BitwiseAnd>() &&
1232 MatchSignedIntegralConstant(shr.right(), &value) &&
1233 base::IsInRange(value, 0, 63)) {
1234 int sh = value;
1235 int mb;
1236 int me;
1237 const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
1238 uint64_t right_value;
1239 if (MatchUnsignedIntegralConstant(bitwise_and.right(), &right_value) &&
1240 IsContiguousMask64(static_cast<uint64_t>(right_value >> sh), &mb,
1241 &me)) {
1242 // Adjust the mask such that it doesn't include any rotated bits.
1243 if (mb > 63 - sh) mb = 63 - sh;
1244 sh = (64 - sh) & 0x3F;
1245 if (mb >= me) {
1246 bool match = false;
1247 ArchOpcode opcode;
1248 int mask;
1249 if (me == 0) {
1250 match = true;
1251 opcode = kS390_RotLeftAndClearLeft64;
1252 mask = mb;
1253 } else if (mb == 63) {
1254 match = true;
1255 opcode = kS390_RotLeftAndClearRight64;
1256 mask = me;
1257 }
1258 if (match) {
1259 Emit(opcode, g.DefineAsRegister(node),
1260 g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
1261 g.TempImmediate(mask));
1262 return;
1263 }
1264 }
1265 }
1266 }
1267 VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode);
1268}
1269
1270static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
1271 InstructionSelectorT* selector, OpIndex node) {
1272 S390OperandGeneratorT g(selector);
1273
1274 const ShiftOp& sar = selector->Get(node).template Cast<ShiftOp>();
1275 const Operation& lhs = selector->Get(sar.left());
1276 if (selector->CanCover(node, sar.left()) &&
1277 lhs.Is<Opmask::kWord32ShiftLeft>()) {
1278 const ShiftOp& shl = lhs.Cast<ShiftOp>();
1279 uint64_t sar_value;
1280 uint64_t shl_value;
1281 if (selector->MatchUnsignedIntegralConstant(sar.right(), &sar_value) &&
1282 selector->MatchUnsignedIntegralConstant(shl.right(), &shl_value)) {
1283 uint32_t sar_by = sar_value;
1284 uint32_t shl_by = shl_value;
1285 if ((sar_by == shl_by) && (sar_by == 16)) {
1286 bool canEliminateZeroExt = ProduceWord32Result(selector, shl.left());
1287 selector->Emit(kS390_SignExtendWord16ToInt32,
1288 canEliminateZeroExt ? g.DefineSameAsFirst(node)
1289 : g.DefineAsRegister(node),
1290 g.UseRegister(shl.left()),
1291 g.TempImmediate(!canEliminateZeroExt));
1292 return true;
1293 } else if ((sar_by == shl_by) && (sar_by == 24)) {
1294 bool canEliminateZeroExt = ProduceWord32Result(selector, shl.left());
1295 selector->Emit(kS390_SignExtendWord8ToInt32,
1296 canEliminateZeroExt ? g.DefineSameAsFirst(node)
1297 : g.DefineAsRegister(node),
1298 g.UseRegister(shl.left()),
1299 g.TempImmediate(!canEliminateZeroExt));
1300 return true;
1301 }
1302 }
1303 }
1304 return false;
1305}
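// Pattern matched above (editorial note): an arithmetic (x << 16) >> 16 is
// a 16-bit sign extension and (x << 24) >> 24 an 8-bit one, so both
// collapse into a single sign-extend instruction instead of two shifts.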
1306
1307void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNREACHABLE(); }
1308
1309void InstructionSelectorT::VisitWord64Rol(OpIndex node) { UNREACHABLE(); }
1310
1311void InstructionSelectorT::VisitWord32Ctz(OpIndex node) { UNREACHABLE(); }
1312
1313void InstructionSelectorT::VisitWord64Ctz(OpIndex node) { UNREACHABLE(); }
1314
1315void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
1316 UNREACHABLE();
1317}
1318
1319void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {
1320 UNREACHABLE();
1321}
1322
1323void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
1324 VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
1325}
1326
1327void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
1328 VisitWord64UnaryOp(this, node, kS390_Abs64, OperandMode::kNone);
1329}
1330
1331void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
1332 S390OperandGeneratorT g(this);
1333 OpIndex input = this->Get(node).input(0);
1334 const Operation& input_op = this->Get(input);
1335 if (CanCover(node, input) && input_op.Is<LoadOp>()) {
1336 auto load = this->load_view(input);
1337 LoadRepresentation load_rep = load.loaded_rep();
1338 if (load_rep.representation() == MachineRepresentation::kWord64) {
1339 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
1340 InstructionOperand inputs[3];
1341 size_t input_count = 0;
1342 AddressingMode mode =
1343 g.GetEffectiveAddressMemoryOperand(input, inputs, &input_count);
1344 Emit(kS390_LoadReverse64 | AddressingModeField::encode(mode), 1, outputs,
1345 input_count, inputs);
1346 return;
1347 }
1348 }
1349 Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
1350 g.UseRegister(this->input_at(node, 0)));
1351}
1352
1353void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
1354 S390OperandGeneratorT g(this);
1355 OpIndex input = this->Get(node).input(0);
1356 const Operation& input_op = this->Get(input);
1357 if (CanCover(node, input) && input_op.Is<LoadOp>()) {
1358 auto load = this->load_view(input);
1359 LoadRepresentation load_rep = load.loaded_rep();
1360 if (load_rep.representation() == MachineRepresentation::kWord32) {
1361 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
1362 InstructionOperand inputs[3];
1363 size_t input_count = 0;
1364 AddressingMode mode =
1365 g.GetEffectiveAddressMemoryOperand(input, inputs, &input_count);
1366 Emit(kS390_LoadReverse32 | AddressingModeField::encode(mode), 1, outputs,
1367 input_count, inputs);
1368 return;
1369 }
1370 }
1371 Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
1372 g.UseRegister(this->input_at(node, 0)));
1373}
1374
1375void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
1376 S390OperandGeneratorT g(this);
1377 OpIndex input = this->Get(node).input(0);
1378 const Operation& input_op = this->Get(input);
1379 if (CanCover(node, input) && input_op.Is<LoadOp>()) {
1380 auto load = this->load_view(input);
1381 LoadRepresentation load_rep = load.loaded_rep();
1382 if (load_rep.representation() == MachineRepresentation::kSimd128) {
1383 InstructionOperand outputs[] = {g.DefineAsRegister(node)};
1384 InstructionOperand inputs[3];
1385 size_t input_count = 0;
1386 AddressingMode mode =
1387 g.GetEffectiveAddressMemoryOperand(input, inputs, &input_count);
1388 Emit(kS390_LoadReverseSimd128 | AddressingModeField::encode(mode), 1,
1389 outputs, input_count, inputs);
1390 return;
1391 }
1392 }
1393 Emit(kS390_LoadReverseSimd128RR, g.DefineAsRegister(node),
1394 g.UseRegister(this->input_at(node, 0)));
1395}
1396
1397template <class Matcher, ArchOpcode neg_opcode>
1398static inline bool TryMatchNegFromSub(InstructionSelectorT* selector,
1399 OpIndex node) {
1400 S390OperandGeneratorT g(selector);
1401 static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
1402 "Provided opcode is not a Neg opcode.");
1403 const WordBinopOp& sub_op = selector->Get(node).template Cast<WordBinopOp>();
1404 if (selector->MatchIntegralZero(sub_op.left())) {
1405 OpIndex value = sub_op.right();
1406 bool doZeroExt = DoZeroExtForResult(selector, node);
1407 bool canEliminateZeroExt = ProduceWord32Result(selector, value);
1408 if (doZeroExt) {
1409 selector->Emit(neg_opcode,
1410 canEliminateZeroExt ? g.DefineSameAsFirst(node)
1411 : g.DefineAsRegister(node),
1412 g.UseRegister(value),
1413 g.TempImmediate(!canEliminateZeroExt));
1414 } else {
1415 selector->Emit(neg_opcode, g.DefineAsRegister(node),
1416 g.UseRegister(value));
1417 }
1418 return true;
1419 }
1420 return false;
1421}
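// Illustrative case (editorial): a subtraction with a constant-zero left
// operand, i.e. 0 - x, is emitted above as a single negate
// (kS390_Neg32/kS390_Neg64) instead of a subtraction from a zeroed
// register.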
1422
1423template <class Matcher, ArchOpcode shift_op>
1424bool TryMatchShiftFromMul(InstructionSelectorT* selector, OpIndex node) {
1425 S390OperandGeneratorT g(selector);
1426 const Operation& op = selector->Get(node);
1427 const WordBinopOp& mul_op = op.Cast<WordBinopOp>();
1428 OpIndex left = mul_op.left();
1429 OpIndex right = mul_op.right();
1430 if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
1431 base::bits::IsPowerOfTwo(g.GetImmediate(right))) {
1432 int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
1433 bool doZeroExt = DoZeroExtForResult(selector, node);
1434 bool canEliminateZeroExt = ProduceWord32Result(selector, left);
1435 InstructionOperand dst = (doZeroExt && !canEliminateZeroExt &&
1436 CpuFeatures::IsSupported(DISTINCT_OPS))
1437 ? g.DefineAsRegister(node)
1438 : g.DefineSameAsFirst(node);
1439
1440 if (doZeroExt) {
1441 selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power),
1442 g.TempImmediate(!canEliminateZeroExt));
1443 } else {
1444 selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power));
1445 }
1446 return true;
1447 }
1448 return false;
1449}
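// Example (editorial): for x * 8 the right operand is a power of two, so
// power = 63 - CountLeadingZeros64(8) = 3 and the multiply is emitted as
// x << 3.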
1450
1451template <ArchOpcode opcode>
1452static inline bool TryMatchInt32OpWithOverflow(InstructionSelectorT* selector,
1453 OpIndex node,
1454 OperandModes mode) {
1455 OptionalOpIndex ovf = selector->FindProjection(node, 1);
1456 if (ovf.valid()) {
1457 FlagsContinuationT cont =
1458 FlagsContinuationT::ForSet(kOverflow, ovf.value());
1459 VisitWord32BinOp(selector, node, opcode, mode, &cont);
1460 return true;
1461 }
1462 return false;
1463}
1464
1465static inline bool TryMatchInt32AddWithOverflow(InstructionSelectorT* selector,
1466 OpIndex node) {
1467 return TryMatchInt32OpWithOverflow<kS390_Add32>(selector, node,
1468 AddOperandMode);
1469}
1470
1471static inline bool TryMatchInt32SubWithOverflow(InstructionSelectorT* selector,
1472 OpIndex node) {
1473 return TryMatchInt32OpWithOverflow<kS390_Sub32>(selector, node,
1474 SubOperandMode);
1475}
1476
1477static inline bool TryMatchInt32MulWithOverflow(InstructionSelectorT* selector,
1478 OpIndex node) {
1479 OptionalOpIndex ovf = selector->FindProjection(node, 1);
1480 if (ovf.valid()) {
1481 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1482 TryMatchInt32OpWithOverflow<kS390_Mul32>(
1483 selector, node, OperandMode::kAllowRRM | OperandMode::kAllowRRR);
1484 } else {
1485 FlagsContinuationT cont =
1486 FlagsContinuationT::ForSet(kNotEqual, ovf.value());
1487 VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,
1488 OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
1489 &cont);
1490 }
1491 return true;
1492 }
1493 return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(selector,
1494 node);
1495}
1496
1497template <ArchOpcode opcode>
1498static inline bool TryMatchInt64OpWithOverflow(InstructionSelectorT* selector,
1499 OpIndex node,
1500 OperandModes mode) {
1501 OptionalOpIndex ovf = selector->FindProjection(node, 1);
1502 if (ovf.valid()) {
1503 FlagsContinuationT cont =
1504 FlagsContinuationT::ForSet(kOverflow, ovf.value());
1505 VisitWord64BinOp(selector, node, opcode, mode, &cont);
1506 return true;
1507 }
1508 return false;
1509}
1510
1511static inline bool TryMatchInt64AddWithOverflow(InstructionSelectorT* selector,
1512 OpIndex node) {
1513 return TryMatchInt64OpWithOverflow<kS390_Add64>(selector, node,
1514 AddOperandMode);
1515}
1516
1517static inline bool TryMatchInt64SubWithOverflow(InstructionSelectorT* selector,
1518 OpIndex node) {
1519 return TryMatchInt64OpWithOverflow<kS390_Sub64>(selector, node,
1520 SubOperandMode);
1521}
1522
1523void EmitInt64MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
1524 FlagsContinuationT* cont) {
1525 S390OperandGeneratorT g(selector);
1526 OpIndex lhs = selector->input_at(node, 0);
1527 OpIndex rhs = selector->input_at(node, 1);
1528 InstructionOperand inputs[2];
1529 size_t input_count = 0;
1530 InstructionOperand outputs[1];
1531 size_t output_count = 0;
1532
1533 inputs[input_count++] = g.UseUniqueRegister(lhs);
1534 inputs[input_count++] = g.UseUniqueRegister(rhs);
1535 outputs[output_count++] = g.DefineAsRegister(node);
1536 selector->EmitWithContinuation(kS390_Mul64WithOverflow, output_count, outputs,
1537 input_count, inputs, cont);
1538}
1539
1539
1540static inline bool TryMatchDoubleConstructFromInsert(
1541 InstructionSelectorT* selector, OpIndex node) {
1542 UNIMPLEMENTED();
1543}
1544
1545#define null ([]() { return false; })
1546
1547#define FLOAT_UNARY_OP_LIST(V) \
1548 V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32, \
1549 OperandMode::kNone, null) \
1550 V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone, \
1551 null) \
1552 V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null) \
1553 V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null) \
1554 V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone, \
1555 null) \
1556 V(Float64, Float64RoundTiesEven, kS390_DoubleNearestInt, OperandMode::kNone, \
1557 null) \
1558 V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone, \
1559 null) \
1560 V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null) \
1561 V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null) \
1562 V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null) \
1563 V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null) \
1564 V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null) \
1565 V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone, \
1566 null) \
1567 V(Float32, Float32RoundTiesEven, kS390_FloatNearestInt, OperandMode::kNone, \
1568 null) \
1569 V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null) \
1570 V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null) \
1571 V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null) \
1572 V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64, \
1573 OperandMode::kNone, null) \
1574 V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32, \
1575 OperandMode::kAllowRM, null) \
1576 V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32, \
1577 OperandMode::kNone, null) \
1578 /* TODO(john.yan): can use kAllowRM */ \
1579 V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32, \
1580 OperandMode::kNone, null) \
1581 V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
1582 null) \
1583 V(Float64, ChangeFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
1584 null) \
1585 V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone, \
1586 null) \
1587 V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, \
1588 null) \
1589 V(Float64, TruncateFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
1590 null) \
1591 V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32, \
1592 OperandMode::kNone, null) \
1593 V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI, \
1594 OperandMode::kNone, null) \
1595 V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble, \
1596 OperandMode::kAllowRM, null) \
1597 V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone, null)
1598
1599#define FLOAT_BIN_OP_LIST(V) \
1600 V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
1601 V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
1602 V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null) \
1603 V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null) \
1604 V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null) \
1605 V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null) \
1606 V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null) \
1607 V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null) \
1608 V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null) \
1609 V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null) \
1610 V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
1611 V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null)
1612
1613#define WORD32_UNARY_OP_LIST(V) \
1614 V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64, \
1615 OperandMode::kNone, null) \
1616 V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64, \
1617 OperandMode::kNone, null) \
1618 V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64, \
1619 OperandMode::kNone, null) \
1620 V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32, \
1621 OperandMode::kNone, null) \
1622 V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32, \
1623 OperandMode::kNone, null) \
1624 V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null) \
1625 V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null) \
1626 V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32, \
1627 OperandMode::kNone, null) \
1628 V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
1629 null) \
1630 V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
1631 null) \
1632 V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone, \
1633 null) \
1634 V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone, \
1635 null) \
1636 V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64, \
1637 OperandMode::kNone, null) \
1638 V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \
1639 [&]() -> bool { \
1640 if (ProduceWord32Result(this, this->input_at(node, 0))) { \
1641 EmitIdentity(node); \
1642 return true; \
1643 } \
1644 return false; \
1645 })
1646
1647#define WORD32_BIN_OP_LIST(V) \
1648 V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32, \
1649 OperandMode::kAllowRRR, \
1650 [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); }) \
1651 V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32, \
1652 OperandMode::kAllowRRR, \
1653 [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); }) \
1654 V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode, \
1655 ([&]() { return TryMatchInt32SubWithOverflow(this, node); })) \
1656 V(Word32, Uint32MulHigh, kS390_MulHighU32, \
1657 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1658 V(Word32, Uint32Mod, kS390_ModU32, \
1659 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1660 V(Word32, Uint32Div, kS390_DivU32, \
1661 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1662 V(Word32, Int32Mod, kS390_Mod32, \
1663 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1664 V(Word32, Int32Div, kS390_Div32, \
1665 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1666 V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() { \
1667 return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(this, \
1668 node); \
1669 })) \
1670 V(Word32, Int32MulHigh, kS390_MulHigh32, \
1671 OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null) \
1672 V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() { \
1673 return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node); \
1674 })) \
1675 V(Word32, Int32Add, kS390_Add32, AddOperandMode, null) \
1676 V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null) \
1677 V(Word32, Word32Ror, kS390_RotRight32, \
1678 OperandMode::kAllowRI | OperandMode::kAllowRRR | OperandMode::kAllowRRI | \
1679 OperandMode::kShift32Imm, \
1680 null) \
1681 V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null) \
1682 V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null) \
1683 V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode, \
1684 ([&]() { return TryMatchInt32AddWithOverflow(this, node); })) \
1685 V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode, \
1686 ([&]() { return TryMatchInt32MulWithOverflow(this, node); })) \
1687 V(Word32, Word32And, kS390_And32, And32OperandMode, null) \
1688 V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null) \
1689 V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode, \
1690 [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); })
1691
1692#define WORD64_UNARY_OP_LIST(V) \
1693 V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone, \
1694 null) \
1695 V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null) \
1696 V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null) \
1697 V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode, \
1698 ([&]() { return TryMatchInt64SubWithOverflow(this, node); })) \
1699 V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble, \
1700 OperandMode::kNone, null) \
1701 V(Word64, ChangeInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, \
1702 null) \
1703 V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone, \
1704 null) \
1705 V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
1706 null) \
1707 V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone, \
1708 null) \
1709 V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone, null)
1710
1711#define WORD64_BIN_OP_LIST(V) \
1712 V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode, \
1713 ([&]() { return TryMatchInt64AddWithOverflow(this, node); })) \
1714 V(Word64, Uint64MulHigh, kS390_MulHighU64, OperandMode::kAllowRRR, null) \
1715 V(Word64, Uint64Mod, kS390_ModU64, \
1716 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1717 V(Word64, Uint64Div, kS390_DivU64, \
1718 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1719 V(Word64, Int64Mod, kS390_Mod64, \
1720 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1721 V(Word64, Int64Div, kS390_Div64, \
1722 OperandMode::kAllowRRM | OperandMode::kAllowRRR, null) \
1723 V(Word64, Int64MulHigh, kS390_MulHighS64, OperandMode::kAllowRRR, null) \
1724 V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() { \
1725 return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(this, \
1726 node); \
1727 })) \
1728 V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() { \
1729 return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node); \
1730 })) \
1731 V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null) \
1732 V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null) \
1733 V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null) \
1734 V(Word64, Int64Add, kS390_Add64, AddOperandMode, null) \
1735 V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null)
1736
1737#define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
1738 void InstructionSelectorT::Visit##name(OpIndex node) { \
1739 if (std::function<bool()>(try_extra)()) return; \
1740 Visit##type##UnaryOp(this, node, op, mode); \
1741 }
1742
1743#define DECLARE_BIN_OP(type, name, op, mode, try_extra) \
1744 void InstructionSelectorT::Visit##name(OpIndex node) { \
1745 if (std::function<bool()>(try_extra)()) return; \
1746 Visit##type##BinOp(this, node, op, mode); \
1747 }
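// Each list entry supplies a `try_extra` callback that gets first chance to
// match a cheaper pattern (e.g. a multiply by a power of two becoming a
// shift) before selection falls through to the generic emitter. `null` is
// assumed to be a macro defined earlier in this file that expands to a
// lambda returning false (i.e. "no special case matched").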
1748
1749FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP)
1750FLOAT_BIN_OP_LIST(DECLARE_BIN_OP)
1751WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP)
1752WORD32_BIN_OP_LIST(DECLARE_BIN_OP)
1753WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
1754WORD64_BIN_OP_LIST(DECLARE_BIN_OP)
1755
1756#undef FLOAT_UNARY_OP_LIST
1757#undef FLOAT_BIN_OP_LIST
1758#undef WORD32_UNARY_OP_LIST
1759#undef WORD32_BIN_OP_LIST
1760#undef WORD64_UNARY_OP_LIST
1761#undef WORD64_BIN_OP_LIST
1762#undef DECLARE_UNARY_OP
1763#undef DECLARE_BIN_OP
1764#undef null
1765
1766void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
1767 VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
1768}
1769
1770void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
1771 VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
1772}
1773
1774void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
1775 VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
1776}
1777
1778void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
1779 VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
1780}
1781
1782void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
1783 VisitTryTruncateDouble(this, kS390_DoubleToInt32, node);
1784}
1785
1786void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
1787 VisitTryTruncateDouble(this, kS390_DoubleToUint32, node);
1788}
1789
1790void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
1791 DCHECK(SmiValuesAre31Bits());
1792 DCHECK(COMPRESS_POINTERS_BOOL);
1793 EmitIdentity(node);
1794}
1795
1796void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
1797 S390OperandGeneratorT g(this);
1798 Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
1799 g.UseFixed(this->input_at(node, 0), d1),
1800 g.UseFixed(this->input_at(node, 1), d2))
1801 ->MarkAsCall();
1802}
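// No s390 instruction computes a floating-point remainder directly, so
// Float64Mod is emitted as a call (note MarkAsCall) with both operands
// pinned to fixed double registers d1/d2, presumably targeting the C
// runtime's fmod.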
1803
1804void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
1805 InstructionCode opcode) {
1806 S390OperandGeneratorT g(this);
1807 Emit(opcode, g.DefineAsFixed(node, d1),
1808 g.UseFixed(this->input_at(node, 0), d1))
1809 ->MarkAsCall();
1810}
1811
1812void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
1813 InstructionCode opcode) {
1814 S390OperandGeneratorT g(this);
1815 Emit(opcode, g.DefineAsFixed(node, d1),
1816 g.UseFixed(this->input_at(node, 0), d1),
1817 g.UseFixed(this->input_at(node, 1), d2))
1818 ->MarkAsCall();
1819}
1820
1821void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
1822 OptionalOpIndex ovf = FindProjection(node, 1);
1823 if (ovf.valid()) {
1824 FlagsContinuation cont = FlagsContinuation::ForSet(
1825 CpuFeatures::IsSupported(MISC_INSTR_EXT2) ? kOverflow : kNotEqual,
1826 ovf.value());
1827 return EmitInt64MulWithOverflow(this, node, &cont);
1828 }
1829 FlagsContinuation cont;
1830 EmitInt64MulWithOverflow(this, node, &cont);
1831}
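// When MISC_INSTR_EXT2 is available, the emitted multiply sets the overflow
// condition directly (kOverflow); otherwise overflow is detected with an
// explicit comparison, so the continuation tests kNotEqual instead.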
1832
1833static bool CompareLogical(FlagsContinuation* cont) {
1834 switch (cont->condition()) {
1835 case kUnsignedLessThan:
1836 case kUnsignedGreaterThanOrEqual:
1837 case kUnsignedLessThanOrEqual:
1838 case kUnsignedGreaterThan:
1839 return true;
1840 default:
1841 return false;
1842 }
1843 UNREACHABLE();
1844}
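// CompareLogical() reports whether the continuation consumes an unsigned
// condition, in which case the compare must be emitted as a logical
// (unsigned) compare. VisitWord32Compare/VisitWord64Compare below use it to
// choose between the kUint32Imm and kInt32Imm immediate modes.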
1845
1846namespace {
1847
1848// Shared routine for multiple compare operations.
1849void VisitCompare(InstructionSelectorT* selector, InstructionCode opcode,
1850 InstructionOperand left, InstructionOperand right,
1851 FlagsContinuationT* cont) {
1852 selector->EmitWithContinuation(opcode, left, right, cont);
1853}
1854
1855void VisitLoadAndTest(InstructionSelectorT* selector, InstructionCode opcode,
1856 OpIndex node, OpIndex value, FlagsContinuationT* cont,
1857 bool discard_output = false);
1858
1859// Shared routine for multiple word compare operations.
1860void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
1861 InstructionCode opcode, FlagsContinuationT* cont,
1862 OperandModes immediate_mode) {
1863 S390OperandGeneratorT g(selector);
1864 OpIndex lhs = selector->input_at(node, 0);
1865 OpIndex rhs = selector->input_at(node, 1);
1866
1867 const Operation& op = selector->Get(node);
1868 DCHECK(op.Is<ComparisonOp>() || op.Is<Opmask::kWord32Sub>() ||
1869 op.Is<Opmask::kWord64Sub>());
1870 USE(op);
1871
1872 InstructionOperand inputs[8];
1873 InstructionOperand outputs[1];
1874 size_t input_count = 0;
1875 size_t output_count = 0;
1876
1877 // If one of the two inputs is an immediate, make sure it's on the right, or
1878 // if one of the two inputs is a memory operand, make sure it's on the left.
1879 int effect_level = selector->GetEffectLevel(node, cont);
1880
1881 if ((!g.CanBeImmediate(rhs, immediate_mode) &&
1882 g.CanBeImmediate(lhs, immediate_mode)) ||
1883 (!g.CanBeMemoryOperand(opcode, node, rhs, effect_level) &&
1884 g.CanBeMemoryOperand(opcode, node, lhs, effect_level))) {
1885 if (!selector->IsCommutative(node)) cont->Commute();
1886 std::swap(lhs, rhs);
1887 }
1888
1889 // Check whether this is a compare against zero.
1890 if (g.CanBeImmediate(rhs, immediate_mode) && g.GetImmediate(rhs) == 0) {
1891 DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
1892 ArchOpcode load_and_test = (opcode == kS390_Cmp32)
1893 ? kS390_LoadAndTestWord32
1894 : kS390_LoadAndTestWord64;
1895 return VisitLoadAndTest(selector, load_and_test, node, lhs, cont, true);
1896 }
1897
1898 inputs[input_count++] = g.UseRegister(lhs);
1899 if (g.CanBeMemoryOperand(opcode, node, rhs, effect_level)) {
1900 // Generate a memory operand.
1901 AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
1902 rhs, inputs, &input_count, OpcodeImmMode(opcode));
1903 opcode |= AddressingModeField::encode(addressing_mode);
1904 } else if (g.CanBeImmediate(rhs, immediate_mode)) {
1905 inputs[input_count++] = g.UseImmediate(rhs);
1906 } else {
1907 inputs[input_count++] = g.UseAnyExceptImmediate(rhs);
1908 }
1909
1910 DCHECK(input_count <= 8 && output_count <= 1);
1911 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
1912 inputs, cont);
1913}
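// Worked example of the canonicalization above: for `5 == x` the immediate
// is swapped to the right (commuting the continuation), giving a single
// compare-immediate; for `x == 0` the compare disappears entirely in favor
// of a load-and-test, which sets the condition code as a side effect.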
1914
1915void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
1916 FlagsContinuationT* cont) {
1917 OperandModes mode =
1918 (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
1919 VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
1920}
1921
1922void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
1923 FlagsContinuationT* cont) {
1924 OperandModes mode =
1925 (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
1926 VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
1927}
1928
1929// Shared routine for multiple float32 compare operations.
1930void VisitFloat32Compare(InstructionSelectorT* selector, OpIndex node,
1931 FlagsContinuationT* cont) {
1932 VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
1933}
1934
1935// Shared routine for multiple float64 compare operations.
1936void VisitFloat64Compare(InstructionSelectorT* selector, OpIndex node,
1937 FlagsContinuationT* cont) {
1938 VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
1939}
1940
1941void VisitTestUnderMask(InstructionSelectorT* selector, OpIndex node,
1942 FlagsContinuationT* cont) {
1943 const Operation& op = selector->Get(node);
1944 DCHECK(op.Is<Opmask::kWord32BitwiseAnd>() ||
1945 op.Is<Opmask::kWord64BitwiseAnd>());
1946 USE(op);
1947
1948 ArchOpcode opcode;
1949 if (selector->Get(node).template TryCast<Opmask::kWord32BitwiseAnd>()) {
1950 opcode = kS390_Tst32;
1951 } else {
1952 opcode = kS390_Tst64;
1953 }
1954
1955 S390OperandGeneratorT g(selector);
1956 OpIndex lhs = selector->input_at(node, 0);
1957 OpIndex rhs = selector->input_at(node, 1);
1958 if (!g.CanBeImmediate(rhs, OperandMode::kUint32Imm) &&
1959 g.CanBeImmediate(lhs, OperandMode::kUint32Imm)) {
1960 std::swap(lhs, rhs);
1961 }
1962 VisitCompare(selector, opcode, g.UseRegister(lhs),
1963 g.UseOperand(rhs, OperandMode::kUint32Imm), cont);
1964}
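// Example: `(x & 0xFF00) == 0` reaches here and becomes kS390_Tst32, which
// can be encoded with the z/Architecture TEST UNDER MASK family when the
// mask fits in an immediate, so no AND result register is materialized.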
1965
1966void VisitLoadAndTest(InstructionSelectorT* selector, InstructionCode opcode,
1967 OpIndex node, OpIndex value, FlagsContinuationT* cont,
1968 bool discard_output) {
1969 static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
1970 "LoadAndTest Opcode shouldn't contain other opcodes.");
1971 // TODO(john.yan): Add support for Float32/Float64.
1972 DCHECK(opcode >= kS390_LoadAndTestWord32 &&
1973 opcode <= kS390_LoadAndTestWord64);
1974
1975 S390OperandGeneratorT g(selector);
1976 InstructionOperand inputs[8];
1977 InstructionOperand outputs[2];
1978 size_t input_count = 0;
1979 size_t output_count = 0;
1980 bool use_value = false;
1981
1982 int effect_level = selector->GetEffectLevel(node, cont);
1983
1984 if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
1985 // Generate a memory operand.
1986 AddressingMode addressing_mode =
1987 g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
1988 opcode |= AddressingModeField::encode(addressing_mode);
1989 } else {
1990 inputs[input_count++] = g.UseAnyExceptImmediate(value);
1991 use_value = true;
1992 }
1993
1994 if (!discard_output && !use_value) {
1995 outputs[output_count++] = g.DefineAsRegister(value);
1996 }
1997
1998 DCHECK(input_count <= 8 && output_count <= 2);
1999 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
2000 inputs, cont);
2001}
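// Load-and-test fuses producing `value` (folded in as a memory operand where
// legal, otherwise a register use) with setting the condition code, so a
// zero-compare needs no separate compare instruction. The optional output
// re-defines the loaded value only when it came from memory and the caller
// still needs it (!discard_output).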
2002
2003} // namespace
2004
2005void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
2006 FlagsContinuation* cont) {
2007 // Try to combine with comparisons against 0 by simply inverting the branch.
2008 ConsumeEqualZero(&user, &value, cont);
2009
2010 FlagsCondition fc = cont->condition();
2011 if (CanCover(user, value)) {
2012 const Operation& value_op = this->Get(value);
2013 if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
2014 if (comparison->kind == ComparisonOp::Kind::kEqual) {
2015 switch (comparison->rep.MapTaggedToWord().value()) {
2016 case RegisterRepresentation::Word32():
2017 cont->OverwriteAndNegateIfEqual(kEqual);
2018 if (this->MatchIntegralZero(comparison->right())) {
2019 // Try to combine the branch with a comparison.
2020 if (CanCover(value, comparison->left())) {
2021 const Operation& left_op = this->Get(comparison->left());
2022 if (left_op.Is<Opmask::kWord32Sub>()) {
2023 return VisitWord32Compare(this, comparison->left(), cont);
2024 } else if (left_op.Is<Opmask::kWord32BitwiseAnd>()) {
2025 return VisitTestUnderMask(this, comparison->left(), cont);
2026 }
2027 }
2028 }
2029 return VisitWord32Compare(this, value, cont);
2030 }
2031 case RegisterRepresentation::Word64():
2032 cont->OverwriteAndNegateIfEqual(kEqual);
2033 if (this->MatchIntegralZero(comparison->right())) {
2034 // Try to combine the branch with a comparison.
2035 if (CanCover(value, comparison->left())) {
2036 const Operation& left_op = this->Get(comparison->left());
2037 if (left_op.Is<Opmask::kWord64Sub>()) {
2038 return VisitWord64Compare(this, comparison->left(), cont);
2039 } else if (left_op.Is<Opmask::kWord64BitwiseAnd>()) {
2040 return VisitTestUnderMask(this, comparison->left(), cont);
2041 }
2042 }
2043 }
2044 return VisitWord64Compare(this, value, cont);
2045 }
2046 case RegisterRepresentation::Float32():
2047 cont->OverwriteAndNegateIfEqual(kEqual);
2048 return VisitFloat32Compare(this, value, cont);
2049 case RegisterRepresentation::Float64():
2050 cont->OverwriteAndNegateIfEqual(kEqual);
2051 return VisitFloat64Compare(this, value, cont);
2052 default:
2053 break;
2054 }
2055 } else {
2056 switch (comparison->rep.MapTaggedToWord().value()) {
2057 case RegisterRepresentation::Word32():
2058 cont->OverwriteAndNegateIfEqual(
2059 GetComparisonFlagCondition(*comparison));
2060 return VisitWord32Compare(this, value, cont);
2061 case RegisterRepresentation::Word64():
2062 cont->OverwriteAndNegateIfEqual(
2063 GetComparisonFlagCondition(*comparison));
2064 return VisitWord64Compare(this, value, cont);
2065 case RegisterRepresentation::Float32():
2066 switch (comparison->kind) {
2067 case ComparisonOp::Kind::kSignedLessThan:
2068 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2069 return VisitFloat32Compare(this, value, cont);
2070 case ComparisonOp::Kind::kSignedLessThanOrEqual:
2071 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2072 return VisitFloat32Compare(this, value, cont);
2073 default:
2074 UNREACHABLE();
2075 }
2076 case RegisterRepresentation::Float64():
2077 switch (comparison->kind) {
2078 case ComparisonOp::Kind::kSignedLessThan:
2079 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2080 return VisitFloat64Compare(this, value, cont);
2081 case ComparisonOp::Kind::kSignedLessThanOrEqual:
2082 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2083 return VisitFloat64Compare(this, value, cont);
2084 default:
2085 UNREACHABLE();
2086 }
2087 default:
2088 break;
2089 }
2090 }
2091 } else if (const ProjectionOp* projection =
2092 value_op.TryCast<ProjectionOp>()) {
2093 // Check if this is the overflow output projection of an
2094 // <Operation>WithOverflow node.
2095 if (projection->index == 1u) {
2096 // We cannot combine the <Operation>WithOverflow with this branch
2097 // unless the 0th projection (the use of the actual value of the
2098 // <Operation>) is either nullptr, which means there's no use of the
2099 // actual value, or was already defined, which means it is scheduled
2100 // *AFTER* this branch.
2101 OpIndex node = projection->input();
2102 if (const OverflowCheckedBinopOp* binop =
2103 this->TryCast<OverflowCheckedBinopOp>(node);
2104 binop && CanDoBranchIfOverflowFusion(node)) {
2105 const bool is64 = binop->rep == WordRepresentation::Word64();
2106 switch (binop->kind) {
2107 case OverflowCheckedBinopOp::Kind::kSignedAdd:
2108 cont->OverwriteAndNegateIfEqual(kOverflow);
2109 if (is64) {
2110 return VisitWord64BinOp(this, node, kS390_Add64, AddOperandMode,
2111 cont);
2112 } else {
2113 return VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode,
2114 cont);
2115 }
2116 case OverflowCheckedBinopOp::Kind::kSignedSub:
2117 cont->OverwriteAndNegateIfEqual(kOverflow);
2118 if (is64) {
2119 return VisitWord64BinOp(this, node, kS390_Sub64, AddOperandMode,
2120 cont);
2121 } else {
2122 return VisitWord32BinOp(this, node, kS390_Sub32, AddOperandMode,
2123 cont);
2124 }
2125 case OverflowCheckedBinopOp::Kind::kSignedMul:
2126 if (is64) {
2127 cont->OverwriteAndNegateIfEqual(
2128 CpuFeatures::IsSupported(MISC_INSTR_EXT2) ? kOverflow
2129 : kNotEqual);
2130 return EmitInt64MulWithOverflow(this, node, cont);
2131
2132 } else {
2133 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
2134 cont->OverwriteAndNegateIfEqual(kOverflow);
2135 return VisitWord32BinOp(
2136 this, node, kS390_Mul32,
2137 OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
2138 } else {
2139 cont->OverwriteAndNegateIfEqual(kNotEqual);
2140 return VisitWord32BinOp(
2141 this, node, kS390_Mul32WithOverflow,
2142 OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
2143 cont);
2144 }
2145 }
2146 default:
2147 break;
2148 }
2149 } else if (const OverflowCheckedUnaryOp* unop =
2150 this->TryCast<OverflowCheckedUnaryOp>(node);
2151 unop && CanDoBranchIfOverflowFusion(node)) {
2152 const bool is64 = unop->rep == WordRepresentation::Word64();
2153 switch (unop->kind) {
2154 case OverflowCheckedUnaryOp::Kind::kAbs:
2155 if (is64) {
2156 cont->OverwriteAndNegateIfEqual(kOverflow);
2157 return VisitWord64UnaryOp(this, node, kS390_Abs64,
2158 OperandMode::kNone, cont);
2159 } else {
2160 cont->OverwriteAndNegateIfEqual(kOverflow);
2161 return VisitWord32UnaryOp(this, node, kS390_Abs32,
2162 OperandMode::kNone, cont);
2163 }
2164 default:
2165 break;
2166 }
2167 }
2168 }
2169 } else if (value_op.Is<Opmask::kWord32Sub>()) {
2170 if (fc == kNotEqual || fc == kEqual)
2171 return VisitWord32Compare(this, value, cont);
2172 } else if (value_op.Is<Opmask::kWord32BitwiseAnd>()) {
2173 return VisitTestUnderMask(this, value, cont);
2174 } else if (value_op.Is<LoadOp>()) {
2175 auto load = this->load_view(value);
2176 LoadRepresentation load_rep = load.loaded_rep();
2177 switch (load_rep.representation()) {
2178 case MachineRepresentation::kWord32:
2179 return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value,
2180 cont);
2181 default:
2182 break;
2183 }
2184 } else if (value_op.Is<Opmask::kWord32BitwiseOr>()) {
2185 if (fc == kNotEqual || fc == kEqual)
2186 return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode, cont);
2187 } else if (value_op.Is<Opmask::kWord32BitwiseXor>()) {
2188 if (fc == kNotEqual || fc == kEqual)
2189 return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode,
2190 cont);
2191 } else if (value_op.Is<Opmask::kWord64Sub>()) {
2192 if (fc == kNotEqual || fc == kEqual)
2193 return VisitWord64Compare(this, value, cont);
2194 } else if (value_op.Is<Opmask::kWord64BitwiseAnd>()) {
2195 return VisitTestUnderMask(this, value, cont);
2196 } else if (value_op.Is<Opmask::kWord64BitwiseOr>()) {
2197 if (fc == kNotEqual || fc == kEqual)
2198 return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode, cont);
2199 } else if (value_op.Is<Opmask::kWord64BitwiseXor>()) {
2200 if (fc == kNotEqual || fc == kEqual)
2201 return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode,
2202 cont);
2203 } else if (value_op.Is<StackPointerGreaterThanOp>()) {
2204 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
2205 return VisitStackPointerGreaterThan(value, cont);
2206 }
2207 }
2208 // Branch could not be combined with a compare; emit a LoadAndTest instead.
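// (This mirrors the VisitWord32Equal zero-test path below; the trailing
// `true` discards the loaded value, since only the condition code is used.)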
2209 VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
2210}
2211
2212void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
2213 S390OperandGeneratorT g(this);
2214 InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));
2215
2216 // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
2217 if (enable_switch_jump_table_ ==
2218 InstructionSelector::kEnableSwitchJumpTable) {
2219 static const size_t kMaxTableSwitchValueRange = 2 << 16;
2220 size_t table_space_cost = 4 + sw.value_range();
2221 size_t table_time_cost = 3;
2222 size_t lookup_space_cost = 3 + 2 * sw.case_count();
2223 size_t lookup_time_cost = sw.case_count();
2224 if (sw.case_count() > 0 &&
2225 table_space_cost + 3 * table_time_cost <=
2226 lookup_space_cost + 3 * lookup_time_cost &&
2227 sw.min_value() > std::numeric_limits<int32_t>::min() &&
2228 sw.value_range() <= kMaxTableSwitchValueRange) {
2229 InstructionOperand index_operand = value_operand;
2230 if (sw.min_value()) {
2231 index_operand = g.TempRegister();
2232 Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
2233 value_operand, g.TempImmediate(-sw.min_value()));
2234 }
2235 InstructionOperand index_operand_zero_ext = g.TempRegister();
2236 Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
2237 index_operand = index_operand_zero_ext;
2238 // Generate a table lookup.
2239 return EmitTableSwitch(sw, index_operand);
2240 }
2241 }
2242
2243 // Generate a tree of conditional jumps.
2244 return EmitBinarySearchSwitch(sw, value_operand);
2245}
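// A sketch of the heuristic above: with case_count = 10 and value_range = 32,
// the table cost is (4 + 32) + 3 * 3 = 45 versus a lookup cost of
// (3 + 2 * 10) + 3 * 10 = 53, so the jump table wins; a sparse switch
// (value_range = 1000, say) would instead fall back to the binary-search tree.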
2246
2247void InstructionSelectorT::VisitWord32Equal(OpIndex const node) {
2248 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2249 const ComparisonOp& op = this->Get(node).template Cast<ComparisonOp>();
2250 if (this->MatchIntegralZero(op.right())) {
2251 return VisitLoadAndTest(this, kS390_LoadAndTestWord32, node, op.left(),
2252 &cont, true);
2253 }
2254 VisitWord32Compare(this, node, &cont);
2255}
2256
2257void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
2258 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2259 VisitWord32Compare(this, node, &cont);
2260}
2261
2262void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
2263 FlagsContinuation cont =
2264 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2265 VisitWord32Compare(this, node, &cont);
2266}
2267
2268void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
2269 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2270 VisitWord32Compare(this, node, &cont);
2271}
2272
2273void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
2274 FlagsContinuation cont =
2275 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2276 VisitWord32Compare(this, node, &cont);
2277}
2278
2279void InstructionSelectorT::VisitWord64Equal(OpIndex const node) {
2280 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2281 const ComparisonOp& op = this->Get(node).template Cast<ComparisonOp>();
2282 if (this->MatchIntegralZero(op.right())) {
2283 return VisitLoadAndTest(this, kS390_LoadAndTestWord64, node, op.left(),
2284 &cont, true);
2285 }
2286 VisitWord64Compare(this, node, &cont);
2287}
2288
2289void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
2290 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2291 VisitWord64Compare(this, node, &cont);
2292}
2293
2294void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
2295 FlagsContinuation cont =
2296 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2297 VisitWord64Compare(this, node, &cont);
2298}
2299
2300void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
2301 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2302 VisitWord64Compare(this, node, &cont);
2303}
2304
2305void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
2306 FlagsContinuation cont =
2307 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2308 VisitWord64Compare(this, node, &cont);
2309}
2310
2311void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
2312 UNIMPLEMENTED();
2313}
2314
2315void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
2316 UNIMPLEMENTED();
2317}
2318
2319void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
2320 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2321 VisitFloat32Compare(this, node, &cont);
2322}
2323
2324void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
2325 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2326 VisitFloat32Compare(this, node, &cont);
2327}
2328
2329void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
2330 FlagsContinuation cont =
2331 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2332 VisitFloat32Compare(this, node, &cont);
2333}
2334
2335void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
2336 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2337 VisitFloat64Compare(this, node, &cont);
2338}
2339
2340void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
2341 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2342 VisitFloat64Compare(this, node, &cont);
2343}
2344
2345void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
2346 FlagsContinuation cont =
2347 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2348 VisitFloat64Compare(this, node, &cont);
2349}
2350
2351void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
2352 S390OperandGeneratorT g(this);
2353 const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
2354 OpIndex hi = bitcast.high_word32();
2355 OpIndex lo = bitcast.low_word32();
2356
2357 InstructionOperand temps[] = {g.TempRegister()};
2358 Emit(kS390_DoubleFromWord32Pair, g.DefineAsRegister(node), g.UseRegister(hi),
2359 g.UseRegister(lo), arraysize(temps), temps);
2360}
2361
2362bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
2363 UNIMPLEMENTED();
2364}
2365
2366void InstructionSelectorT::EmitMoveParamToFPR(OpIndex node, int index) {}
2367
2368void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
2369 LinkageLocation location) {}
2370
2371void InstructionSelectorT::EmitPrepareArguments(
2372 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
2373 OpIndex node) {
2374 S390OperandGeneratorT g(this);
2375
2376 // Prepare for C function call.
2377 if (call_descriptor->IsCFunctionCall()) {
2378 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
2379 call_descriptor->ParameterCount())),
2380 0, nullptr, 0, nullptr);
2381
2382 // Poke any stack arguments.
2383 int slot = kStackFrameExtraParamSlot;
2384 for (PushParameter input : (*arguments)) {
2385 if (!input.node.valid()) continue;
2386 Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
2387 g.TempImmediate(slot));
2388 ++slot;
2389 }
2390 } else {
2391 // Push any stack arguments.
2392 int stack_decrement = 0;
2393 for (PushParameter input : base::Reversed(*arguments)) {
2394 stack_decrement += kSystemPointerSize;
2395 // Skip any alignment holes in pushed nodes.
2396 if (!input.node.valid()) continue;
2397 InstructionOperand decrement = g.UseImmediate(stack_decrement);
2398 stack_decrement = 0;
2399 Emit(kS390_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
2400 }
2401 }
2402}
2403
2404void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
2405 S390OperandGeneratorT g(this);
2406 Emit(kArchNop, g.NoOutput());
2407}
2408
2409bool InstructionSelectorT::IsTailCallAddressImmediate() { return false; }
2410
2411void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
2412 auto load = this->load_view(node);
2413 LoadRepresentation load_rep = load.loaded_rep();
2414 VisitLoad(node, node, SelectLoadOpcode(load_rep));
2415}
2416
2417void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
2418 auto store = this->store_view(node);
2419 AtomicStoreParameters store_params(store.stored_rep().representation(),
2420 store.stored_rep().write_barrier_kind(),
2421 store.memory_order().value(),
2422 store.access_kind());
2423 VisitGeneralStore(this, node, store_params.representation());
2424}
2425
2426void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
2427 ArchOpcode opcode, AtomicWidth width) {
2428 S390OperandGeneratorT g(selector);
2429 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
2430 OpIndex base = atomic_op.base();
2431 OpIndex index = atomic_op.index();
2432 OpIndex value = atomic_op.value();
2433
2434 AddressingMode addressing_mode = kMode_MRR;
2435 InstructionOperand inputs[3];
2436 size_t input_count = 0;
2437 inputs[input_count++] = g.UseUniqueRegister(base);
2438 inputs[input_count++] = g.UseUniqueRegister(index);
2439 inputs[input_count++] = g.UseUniqueRegister(value);
2440 InstructionOperand outputs[1];
2441 outputs[0] = g.DefineAsRegister(node);
2442 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2443 AtomicWidthField::encode(width);
2444 selector->Emit(code, 1, outputs, input_count, inputs);
2445}
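// All inputs are in unique registers: the exchange is expanded later into a
// compare-and-swap-style retry loop, so base, index, and value must not
// alias the loop's output register.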
2446
2447void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
2448 ArchOpcode opcode;
2449 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2450 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2451 opcode = kAtomicExchangeInt8;
2452 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2453 opcode = kAtomicExchangeUint8;
2454 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2455 opcode = kAtomicExchangeInt16;
2456 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2457 opcode = kAtomicExchangeUint16;
2458 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2459 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2460 opcode = kAtomicExchangeWord32;
2461 } else {
2462 UNREACHABLE();
2463 }
2464 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
2465}
2466
2467void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
2468 ArchOpcode opcode;
2469 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2470 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2471 opcode = kAtomicExchangeUint8;
2472 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2473 opcode = kAtomicExchangeUint16;
2474 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2475 opcode = kAtomicExchangeWord32;
2476 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2477 opcode = kS390_Word64AtomicExchangeUint64;
2478 } else {
2479 UNREACHABLE();
2480 }
2481 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
2482}
2483
2484void VisitAtomicCompareExchange(InstructionSelectorT* selector, OpIndex node,
2485 ArchOpcode opcode, AtomicWidth width) {
2486 S390OperandGeneratorT g(selector);
2487 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
2488 OpIndex base = atomic_op.base();
2489 OpIndex index = atomic_op.index();
2490 OpIndex old_value = atomic_op.expected().value();
2491 OpIndex new_value = atomic_op.value();
2492
2493 InstructionOperand inputs[4];
2494 size_t input_count = 0;
2495 inputs[input_count++] = g.UseUniqueRegister(old_value);
2496 inputs[input_count++] = g.UseUniqueRegister(new_value);
2497 inputs[input_count++] = g.UseUniqueRegister(base);
2498
2499 AddressingMode addressing_mode;
2500 if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
2501 inputs[input_count++] = g.UseImmediate(index);
2502 addressing_mode = kMode_MRI;
2503 } else {
2504 inputs[input_count++] = g.UseUniqueRegister(index);
2505 addressing_mode = kMode_MRR;
2506 }
2507
2508 InstructionOperand outputs[1];
2509 size_t output_count = 0;
2510 outputs[output_count++] = g.DefineSameAsFirst(node);
2511
2512 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2513 AtomicWidthField::encode(width);
2514 selector->Emit(code, output_count, outputs, input_count, inputs);
2515}
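// DefineSameAsFirst ties the result to the old_value register, matching
// z/Architecture COMPARE AND SWAP (CS/CSG) semantics, where the first
// operand register receives the current memory contents if the swap fails.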
2516
2517void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
2518 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2519 ArchOpcode opcode;
2520 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2521 opcode = kAtomicCompareExchangeInt8;
2522 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2523 opcode = kAtomicCompareExchangeUint8;
2524 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2525 opcode = kAtomicCompareExchangeInt16;
2526 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2527 opcode = kAtomicCompareExchangeUint16;
2528 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2529 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2530 opcode = kAtomicCompareExchangeWord32;
2531 } else {
2532 UNREACHABLE();
2533 }
2534 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
2535}
2536
2537void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
2538 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2539 ArchOpcode opcode;
2540 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2541 opcode = kAtomicCompareExchangeUint8;
2542 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2543 opcode = kAtomicCompareExchangeUint16;
2544 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2545 opcode = kAtomicCompareExchangeWord32;
2546 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2547 opcode = kS390_Word64AtomicCompareExchangeUint64;
2548 } else {
2549 UNREACHABLE();
2550 }
2551 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
2552}
2553
2554void VisitAtomicBinop(InstructionSelectorT* selector, OpIndex node,
2555 ArchOpcode opcode, AtomicWidth width) {
2556 S390OperandGeneratorT g(selector);
2557 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
2558 OpIndex base = atomic_op.base();
2559 OpIndex index = atomic_op.index();
2560 OpIndex value = atomic_op.value();
2561
2562 InstructionOperand inputs[3];
2563 size_t input_count = 0;
2564 inputs[input_count++] = g.UseUniqueRegister(base);
2565
2566 AddressingMode addressing_mode;
2567 if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
2568 inputs[input_count++] = g.UseImmediate(index);
2569 addressing_mode = kMode_MRI;
2570 } else {
2571 inputs[input_count++] = g.UseUniqueRegister(index);
2572 addressing_mode = kMode_MRR;
2573 }
2574
2575 inputs[input_count++] = g.UseUniqueRegister(value);
2576
2577 InstructionOperand outputs[1];
2578 size_t output_count = 0;
2579 outputs[output_count++] = g.DefineAsRegister(node);
2580
2581 InstructionOperand temps[1];
2582 size_t temp_count = 0;
2583 temps[temp_count++] = g.TempRegister();
2584
2585 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2586 AtomicWidthField::encode(width);
2587 selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
2588 temps);
2589}
2590
2591void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
2592 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2593 ArchOpcode uint16_op, ArchOpcode word32_op) {
2594 ArchOpcode opcode;
2595 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2596 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2597 opcode = int8_op;
2598 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2599 opcode = uint8_op;
2600 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2601 opcode = int16_op;
2602 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2603 opcode = uint16_op;
2604 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2605 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2606 opcode = word32_op;
2607 } else {
2608 UNREACHABLE();
2609 }
2610 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
2611}
2612
2613#define VISIT_ATOMIC_BINOP(op) \
2614 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
2615 VisitWord32AtomicBinaryOperation( \
2616 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
2617 kAtomic##op##Uint16, kAtomic##op##Word32); \
2618 }
2619VISIT_ATOMIC_BINOP(Add)
2620VISIT_ATOMIC_BINOP(Sub)
2621VISIT_ATOMIC_BINOP(And)
2622VISIT_ATOMIC_BINOP(Or)
2623VISIT_ATOMIC_BINOP(Xor)
2624#undef VISIT_ATOMIC_BINOP
2625
2626void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
2627 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
2628 ArchOpcode word32_op, ArchOpcode word64_op) {
2629 ArchOpcode opcode;
2630 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2631 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2632 opcode = uint8_op;
2633 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2634 opcode = uint16_op;
2635 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2636 opcode = word32_op;
2637 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2638 opcode = word64_op;
2639 } else {
2640 UNREACHABLE();
2641 }
2642 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
2643}
2644
2645#define VISIT_ATOMIC64_BINOP(op) \
2646 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
2647 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
2648 kAtomic##op##Uint16, kAtomic##op##Word32, \
2649 kS390_Word64Atomic##op##Uint64); \
2650 }
2651VISIT_ATOMIC64_BINOP(Add)
2652VISIT_ATOMIC64_BINOP(Sub)
2653VISIT_ATOMIC64_BINOP(And)
2654VISIT_ATOMIC64_BINOP(Or)
2655VISIT_ATOMIC64_BINOP(Xor)
2656#undef VISIT_ATOMIC64_BINOP
2657
2658void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
2659 auto load = this->load_view(node);
2660 LoadRepresentation load_rep = load.loaded_rep();
2661 VisitLoad(node, node, SelectLoadOpcode(load_rep));
2662}
2663
2664void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
2665 auto store = this->store_view(node);
2666 AtomicStoreParameters store_params(store.stored_rep().representation(),
2667 store.stored_rep().write_barrier_kind(),
2668 store.memory_order().value(),
2669 store.access_kind());
2670 VisitGeneralStore(this, node, store_params.representation());
2671}
2672
2673#define SIMD_TYPES(V) \
2674 V(F64x2) \
2675 V(F32x4) \
2676 V(I64x2) \
2677 V(I32x4) \
2678 V(I16x8) \
2679 V(I8x16)
2680
2681#define SIMD_BINOP_LIST(V) \
2682 V(F64x2Add) \
2683 V(F64x2Sub) \
2684 V(F64x2Mul) \
2685 V(F64x2Div) \
2686 V(F64x2Eq) \
2687 V(F64x2Ne) \
2688 V(F64x2Lt) \
2689 V(F64x2Le) \
2690 V(F64x2Min) \
2691 V(F64x2Max) \
2692 V(F64x2Pmin) \
2693 V(F64x2Pmax) \
2694 V(F32x4Add) \
2695 V(F32x4Sub) \
2696 V(F32x4Mul) \
2697 V(F32x4Eq) \
2698 V(F32x4Ne) \
2699 V(F32x4Lt) \
2700 V(F32x4Le) \
2701 V(F32x4Div) \
2702 V(F32x4Min) \
2703 V(F32x4Max) \
2704 V(F32x4Pmin) \
2705 V(F32x4Pmax) \
2706 V(I64x2Add) \
2707 V(I64x2Sub) \
2708 V(I64x2Mul) \
2709 V(I64x2Eq) \
2710 V(I64x2ExtMulLowI32x4S) \
2711 V(I64x2ExtMulHighI32x4S) \
2712 V(I64x2ExtMulLowI32x4U) \
2713 V(I64x2ExtMulHighI32x4U) \
2714 V(I64x2Ne) \
2715 V(I64x2GtS) \
2716 V(I64x2GeS) \
2717 V(I64x2Shl) \
2718 V(I64x2ShrS) \
2719 V(I64x2ShrU) \
2720 V(I32x4Add) \
2721 V(I32x4Sub) \
2722 V(I32x4Mul) \
2723 V(I32x4MinS) \
2724 V(I32x4MinU) \
2725 V(I32x4MaxS) \
2726 V(I32x4MaxU) \
2727 V(I32x4Eq) \
2728 V(I32x4Ne) \
2729 V(I32x4GtS) \
2730 V(I32x4GeS) \
2731 V(I32x4GtU) \
2732 V(I32x4GeU) \
2733 V(I32x4ExtMulLowI16x8S) \
2734 V(I32x4ExtMulHighI16x8S) \
2735 V(I32x4ExtMulLowI16x8U) \
2736 V(I32x4ExtMulHighI16x8U) \
2737 V(I32x4Shl) \
2738 V(I32x4ShrS) \
2739 V(I32x4ShrU) \
2740 V(I32x4DotI16x8S) \
2741 V(I16x8Add) \
2742 V(I16x8Sub) \
2743 V(I16x8Mul) \
2744 V(I16x8MinS) \
2745 V(I16x8MinU) \
2746 V(I16x8MaxS) \
2747 V(I16x8MaxU) \
2748 V(I16x8Eq) \
2749 V(I16x8Ne) \
2750 V(I16x8GtS) \
2751 V(I16x8GeS) \
2752 V(I16x8GtU) \
2753 V(I16x8GeU) \
2754 V(I16x8SConvertI32x4) \
2755 V(I16x8UConvertI32x4) \
2756 V(I16x8RoundingAverageU) \
2757 V(I16x8ExtMulLowI8x16S) \
2758 V(I16x8ExtMulHighI8x16S) \
2759 V(I16x8ExtMulLowI8x16U) \
2760 V(I16x8ExtMulHighI8x16U) \
2761 V(I16x8Shl) \
2762 V(I16x8ShrS) \
2763 V(I16x8ShrU) \
2764 V(I8x16Add) \
2765 V(I8x16Sub) \
2766 V(I8x16MinS) \
2767 V(I8x16MinU) \
2768 V(I8x16MaxS) \
2769 V(I8x16MaxU) \
2770 V(I8x16Eq) \
2771 V(I8x16Ne) \
2772 V(I8x16GtS) \
2773 V(I8x16GeS) \
2774 V(I8x16GtU) \
2775 V(I8x16GeU) \
2776 V(I8x16SConvertI16x8) \
2777 V(I8x16UConvertI16x8) \
2778 V(I8x16RoundingAverageU) \
2779 V(I8x16Shl) \
2780 V(I8x16ShrS) \
2781 V(I8x16ShrU) \
2782 V(S128And) \
2783 V(S128Or) \
2784 V(S128Xor) \
2785 V(S128AndNot)
2786
2787#define SIMD_BINOP_UNIQUE_REGISTER_LIST(V) \
2788 V(I16x8AddSatS) \
2789 V(I16x8SubSatS) \
2790 V(I16x8AddSatU) \
2791 V(I16x8SubSatU) \
2792 V(I16x8Q15MulRSatS) \
2793 V(I8x16AddSatS) \
2794 V(I8x16SubSatS) \
2795 V(I8x16AddSatU) \
2796 V(I8x16SubSatU)
2797
2798#define SIMD_UNOP_LIST(V) \
2799 V(F64x2Abs) \
2800 V(F64x2Neg) \
2801 V(F64x2Sqrt) \
2802 V(F64x2Ceil) \
2803 V(F64x2Floor) \
2804 V(F64x2Trunc) \
2805 V(F64x2NearestInt) \
2806 V(F64x2ConvertLowI32x4S) \
2807 V(F64x2ConvertLowI32x4U) \
2808 V(F64x2PromoteLowF32x4) \
2809 V(F64x2Splat) \
2810 V(F32x4Abs) \
2811 V(F32x4Neg) \
2812 V(F32x4Sqrt) \
2813 V(F32x4Ceil) \
2814 V(F32x4Floor) \
2815 V(F32x4Trunc) \
2816 V(F32x4NearestInt) \
2817 V(F32x4DemoteF64x2Zero) \
2818 V(F32x4SConvertI32x4) \
2819 V(F32x4UConvertI32x4) \
2820 V(F32x4Splat) \
2821 V(I64x2Neg) \
2822 V(I64x2SConvertI32x4Low) \
2823 V(I64x2SConvertI32x4High) \
2824 V(I64x2UConvertI32x4Low) \
2825 V(I64x2UConvertI32x4High) \
2826 V(I64x2Abs) \
2827 V(I64x2BitMask) \
2828 V(I64x2Splat) \
2829 V(I64x2AllTrue) \
2830 V(I32x4Neg) \
2831 V(I32x4Abs) \
2832 V(I32x4SConvertF32x4) \
2833 V(I32x4UConvertF32x4) \
2834 V(I32x4SConvertI16x8Low) \
2835 V(I32x4SConvertI16x8High) \
2836 V(I32x4UConvertI16x8Low) \
2837 V(I32x4UConvertI16x8High) \
2838 V(I32x4TruncSatF64x2SZero) \
2839 V(I32x4TruncSatF64x2UZero) \
2840 V(I32x4BitMask) \
2841 V(I32x4Splat) \
2842 V(I32x4AllTrue) \
2843 V(I16x8Neg) \
2844 V(I16x8Abs) \
2845 V(I16x8SConvertI8x16Low) \
2846 V(I16x8SConvertI8x16High) \
2847 V(I16x8UConvertI8x16Low) \
2848 V(I16x8UConvertI8x16High) \
2849 V(I16x8BitMask) \
2850 V(I16x8Splat) \
2851 V(I16x8AllTrue) \
2852 V(I8x16Neg) \
2853 V(I8x16Abs) \
2854 V(I8x16Popcnt) \
2855 V(I8x16BitMask) \
2856 V(I8x16Splat) \
2857 V(I8x16AllTrue) \
2858 V(S128Not) \
2859 V(V128AnyTrue)
2860
2861#define SIMD_UNOP_UNIQUE_REGISTER_LIST(V) \
2862 V(I32x4ExtAddPairwiseI16x8S) \
2863 V(I32x4ExtAddPairwiseI16x8U) \
2864 V(I16x8ExtAddPairwiseI8x16S) \
2865 V(I16x8ExtAddPairwiseI8x16U)
2866
2867#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
2868 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
2869 S390OperandGeneratorT g(this); \
2870 int32_t lane; \
2871 using namespace turboshaft; /* NOLINT(build/namespaces) */ \
2872 const Operation& op = this->Get(node); \
2873 lane = op.template Cast<Simd128ExtractLaneOp>().lane; \
2874 Emit(kS390_##Type##ExtractLane##Sign, g.DefineAsRegister(node), \
2875 g.UseRegister(this->input_at(node, 0)), g.UseImmediate(lane)); \
2876 }
2877SIMD_VISIT_EXTRACT_LANE(F64x2, )
2878SIMD_VISIT_EXTRACT_LANE(F32x4, )
2879SIMD_VISIT_EXTRACT_LANE(I64x2, )
2880SIMD_VISIT_EXTRACT_LANE(I32x4, )
2881SIMD_VISIT_EXTRACT_LANE(I16x8, U)
2882SIMD_VISIT_EXTRACT_LANE(I16x8, S)
2883SIMD_VISIT_EXTRACT_LANE(I8x16, U)
2884SIMD_VISIT_EXTRACT_LANE(I8x16, S)
2885#undef SIMD_VISIT_EXTRACT_LANE
2886
2887#define SIMD_VISIT_REPLACE_LANE(Type) \
2888 void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
2889 S390OperandGeneratorT g(this); \
2890 int32_t lane; \
2891 using namespace turboshaft; /* NOLINT(build/namespaces) */ \
2892 const Operation& op = this->Get(node); \
2893 lane = op.template Cast<Simd128ReplaceLaneOp>().lane; \
2894 Emit(kS390_##Type##ReplaceLane, g.DefineAsRegister(node), \
2895 g.UseRegister(this->input_at(node, 0)), g.UseImmediate(lane), \
2896 g.UseRegister(this->input_at(node, 1))); \
2897 }
2898SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
2899#undef SIMD_VISIT_REPLACE_LANE
2900
2901#define SIMD_VISIT_BINOP(Opcode) \
2902 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2903 S390OperandGeneratorT g(this); \
2904 Emit(kS390_##Opcode, g.DefineAsRegister(node), \
2905 g.UseRegister(this->input_at(node, 0)), \
2906 g.UseRegister(this->input_at(node, 1))); \
2907 }
2908SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
2909#undef SIMD_VISIT_BINOP
2910#undef SIMD_BINOP_LIST
2911
2912#define SIMD_VISIT_BINOP_UNIQUE_REGISTER(Opcode) \
2913 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2914 S390OperandGeneratorT g(this); \
2915 InstructionOperand temps[] = {g.TempSimd128Register(), \
2916 g.TempSimd128Register()}; \
2917 Emit(kS390_##Opcode, g.DefineAsRegister(node), \
2918 g.UseUniqueRegister(this->input_at(node, 0)), \
2919 g.UseUniqueRegister(this->input_at(node, 1)), arraysize(temps), \
2920 temps); \
2921 }
2922SIMD_BINOP_UNIQUE_REGISTER_LIST(SIMD_VISIT_BINOP_UNIQUE_REGISTER)
2923#undef SIMD_VISIT_BINOP_UNIQUE_REGISTER
2924#undef SIMD_BINOP_UNIQUE_REGISTER_LIST
2925
2926#define SIMD_VISIT_UNOP(Opcode) \
2927 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2928 S390OperandGeneratorT g(this); \
2929 Emit(kS390_##Opcode, g.DefineAsRegister(node), \
2930 g.UseRegister(this->input_at(node, 0))); \
2931 }
2932SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
2933#undef SIMD_VISIT_UNOP
2934#undef SIMD_UNOP_LIST
2935
2936#define SIMD_VISIT_UNOP_UNIQUE_REGISTER(Opcode) \
2937 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2938 S390OperandGeneratorT g(this); \
2939 InstructionOperand temps[] = {g.TempSimd128Register()}; \
2940 Emit(kS390_##Opcode, g.DefineAsRegister(node), \
2941 g.UseUniqueRegister(this->input_at(node, 0)), arraysize(temps), \
2942 temps); \
2943 }
2944SIMD_UNOP_UNIQUE_REGISTER_LIST(SIMD_VISIT_UNOP_UNIQUE_REGISTER)
2945#undef SIMD_VISIT_UNOP_UNIQUE_REGISTER
2946#undef SIMD_UNOP_UNIQUE_REGISTER_LIST
2947
2948#define SIMD_VISIT_QFMOP(Opcode) \
2949 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2950 S390OperandGeneratorT g(this); \
2951 Emit(kS390_##Opcode, g.DefineSameAsFirst(node), \
2952 g.UseRegister(this->input_at(node, 0)), \
2953 g.UseRegister(this->input_at(node, 1)), \
2954 g.UseRegister(this->input_at(node, 2))); \
2955 }
2956SIMD_VISIT_QFMOP(F64x2Qfma)
2957SIMD_VISIT_QFMOP(F64x2Qfms)
2958SIMD_VISIT_QFMOP(F32x4Qfma)
2959SIMD_VISIT_QFMOP(F32x4Qfms)
2960#undef SIMD_VISIT_QFMOP
2961
2962#define SIMD_RELAXED_OP_LIST(V) \
2963 V(F64x2RelaxedMin, F64x2Pmin) \
2964 V(F64x2RelaxedMax, F64x2Pmax) \
2965 V(F32x4RelaxedMin, F32x4Pmin) \
2966 V(F32x4RelaxedMax, F32x4Pmax) \
2967 V(I32x4RelaxedTruncF32x4S, I32x4SConvertF32x4) \
2968 V(I32x4RelaxedTruncF32x4U, I32x4UConvertF32x4) \
2969 V(I32x4RelaxedTruncF64x2SZero, I32x4TruncSatF64x2SZero) \
2970 V(I32x4RelaxedTruncF64x2UZero, I32x4TruncSatF64x2UZero) \
2971 V(I16x8RelaxedQ15MulRS, I16x8Q15MulRSatS) \
2972 V(I8x16RelaxedLaneSelect, S128Select) \
2973 V(I16x8RelaxedLaneSelect, S128Select) \
2974 V(I32x4RelaxedLaneSelect, S128Select) \
2975 V(I64x2RelaxedLaneSelect, S128Select)
2976
2977#define SIMD_VISIT_RELAXED_OP(name, op) \
2978 void InstructionSelectorT::Visit##name(OpIndex node) { Visit##op(node); }
2979SIMD_RELAXED_OP_LIST(SIMD_VISIT_RELAXED_OP)
2980#undef SIMD_VISIT_RELAXED_OP
2981#undef SIMD_RELAXED_OP_LIST
2982
2983#define F16_OP_LIST(V) \
2984 V(F16x8Splat) \
2985 V(F16x8ExtractLane) \
2986 V(F16x8ReplaceLane) \
2987 V(F16x8Abs) \
2988 V(F16x8Neg) \
2989 V(F16x8Sqrt) \
2990 V(F16x8Floor) \
2991 V(F16x8Ceil) \
2992 V(F16x8Trunc) \
2993 V(F16x8NearestInt) \
2994 V(F16x8Add) \
2995 V(F16x8Sub) \
2996 V(F16x8Mul) \
2997 V(F16x8Div) \
2998 V(F16x8Min) \
2999 V(F16x8Max) \
3000 V(F16x8Pmin) \
3001 V(F16x8Pmax) \
3002 V(F16x8Eq) \
3003 V(F16x8Ne) \
3004 V(F16x8Lt) \
3005 V(F16x8Le) \
3006 V(F16x8SConvertI16x8) \
3007 V(F16x8UConvertI16x8) \
3008 V(I16x8SConvertF16x8) \
3009 V(I16x8UConvertF16x8) \
3010 V(F32x4PromoteLowF16x8) \
3011 V(F16x8DemoteF32x4Zero) \
3012 V(F16x8DemoteF64x2Zero) \
3013 V(F16x8Qfma) \
3014 V(F16x8Qfms)
3015
3016#define VISIT_F16_OP(name) \
3017 void InstructionSelectorT::Visit##name(OpIndex node) { UNIMPLEMENTED(); }
3018F16_OP_LIST(VISIT_F16_OP)
3019#undef VISIT_F16_OP
3020#undef F16_OP_LIST
3021#undef SIMD_TYPES
3022
3023#if V8_ENABLE_WEBASSEMBLY
3024void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
3025 uint8_t shuffle[kSimd128Size];
3026 bool is_swizzle;
3027 // TODO(nicohartmann@): Properly use view here once Turboshaft support is
3028 // implemented.
3029 auto view = this->simd_shuffle_view(node);
3030 CanonicalizeShuffle(view, shuffle, &is_swizzle);
3031 S390OperandGeneratorT g(this);
3032 OpIndex input0 = view.input(0);
3033 OpIndex input1 = view.input(1);
3034 // Remap the shuffle indices to match IBM lane numbering.
3035 int max_index = 15;
3036 int total_lane_count = 2 * kSimd128Size;
3037 uint8_t shuffle_remapped[kSimd128Size];
3038 for (int i = 0; i < kSimd128Size; i++) {
3039 uint8_t current_index = shuffle[i];
3040 shuffle_remapped[i] = (current_index <= max_index
3041 ? max_index - current_index
3042 : total_lane_count - current_index + max_index);
3043 }
3044 Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
3045 g.UseRegister(input1),
3046 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
3047 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
3048 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
3049 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
3050}
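// Worked example of the remapping: with max_index = 15 and
// total_lane_count = 32, index 0 -> 15 and 15 -> 0 (first operand mirrored),
// while 16 -> 31 and 31 -> 16 (second operand mirrored), converting
// little-endian lane numbering into the big-endian numbering the byte
// permute instruction expects.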
3051
3052void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) {
3053 S390OperandGeneratorT g(this);
3054 bool relaxed;
3055 const Simd128BinopOp& binop = this->Get(node).template Cast<Simd128BinopOp>();
3056 DCHECK(binop.kind == any_of(Simd128BinopOp::Kind::kI8x16Swizzle,
3057 Simd128BinopOp::Kind::kI8x16RelaxedSwizzle));
3058 relaxed = binop.kind == Simd128BinopOp::Kind::kI8x16RelaxedSwizzle;
3059 // TODO(miladfarca): Optimize Swizzle if relaxed.
3060 USE(relaxed);
3061
3062 Emit(kS390_I8x16Swizzle, g.DefineAsRegister(node),
3063 g.UseUniqueRegister(this->input_at(node, 0)),
3064 g.UseUniqueRegister(this->input_at(node, 1)));
3065}
3066
3067void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
3068 OperandGenerator g(this);
3069 // TODO(miladfarca): Optimize by using UseAny.
3070 auto input = g.UseRegister(this->input_at(node, 0));
3071 Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
3072}
3073
3074#else
3075void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNREACHABLE(); }
3076void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) { UNREACHABLE(); }
3077#endif // V8_ENABLE_WEBASSEMBLY
3078
3079// This is a replica of SimdShuffle::Pack4Lanes. That function is not
3080// available in builds with WebAssembly disabled, so it is declared locally
3081// here, since it is also used by other visitors such as S128Const.
3082static int32_t Pack4Lanes(const uint8_t* shuffle) {
3083 int32_t result = 0;
3084 for (int i = 3; i >= 0; --i) {
3085 result <<= 8;
3086 result |= shuffle[i];
3087 }
3088 return result;
3089}
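// Example: Pack4Lanes({0x01, 0x02, 0x03, 0x04}) yields 0x04030201 -- each
// byte is shifted in from the high end, so shuffle[0] ends up in the least
// significant byte of the packed int32.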
3090
3091void InstructionSelectorT::VisitS128Const(OpIndex node) {
3092 S390OperandGeneratorT g(this);
3093 uint32_t val[kSimd128Size / sizeof(uint32_t)];
3094 const Simd128ConstantOp& constant =
3095 this->Get(node).template Cast<Simd128ConstantOp>();
3096 memcpy(val, constant.value, kSimd128Size);
3097 // If all bytes are zeros, avoid emitting code for generic constants.
3098 bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
3099 bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
3100 val[2] == UINT32_MAX && val[3] == UINT32_MAX;
3101 InstructionOperand dst = g.DefineAsRegister(node);
3102 if (all_zeros) {
3103 Emit(kS390_S128Zero, dst);
3104 } else if (all_ones) {
3105 Emit(kS390_S128AllOnes, dst);
3106 } else {
3107 // We have to use Pack4Lanes to reverse the bytes (lanes) on BE;
3108 // on LE it has no effect.
3109 Emit(kS390_S128Const, dst,
3110 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]))),
3111 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 4)),
3112 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 8)),
3113 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 12)));
3114 }
3115}
3116
3117void InstructionSelectorT::VisitS128Zero(OpIndex node) {
3118 S390OperandGeneratorT g(this);
3119 Emit(kS390_S128Zero, g.DefineAsRegister(node));
3120}
3121
3122void InstructionSelectorT::VisitS128Select(OpIndex node) {
3123 S390OperandGeneratorT g(this);
3124 Emit(kS390_S128Select, g.DefineAsRegister(node),
3125 g.UseRegister(this->input_at(node, 0)),
3126 g.UseRegister(this->input_at(node, 1)),
3127 g.UseRegister(this->input_at(node, 2)));
3128}
3129
3130void InstructionSelectorT::EmitPrepareResults(
3131 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
3132 OpIndex node) {
3133 S390OperandGeneratorT g(this);
3134
3135 for (PushParameter output : *results) {
3136 if (!output.location.IsCallerFrameSlot()) continue;
3137 // Skip any alignment holes in nodes.
3138 if (output.node.valid()) {
3139 DCHECK(!call_descriptor->IsCFunctionCall());
3140 if (output.location.GetType() == MachineType::Float32()) {
3141 MarkAsFloat32(output.node);
3142 } else if (output.location.GetType() == MachineType::Float64()) {
3143 MarkAsFloat64(output.node);
3144 } else if (output.location.GetType() == MachineType::Simd128()) {
3145 MarkAsSimd128(output.node);
3146 }
3147 int offset = call_descriptor->GetOffsetToReturns();
3148 int reverse_slot = -output.location.GetLocation() - offset;
3149 Emit(kS390_Peek, g.DefineAsRegister(output.node),
3150 g.UseImmediate(reverse_slot));
3151 }
3152 }
3153}
3154
3155void InstructionSelectorT::VisitLoadLane(OpIndex node) {
3156 InstructionCode opcode;
3157 int32_t lane;
3158 const Simd128LaneMemoryOp& load =
3159 this->Get(node).template Cast<Simd128LaneMemoryOp>();
3160 lane = load.lane;
3161 switch (load.lane_kind) {
3162 case Simd128LaneMemoryOp::LaneKind::k8:
3163 opcode = kS390_S128Load8Lane;
3164 break;
3165 case Simd128LaneMemoryOp::LaneKind::k16:
3166 opcode = kS390_S128Load16Lane;
3167 break;
3168 case Simd128LaneMemoryOp::LaneKind::k32:
3169 opcode = kS390_S128Load32Lane;
3170 break;
3171 case Simd128LaneMemoryOp::LaneKind::k64:
3172 opcode = kS390_S128Load64Lane;
3173 break;
3174 }
3175 S390OperandGeneratorT g(this);
3176 InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
3177 InstructionOperand inputs[5];
3178 size_t input_count = 0;
3179
3180 inputs[input_count++] = g.UseRegister(this->input_at(node, 2));
3181 inputs[input_count++] = g.UseImmediate(lane);
3182
3183 AddressingMode mode =
3184 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
3185 opcode |= AddressingModeField::encode(mode);
3186 Emit(opcode, 1, outputs, input_count, inputs);
3187}
3188
3189void InstructionSelectorT::VisitLoadTransform(OpIndex node) {
3190 ArchOpcode opcode;
3191 const Simd128LoadTransformOp& op =
3192 this->Get(node).template Cast<Simd128LoadTransformOp>();
3193 switch (op.transform_kind) {
3194 case Simd128LoadTransformOp::TransformKind::k8Splat:
3195 opcode = kS390_S128Load8Splat;
3196 break;
3197 case Simd128LoadTransformOp::TransformKind::k16Splat:
3198 opcode = kS390_S128Load16Splat;
3199 break;
3200 case Simd128LoadTransformOp::TransformKind::k32Splat:
3201 opcode = kS390_S128Load32Splat;
3202 break;
3203 case Simd128LoadTransformOp::TransformKind::k64Splat:
3204 opcode = kS390_S128Load64Splat;
3205 break;
3206 case Simd128LoadTransformOp::TransformKind::k8x8S:
3207 opcode = kS390_S128Load8x8S;
3208 break;
3209 case Simd128LoadTransformOp::TransformKind::k8x8U:
3210 opcode = kS390_S128Load8x8U;
3211 break;
3212 case Simd128LoadTransformOp::TransformKind::k16x4S:
3213 opcode = kS390_S128Load16x4S;
3214 break;
3215 case Simd128LoadTransformOp::TransformKind::k16x4U:
3216 opcode = kS390_S128Load16x4U;
3217 break;
3218 case Simd128LoadTransformOp::TransformKind::k32x2S:
3219 opcode = kS390_S128Load32x2S;
3220 break;
3221 case Simd128LoadTransformOp::TransformKind::k32x2U:
3222 opcode = kS390_S128Load32x2U;
3223 break;
3224 case Simd128LoadTransformOp::TransformKind::k32Zero:
3225 opcode = kS390_S128Load32Zero;
3226 break;
3227 case Simd128LoadTransformOp::TransformKind::k64Zero:
3228 opcode = kS390_S128Load64Zero;
3229 break;
3230 default:
3231 UNIMPLEMENTED();
3232 }
3233 VisitLoad(node, node, opcode);
3234}
3235
3236void InstructionSelectorT::VisitStoreLane(OpIndex node) {
3237 InstructionCode opcode = kArchNop;
3238 int32_t lane;
3239 const Simd128LaneMemoryOp& store =
3240 this->Get(node).template Cast<Simd128LaneMemoryOp>();
3241 lane = store.lane;
3242 switch (store.lane_kind) {
3243 case Simd128LaneMemoryOp::LaneKind::k8:
3244 opcode = kS390_S128Store8Lane;
3245 break;
3246 case Simd128LaneMemoryOp::LaneKind::k16:
3247 opcode = kS390_S128Store16Lane;
3248 break;
3249 case Simd128LaneMemoryOp::LaneKind::k32:
3250 opcode = kS390_S128Store32Lane;
3251 break;
3252 case Simd128LaneMemoryOp::LaneKind::k64:
3253 opcode = kS390_S128Store64Lane;
3254 break;
3255 }
3256 S390OperandGeneratorT g(this);
3257 InstructionOperand inputs[5];
3258 size_t input_count = 0;
3259
3260 inputs[input_count++] = g.UseRegister(this->input_at(node, 2));
3261 inputs[input_count++] = g.UseImmediate(lane);
3262
3263 AddressingMode mode =
3264 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
3265 opcode |= AddressingModeField::encode(mode);
3266 Emit(opcode, 0, nullptr, input_count, inputs);
3267}
3268
3269void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
3270 S390OperandGeneratorT g(this);
3271 Emit(kS390_I16x8DotI8x16S, g.DefineAsRegister(node),
3272 g.UseUniqueRegister(this->input_at(node, 0)),
3273 g.UseUniqueRegister(this->input_at(node, 1)));
3274}
3275
3276void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
3277 S390OperandGeneratorT g(this);
3278 InstructionOperand temps[] = {g.TempSimd128Register()};
3279 Emit(kS390_I32x4DotI8x16AddS, g.DefineAsRegister(node),
3280 g.UseUniqueRegister(this->input_at(node, 0)),
3281 g.UseUniqueRegister(this->input_at(node, 1)),
3282 g.UseUniqueRegister(this->input_at(node, 2)), arraysize(temps), temps);
3283}
3284
3285void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
3286 S390OperandGeneratorT g(this);
3287 const Operation& op = this->Get(node);
3288 InstructionCode opcode = kS390_Float32ToInt32;
3289 if (op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>()) {
3290 opcode |= MiscField::encode(true);
3291 }
3292 Emit(opcode, g.DefineAsRegister(node),
3293 g.UseRegister(this->input_at(node, 0)));
3294}
3295
3296void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
3297 S390OperandGeneratorT g(this);
3298 const Operation& op = this->Get(node);
3299 InstructionCode opcode = kS390_Float32ToUint32;
3300 if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
3301 opcode |= MiscField::encode(true);
3302 }
3303
3304 Emit(opcode, g.DefineAsRegister(node),
3305 g.UseRegister(this->input_at(node, 0)));
3306}
3307
3308void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
3309 int first_input_index,
3310 OpIndex node) {
3311 UNREACHABLE();
3312}
3313
3314MachineOperatorBuilder::Flags
3315InstructionSelector::SupportedMachineOperatorFlags() {
3316 return MachineOperatorBuilder::kFloat32RoundDown |
3317 MachineOperatorBuilder::kFloat64RoundDown |
3318 MachineOperatorBuilder::kFloat32RoundUp |
3319 MachineOperatorBuilder::kFloat64RoundUp |
3320 MachineOperatorBuilder::kFloat32RoundTruncate |
3321 MachineOperatorBuilder::kFloat64RoundTruncate |
3322 MachineOperatorBuilder::kFloat32RoundTiesEven |
3323 MachineOperatorBuilder::kFloat64RoundTiesEven |
3324 MachineOperatorBuilder::kFloat64RoundTiesAway |
3325 MachineOperatorBuilder::kWord32Popcnt |
3326 MachineOperatorBuilder::kInt32AbsWithOverflow |
3327 MachineOperatorBuilder::kInt64AbsWithOverflow |
3328 MachineOperatorBuilder::kWord64Popcnt;
3329}
3330
3331MachineOperatorBuilder::AlignmentRequirements
3332InstructionSelector::AlignmentRequirements() {
3333 return MachineOperatorBuilder::AlignmentRequirements::
3334 FullUnalignedAccessSupport();
3335}
3336
3337} // namespace compiler
3338} // namespace internal
3339} // namespace v8
const underlying_operation_t< Op > * TryCast(V< AnyOrNone > op_idx) const
bool MatchIntegralWord64Constant(V< Any > matched, uint64_t *constant) const
const underlying_operation_t< Op > & Cast(V< AnyOrNone > op_idx) const
bool MatchSignedIntegralConstant(V< Any > matched, int64_t *constant) const
bool MatchUnsignedIntegralConstant(V< Any > matched, uint64_t *constant) const
static constexpr RegisterRepresentation Compressed()
static constexpr RegisterRepresentation Simd128()
static constexpr RegisterRepresentation Word32()
static constexpr RegisterRepresentation Float64()
static constexpr RegisterRepresentation Float32()
static constexpr RegisterRepresentation Word64()
static constexpr RegisterRepresentation Tagged()
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define VISIT_ATOMIC_BINOP(op)
int32_t displacement
int32_t offset
DisplacementMode displacement_mode
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)
#define SIMD_VISIT_REPLACE_LANE(Type)
#define SIMD_VISIT_UNOP(Name, instruction)
#define SIMD_VISIT_BINOP(Name, instruction)
#define SIMD_RELAXED_OP_LIST(V)
#define SIMD_VISIT_RELAXED_OP(Name)
#define SIMD_TYPES(V)
#define F16_OP_LIST(V)
#define VISIT_F16_OP(name)
#define SIMD_VISIT_QFMOP(Opcode)
#define Or32OperandMode
#define AddOperandMode
#define Shift64OperandMode
#define OpcodeImmMode(op)
#define Xor32OperandMode
#define VISIT_ATOMIC64_BINOP(op)
#define WORD32_BIN_OP_LIST(V)
#define SubOperandMode
#define FLOAT_BIN_OP_LIST(V)
#define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad)
#define WORD64_UNARY_OP_LIST(V)
#define DECLARE_BIN_OP(type, name, op, mode, try_extra)
#define SIMD_BINOP_UNIQUE_REGISTER_LIST(V)
#define And64OperandMode
#define SIMD_VISIT_UNOP_UNIQUE_REGISTER(Opcode)
#define FLOAT_UNARY_OP_LIST(V)
#define SIMD_UNOP_UNIQUE_REGISTER_LIST(V)
#define SIMD_VISIT_BINOP_UNIQUE_REGISTER(Opcode)
#define VISIT_OP_LIST(V)
#define Xor64OperandMode
#define Or64OperandMode
#define WORD32_UNARY_OP_LIST(V)
#define WORD64_BIN_OP_LIST(V)
#define DECLARE_UNARY_OP(type, name, op, mode, try_extra)
Node * node
ZoneVector< RpoNumber > & result
uint32_t const mask
#define SIMD_UNOP_LIST(V)
#define SIMD_BINOP_LIST(V)
int m
Definition mul-fft.cc:294
int int32_t
Definition unicode.cc:40
constexpr unsigned CountTrailingZeros64(uint64_t value)
Definition bits.h:164
constexpr unsigned CountTrailingZeros32(uint32_t value)
Definition bits.h:161
constexpr unsigned CountLeadingZeros64(uint64_t value)
Definition bits.h:125
constexpr unsigned CountPopulation(T value)
Definition bits.h:26
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr unsigned CountLeadingZeros32(uint32_t value)
Definition bits.h:122
bool any_of(const C &container, const P &predicate)
auto Reversed(T &t)
Definition iterator.h:105
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
WordBinopMask::For< WordBinopOp::Kind::kBitwiseAnd, WordRepresentation::Word64()> kWord64BitwiseAnd
Definition opmasks.h:171
ShiftMask::For< ShiftOp::Kind::kShiftLeft, WordRepresentation::Word64()> kWord64ShiftLeft
Definition opmasks.h:227
WordBinopMask::For< WordBinopOp::Kind::kBitwiseAnd, WordRepresentation::Word32()> kWord32BitwiseAnd
Definition opmasks.h:159
WordBinopMask::For< WordBinopOp::Kind::kBitwiseOr, WordRepresentation::Word32()> kWord32BitwiseOr
Definition opmasks.h:161
WordBinopMask::For< WordBinopOp::Kind::kBitwiseXor, WordRepresentation::Word32()> kWord32BitwiseXor
Definition opmasks.h:163
WordBinopMask::For< WordBinopOp::Kind::kSub, WordRepresentation::Word64()> kWord64Sub
Definition opmasks.h:167
WordBinopMask::For< WordBinopOp::Kind::kSub, WordRepresentation::Word32()> kWord32Sub
Definition opmasks.h:148
WordBinopMask::For< WordBinopOp::Kind::kBitwiseOr, WordRepresentation::Word64()> kWord64BitwiseOr
Definition opmasks.h:173
WordBinopMask::For< WordBinopOp::Kind::kBitwiseXor, WordRepresentation::Word64()> kWord64BitwiseXor
Definition opmasks.h:175
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
Definition graph.h:1231
static bool TryMatchInt32MulWithOverflow(InstructionSelectorT *selector, OpIndex node)
void VisitAtomicBinop(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode, AtomicWidth width)
void VisitAtomicExchange(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
static bool IsContiguousMask32(uint32_t value, int *mb, int *me)
ArchOpcode SelectLoadOpcode(MemoryRepresentation loaded_rep, RegisterRepresentation result_rep, ImmediateMode *mode)
static bool TryMatchNegFromSub(InstructionSelectorT *selector, OpIndex node)
static bool TryMatchInt64AddWithOverflow(InstructionSelectorT *selector, OpIndex node)
void EmitInt64MulWithOverflow(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
static Instruction * VisitCompare(InstructionSelectorT *selector, InstructionCode opcode, InstructionOperand left, InstructionOperand right, FlagsContinuationT *cont)
void VisitFloat32Compare(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
base::Flags< OperandMode, uint32_t > OperandModes
static int32_t Pack4Lanes(const uint8_t *shuffle)
std::optional< BaseWithScaledIndexAndDisplacementMatch > TryMatchBaseWithScaledIndexAndDisplacement64(InstructionSelectorT *selector, OpIndex node)
static bool TryMatchInt32AddWithOverflow(InstructionSelectorT *selector, OpIndex node)
static bool TryMatchSignExtInt16OrInt8FromWord32Sar(InstructionSelectorT *selector, OpIndex node)
Instruction * VisitWordCompare(InstructionSelectorT *selector, OpIndex node, InstructionCode opcode, FlagsContinuationT *cont, bool commutative)
static bool CompareLogical(FlagsContinuationT *cont)
void VisitFloat64Compare(InstructionSelectorT *selector, OpIndex node, FlagsContinuationT *cont)
RecordWriteMode WriteBarrierKindToRecordWriteMode(WriteBarrierKind write_barrier_kind)
static void VisitGeneralStore(InstructionSelectorT *selector, OpIndex node, MachineRepresentation rep, WriteBarrierKind write_barrier_kind=kNoWriteBarrier)
static bool TryMatchInt32OpWithOverflow(InstructionSelectorT *selector, OpIndex node, OperandModes mode)
static bool IsContiguousMask64(uint64_t value, int *mb, int *me)
static bool TryMatchInt64SubWithOverflow(InstructionSelectorT *selector, OpIndex node)
bool TryMatchShiftFromMul(InstructionSelectorT *selector, OpIndex node)
static bool TryMatchInt32SubWithOverflow(InstructionSelectorT *selector, OpIndex node)
static void VisitShift(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
void VisitAtomicCompareExchange(InstructionSelectorT *selector, OpIndex node, ArchOpcode opcode)
static bool TryMatchDoubleConstructFromInsert(InstructionSelectorT *selector, OpIndex node)
static bool TryMatchInt64OpWithOverflow(InstructionSelectorT *selector, OpIndex node, OperandModes mode)
bool TryCast(Tagged< From > value, Tagged< To > *out)
Definition casting.h:77
constexpr int ElementSizeInBits(MachineRepresentation rep)
constexpr int kSimd128Size
Definition globals.h:706
switch(set_by_)
Definition flags.cc:3669
const int kStackFrameExtraParamSlot
constexpr bool CanBeTaggedOrCompressedPointer(MachineRepresentation rep)
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long mode(MIPS/PPC only)") DEFINE_BOOL(partial_constant_pool
constexpr bool IsAnyTagged(MachineRepresentation rep)
constexpr bool IsAnyCompressed(MachineRepresentation rep)
constexpr int kSystemPointerSize
Definition globals.h:410
constexpr bool SmiValuesAre31Bits()
const int kHeapObjectTag
Definition v8-internal.h:72
V8_EXPORT_PRIVATE FlagValues v8_flags
return value
Definition map-inl.h:893
Tagged< To > Cast(Tagged< From > value, const v8::SourceLocation &loc=INIT_SOURCE_LOCATION_IN_DEBUG)
Definition casting.h:150
Operation
Definition operation.h:43
#define shr(value, bits)
Definition sha-256.cc:31
#define UNREACHABLE()
Definition logging.h:67
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define UNIMPLEMENTED()
Definition logging.h:66
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define USE(...)
Definition macros.h:293
#define arraysize(array)
Definition macros.h:67
turboshaft::OpIndex input_at(turboshaft::OpIndex node, size_t index) const
base::Vector< const turboshaft::OpIndex > inputs(turboshaft::OpIndex node) const
bool IsLoadOrLoadImmutable(turboshaft::OpIndex node) const
turboshaft::Opcode opcode(turboshaft::OpIndex node) const
const underlying_operation_t< Op > * TryCast() const
Definition operations.h:990
underlying_operation_t< Op > & Cast()
Definition operations.h:980