instruction-selector-riscv32.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
// Assumed (reconstructed) includes; the listing omits the remaining ones.
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/backend/riscv/instruction-selector-riscv.h"
#include "src/compiler/turboshaft/opmasks.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...) PrintF(__VA_ARGS__)

bool RiscvOperandGeneratorT::CanBeImmediate(int64_t value,
                                            InstructionCode opcode) {
  switch (ArchOpcodeField::decode(opcode)) {
    case kRiscvShl32:
    case kRiscvSar32:
    case kRiscvShr32:
      return is_uint5(value);
    case kRiscvAdd32:
    case kRiscvAnd32:
    case kRiscvAnd:
    case kRiscvOr32:
    case kRiscvOr:
    case kRiscvTst32:
    case kRiscvXor:
      return is_int12(value);
    case kRiscvLb:
    case kRiscvLbu:
    case kRiscvSb:
    case kRiscvLh:
    case kRiscvLhu:
    case kRiscvSh:
    case kRiscvLw:
    case kRiscvSw:
    case kRiscvLoadFloat:
    case kRiscvStoreFloat:
    case kRiscvLoadDouble:
    case kRiscvStoreDouble:
      return is_int32(value);
    default:
      return is_int12(value);
  }
}
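
// Background on the ranges above: RV32I encodes arithmetic/logical
// immediates (addi, andi, ori, xori) in a signed 12-bit field, so only
// values in [-2048, 2047] fold into the instruction, while shift amounts
// (slli/srli/srai) occupy a 5-bit field and must lie in [0, 31]. The memory
// opcodes accept any int32 offset here, presumably because the code
// generator is free to synthesize a large offset with an extra lui/add
// pair.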

void EmitLoad(InstructionSelectorT* selector, OpIndex node,
              InstructionCode opcode, OpIndex output = OpIndex()) {
  RiscvOperandGeneratorT g(selector);
  const Operation& op = selector->Get(node);
  const LoadOp& load = op.Cast<LoadOp>();
  // The LoadStoreSimplificationReducer transforms all loads into
  // *(base + index).
  OpIndex base = load.base();
  OptionalOpIndex index = load.index();
  DCHECK_EQ(load.offset, 0);
  DCHECK_EQ(load.element_size_log2, 0);

  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand output_op;

  // If output is valid, use that as the output register. This is used when we
  // merge a conversion into the load.
  output_op = g.DefineAsRegister(output.valid() ? output : node);

  const Operation& base_op = selector->Get(base);
  if (base_op.Is<Opmask::kExternalConstant>() && g.IsIntegerConstant(index)) {
    const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
    if (selector->CanAddressRelativeToRootsRegister(
            constant_base.external_reference())) {
      ptrdiff_t const delta =
          *g.GetOptionalIntegerConstant(index.value()) +
          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
              selector->isolate(), constant_base.external_reference());
      input_count = 1;
      // Check that the delta is a 32-bit integer due to the limitations of
      // immediate operands.
      if (is_int32(delta)) {
        inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
        opcode |= AddressingModeField::encode(kMode_Root);
        selector->Emit(opcode, 1, &output_op, input_count, inputs);
        return;
      }
    }
  }

  if (base_op.Is<LoadRootRegisterOp>()) {
    DCHECK(g.IsIntegerConstant(index));
    input_count = 1;
    inputs[0] = g.UseImmediate64(*g.GetOptionalIntegerConstant(index.value()));
    opcode |= AddressingModeField::encode(kMode_Root);
    selector->Emit(opcode, 1, &output_op, input_count, inputs);
    return;
  }

  if (load.index().has_value() && g.CanBeImmediate(index.value(), opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output.valid() ? output : node),
                   g.UseRegister(base), g.UseImmediate(index.value()));
  } else {
    if (index.has_value()) {
      InstructionOperand addr_reg = g.TempRegister();
      selector->Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None),
                     addr_reg, g.UseRegister(index.value()),
                     g.UseRegister(base));
      // Emit desired load opcode, using temp addr_reg.
      selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                     g.DefineAsRegister(output.valid() ? output : node),
                     addr_reg, g.TempImmediate(0));
    } else {
      selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                     g.DefineAsRegister(output.valid() ? output : node),
                     g.UseRegister(base), g.TempImmediate(0));
    }
  }
}
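
// For illustration: the fallback path above produces a two-instruction
// sequence, roughly
//   add  addr, index, base   // kRiscvAdd32 into a temp register
//   lw   dst, 0(addr)        // the requested load, kMode_MRI offset 0
// while an index that passes CanBeImmediate() folds straight into
//   lw   dst, index(base).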

void EmitS128Load(InstructionSelectorT* selector, OpIndex node,
                  InstructionCode opcode, VSew sew, Vlmul lmul) {
  RiscvOperandGeneratorT g(selector);
  OpIndex base = selector->input_at(node, 0);
  OpIndex index = selector->input_at(node, 1);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index), g.UseImmediate(sew),
                   g.UseImmediate(lmul));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
                   g.UseImmediate(sew), g.UseImmediate(lmul));
  }
}
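
// Note on the trailing operands: sew and lmul are passed through as
// immediates and describe the RVV element width (E8..E64) and the
// register-group multiplier (m1, m2, ...); the code generator presumably
// emits a matching vsetvli before the vector load itself.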

void InstructionSelectorT::VisitStoreLane(OpIndex node) {
  const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();
  InstructionCode opcode = kRiscvS128StoreLane;
  opcode |= LaneSizeField::encode(store.lane_size() * kBitsPerByte);
  if (store.kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  }

  RiscvOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);
  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd32, addr_reg, g.UseRegister(base), g.UseRegister(index));
  InstructionOperand inputs[4] = {
      g.UseRegister(input_at(node, 2)),
      g.UseImmediate(store.lane),
      addr_reg,
      g.TempImmediate(0),
  };
  opcode |= AddressingModeField::encode(kMode_MRI);
  Emit(opcode, 0, nullptr, 4, inputs);
}

void InstructionSelectorT::VisitLoadLane(OpIndex node) {
  const Simd128LaneMemoryOp& load = this->Get(node).Cast<Simd128LaneMemoryOp>();
  InstructionCode opcode = kRiscvS128LoadLane;
  opcode |= LaneSizeField::encode(load.lane_size() * kBitsPerByte);
  if (load.kind.with_trap_handler) {
    opcode |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
  }

  RiscvOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);
  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd32, addr_reg, g.UseRegister(base), g.UseRegister(index));
  opcode |= AddressingModeField::encode(kMode_MRI);
  Emit(opcode, g.DefineSameAsFirst(node),
       g.UseRegister(this->input_at(node, 2)), g.UseImmediate(load.lane),
       addr_reg, g.TempImmediate(0));
}

void InstructionSelectorT::VisitLoad(OpIndex node) {
  auto load = this->load_view(node);
  LoadRepresentation load_rep = load.loaded_rep();
  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kRiscvLoadFloat;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kRiscvLoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kRiscvLw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kRiscvRvvLd;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kNone:               // Fall through.
    case MachineRepresentation::kSimd256:            // Fall through.
    case MachineRepresentation::kProtectedPointer:   // Fall through.
    case MachineRepresentation::kIndirectPointer:    // Fall through.
    case MachineRepresentation::kFloat16:
      UNREACHABLE();
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelectorT::VisitStorePair(OpIndex node) { UNREACHABLE(); }

void InstructionSelectorT::VisitStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  StoreView store_view = this->store_view(node);
  OpIndex base = store_view.base();
  OptionalOpIndex index = store_view.index();
  OpIndex value = store_view.value();

  WriteBarrierKind write_barrier_kind =
      store_view.stored_rep().write_barrier_kind();
  const MachineRepresentation rep = store_view.stored_rep().representation();

  // TODO(riscv): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier && index.has_value() &&
      V8_LIKELY(!v8_flags.disable_write_barriers)) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[4];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(this->value(index));
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code;
    if (rep == MachineRepresentation::kIndirectPointer) {
      DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier);
      // In this case we need to add the IndirectPointerTag as an additional
      // input.
      code = kArchStoreIndirectWithWriteBarrier;
      IndirectPointerTag tag = store_view.indirect_pointer_tag();
      inputs[input_count++] = g.UseImmediate64(static_cast<int64_t>(tag));
    } else {
      code = kArchStoreWithWriteBarrier;
    }
    code |= RecordWriteModeField::encode(record_write_mode);
    if (store_view.is_store_trap_on_null()) {
      code |= AccessModeField::encode(kMemoryAccessProtectedNullDereference);
    }
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    InstructionCode code;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        code = kRiscvStoreFloat;
        break;
      case MachineRepresentation::kFloat64:
        code = kRiscvStoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        code = kRiscvSb;
        break;
      case MachineRepresentation::kWord16:
        code = kRiscvSh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord32:
        code = kRiscvSw;
        break;
      case MachineRepresentation::kSimd128:
        code = kRiscvRvvSt;
        break;
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:         // Fall through.
      case MachineRepresentation::kSandboxedPointer:   // Fall through.
      case MachineRepresentation::kMapWord:            // Fall through.
      case MachineRepresentation::kWord64:             // Fall through.
      case MachineRepresentation::kNone:               // Fall through.
      case MachineRepresentation::kSimd256:            // Fall through.
      case MachineRepresentation::kProtectedPointer:   // Fall through.
      case MachineRepresentation::kIndirectPointer:    // Fall through.
      case MachineRepresentation::kFloat16:
        UNREACHABLE();
    }

    if (this->is_load_root_register(base)) {
      Emit(code | AddressingModeField::encode(kMode_Root), g.NoOutput(),
           g.UseRegisterOrImmediateZero(value),
           index.has_value() ? g.UseImmediate(this->value(index))
                             : g.UseImmediate(0));
      return;
    }

    if (index.has_value() && g.CanBeImmediate(this->value(index), code)) {
      Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
           index.has_value() ? g.UseImmediate(this->value(index))
                             : g.UseImmediate(0));
    } else {
      if (index.has_value()) {
        InstructionOperand addr_reg = g.TempRegister();
        Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), addr_reg,
             g.UseRegister(this->value(index)), g.UseRegister(base));
        // Emit desired store opcode, using temp addr_reg.
        Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
             g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0));
      } else {
        Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
             g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
             g.UseImmediate(0));
      }
    }
  }
}
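
// Background on the barrier path above: a tagged store that needs a write
// barrier becomes a single kArchStoreWithWriteBarrier (or, for indirect
// pointer fields, kArchStoreIndirectWithWriteBarrier) instruction; its code
// generation performs the store and conditionally calls the record-write
// stub, which is presumably why base, index, and value are forced into
// unique registers that must not alias the scratch temps.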

void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelectorT::VisitWord32And(OpIndex node) {
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvAnd, true, kRiscvAnd);
}

void InstructionSelectorT::VisitWord32Or(OpIndex node) {
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvOr, true, kRiscvOr);
}

void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvXor, true, kRiscvXor);
}

void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNIMPLEMENTED(); }

void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
  VisitRRO(this, kRiscvRor32, node);
}

void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  if (CpuFeatures::IsSupported(ZBB)) {
    Emit(kRiscvRev8, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));
  } else {
    Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
         g.UseRegister(this->input_at(node, 0)));
  }
}
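
// Background: with the Zbb bit-manipulation extension, a 32-bit byte swap
// is the single rev8 instruction; without it, kRiscvByteSwap32 presumably
// expands into a multi-instruction shift-and-or sequence during code
// generation.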

void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitWord32Ctz(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCtz, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  Emit(kRiscvCpop, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitInt32Add(OpIndex node) {
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvAdd32, true, kRiscvAdd32);
}

void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvSub32);
}

void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
  VisitRRR(this, kRiscvMul32, node);
}

void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHigh32, node);
}

void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
  VisitRRR(this, kRiscvMulHighU32, node);
}

void InstructionSelectorT::VisitInt32Div(OpIndex node) {
  VisitRRR(this, kRiscvDiv32, node,
           OperandGenerator::RegisterUseKind::kUseUniqueRegister);
}

void InstructionSelectorT::VisitUint32Div(OpIndex node) {
  VisitRRR(this, kRiscvDivU32, node,
           OperandGenerator::RegisterUseKind::kUseUniqueRegister);
}

void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
  VisitRRR(this, kRiscvMod32, node);
}

void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
  VisitRRR(this, kRiscvModU32, node);
}

void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDS, node);
}

void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSW, node);
}

void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvCvtSUw, node);
}

void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDW, node);
}

void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
  VisitRR(this, kRiscvCvtDUw, node);
}

void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);

  const Operation& op = this->Get(node);
  InstructionCode opcode = kRiscvTruncWS;
  if (op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node),
       g.UseRegister(this->input_at(node, 0)));
}

void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
  RiscvOperandGeneratorT g(this);

  const Operation& op = this->Get(node);
  InstructionCode opcode = kRiscvTruncUwS;
  if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
    opcode |= MiscField::encode(true);
  }

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
}

void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  OpIndex value = this->input_at(node, 0);
  using Rep = turboshaft::RegisterRepresentation;
  if (CanCover(node, value)) {
    const turboshaft::Operation& op = this->Get(value);
    if (op.Is<turboshaft::ChangeOp>()) {
      const turboshaft::ChangeOp& change = op.Cast<turboshaft::ChangeOp>();
      if (change.kind == turboshaft::ChangeOp::Kind::kFloatConversion) {
        if (change.from == Rep::Float32() && change.to == Rep::Float64()) {
          Emit(kRiscvTruncWS, g.DefineAsRegister(node),
               g.UseRegister(this->input_at(value, 0)));
          return;
        }
      }
    }
  }
  VisitRR(this, kRiscvTruncWD, node);
}
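
// Why the fusion above is sound: Float32 -> Float64 widening is exact, so
// truncating the original float32 with kRiscvTruncWS yields the same int32
// as widening first and truncating the float64 with kRiscvTruncWD. When
// this truncation is the only user of the conversion (CanCover), the
// widening instruction can be skipped entirely.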

void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
  VisitRR(this, kRiscvTruncUwD, node);
}

void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
  VisitRR(this, kRiscvTruncUwD, node);
}

void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
  VisitRR(this, kRiscvBitcastFloat32ToInt32, node);
}

void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
  VisitRR(this, kRiscvBitcastInt32ToFloat32, node);
}

void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundDown, node);
}

void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundUp, node);
}

void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundUp, node);
}

void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundTruncate, node);
}

void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundTruncate, node);
}

void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
  VisitRR(this, kRiscvFloat32RoundTiesEven, node);
}

void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
  VisitRR(this, kRiscvFloat64RoundTiesEven, node);
}

void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
  VisitRR(this, kRiscvNegS, node);
}

void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
  VisitRR(this, kRiscvNegD, node);
}

void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
                                                    InstructionCode opcode) {
  RiscvOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, fa0),
       g.UseFixed(this->input_at(node, 0), fa0),
       g.UseFixed(this->input_at(node, 1), fa1))
      ->MarkAsCall();
}

void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
                                                   InstructionCode opcode) {
  RiscvOperandGeneratorT g(this);
  Emit(opcode, g.DefineAsFixed(node, fa0),
       g.UseFixed(this->input_at(node, 0), fa1))
      ->MarkAsCall();
}

void InstructionSelectorT::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    OpIndex node) {
  RiscvOperandGeneratorT g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot << kSystemPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
      Emit(kRiscvStackClaim, g.NoOutput(),
           g.TempImmediate(arguments->size() << kSystemPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node.valid()) {
        Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
      }
    }
  }
}

void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) {
  auto load = this->load_view(node);
  LoadRepresentation load_rep = load.loaded_rep();
  RiscvOperandGeneratorT g(this);
  OpIndex base = load.base();
  OpIndex index = load.index();

  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kRiscvULoadFloat;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kRiscvULoadDouble;
      break;
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kRiscvUlw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kRiscvRvvLd;
      break;
    case MachineRepresentation::kSimd256:            // Fall through.
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kProtectedPointer:   // Fall through.
    case MachineRepresentation::kIndirectPointer:    // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kNone:               // Fall through.
    case MachineRepresentation::kFloat16:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
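
// Note: the kRiscvULoadFloat/kRiscvUlh/kRiscvUlw opcodes are the unaligned
// variants. Misaligned scalar accesses are not guaranteed to be fast (or
// supported at all) on every RISC-V core, so the code generator presumably
// lowers these to byte-granular load/assemble sequences where needed; byte
// loads (kRiscvLb/kRiscvLbu) need no special handling.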

void InstructionSelectorT::VisitUnalignedStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);
  OpIndex value = this->input_at(node, 2);

  MachineRepresentation rep = store_view(node).stored_rep().representation();
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kRiscvUStoreFloat;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kRiscvUStoreDouble;
      break;
    case MachineRepresentation::kWord8:
      opcode = kRiscvSb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kRiscvUsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kRiscvUsw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kRiscvRvvSt;
      break;
    case MachineRepresentation::kSimd256:            // Fall through.
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kProtectedPointer:   // Fall through.
    case MachineRepresentation::kIndirectPointer:    // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kNone:               // Fall through.
    case MachineRepresentation::kFloat16:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

namespace {

void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
                      FlagsContinuationT* cont) {
  VisitWordCompare(selector, node, kRiscvCmp, cont, false);
}

void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
                     ArchOpcode opcode, AtomicWidth width) {
  using OpIndex = turboshaft::OpIndex;
  RiscvOperandGeneratorT g(selector);
  auto load = selector->load_view(node);
  OpIndex base = load.base();
  OpIndex index = load.index();
  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
                      ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGeneratorT g(selector);
  using OpIndex = turboshaft::OpIndex;
  auto store = selector->store_view(node);
  OpIndex base = store.base();
  OpIndex index = selector->value(store.index());
  OpIndex value = store.value();

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.NoOutput(), g.UseRegisterOrImmediateZero(value),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd32 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.NoOutput(), g.UseRegisterOrImmediateZero(value), addr_reg,
                   g.TempImmediate(0));
  }
}

void VisitAtomicBinop(InstructionSelectorT* selector, OpIndex node,
                      ArchOpcode opcode) {
  RiscvOperandGeneratorT g(selector);
  using OpIndex = turboshaft::OpIndex;
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temps[4];
  temps[0] = g.TempRegister();
  temps[1] = g.TempRegister();
  temps[2] = g.TempRegister();
  temps[3] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}

}  // namespace

void InstructionSelectorT::VisitStackPointerGreaterThan(
    OpIndex node, FlagsContinuationT* cont) {
  StackCheckKind kind;
  OpIndex value;
  const auto& op = this->turboshaft_graph()
                       ->Get(node)
                       .template Cast<turboshaft::StackPointerGreaterThanOp>();
  kind = op.kind;
  value = op.stack_limit();
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  RiscvOperandGeneratorT g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}

// Shared routine for word comparisons against zero.
void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
                                                FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (const ComparisonOp* equal =
             this->TryCast<Opmask::kWord32Equal>(value)) {
    if (!CanCover(user, value)) break;
    if (!MatchIntegralZero(equal->right())) break;

    user = value;
    value = equal->left();
    cont->Negate();
  }

  const Operation& value_op = Get(value);
  if (CanCover(user, value)) {
    if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
      switch (comparison->rep.value()) {
        case RegisterRepresentation::Word32():
          cont->OverwriteAndNegateIfEqual(
              GetComparisonFlagCondition(*comparison));
          return VisitWordCompare(this, value, cont);
        case RegisterRepresentation::Float32():
          switch (comparison->kind) {
            case ComparisonOp::Kind::kEqual:
              cont->OverwriteAndNegateIfEqual(kEqual);
              return VisitFloat32Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThan:
              cont->OverwriteAndNegateIfEqual(kFloatLessThan);
              return VisitFloat32Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThanOrEqual:
              cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
              return VisitFloat32Compare(this, value, cont);
            default:
              UNREACHABLE();
          }
        case RegisterRepresentation::Float64():
          switch (comparison->kind) {
            case ComparisonOp::Kind::kEqual:
              cont->OverwriteAndNegateIfEqual(kEqual);
              return VisitFloat64Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThan:
              cont->OverwriteAndNegateIfEqual(kFloatLessThan);
              return VisitFloat64Compare(this, value, cont);
            case ComparisonOp::Kind::kSignedLessThanOrEqual:
              cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
              return VisitFloat64Compare(this, value, cont);
            default:
              UNREACHABLE();
          }
        default:
          break;
      }
    } else if (const ProjectionOp* projection =
                   value_op.TryCast<ProjectionOp>()) {
      // Check if this is the overflow output projection of an
      // <Operation>WithOverflow node.
      if (projection->index == 1u) {
        // We cannot combine the <Operation>WithOverflow with this branch
        // unless the 0th projection (the use of the actual value of the
        // <Operation>) is either nullptr, which means there's no use of the
        // actual value, or was already defined, which means it is scheduled
        // *AFTER* this branch.
        OpIndex node = projection->input();
        if (const OverflowCheckedBinopOp* binop =
                TryCast<OverflowCheckedBinopOp>(node);
            binop && CanDoBranchIfOverflowFusion(node)) {
          const bool is64 = binop->rep == WordRepresentation::Word64();
          if (is64) {
            UNREACHABLE();
          } else {
            switch (binop->kind) {
              case OverflowCheckedBinopOp::Kind::kSignedAdd:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kRiscvAddOvf,
                                                     cont);
              case OverflowCheckedBinopOp::Kind::kSignedSub:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kRiscvSubOvf,
                                                     cont);
              case OverflowCheckedBinopOp::Kind::kSignedMul:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32,
                                                     cont);
            }
          }
        }
      }
    }
  }

  // Continuation could not be combined with a compare, emit compare against
  // 0.
  EmitWordCompareZero(this, value, cont);
}
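
// Example of the overflow fusion above: for
//   ovf = Int32AddWithOverflow(a, b);  if (Projection(ovf, 1)) { ... }
// the branch is combined with the arithmetic by emitting kRiscvAddOvf under
// a kOverflow flags continuation, instead of materializing a boolean from
// the overflow bit and then comparing it against zero.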

void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
  const Operation& equal = Get(node);
  DCHECK(equal.Is<ComparisonOp>());
  OpIndex left = equal.input(0);
  OpIndex right = equal.input(1);
  OpIndex user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);

  if (MatchZero(right)) {
    return VisitWordCompareZero(user, left, &cont);
  }
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}

void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvAddOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvAddOvf, &cont);
}

void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvSubOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvSubOvf, &cont);
}

void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
  OptionalOpIndex ovf = FindProjection(node, 1);
  if (ovf.valid() && IsUsed(ovf.value())) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
    return VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
}

void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
  auto load = this->load_view(node);
  ArchOpcode opcode;
  LoadRepresentation load_rep = load.loaded_rep();
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
  }
  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
  auto store = this->store_view(node);
  AtomicStoreParameters store_params(store.stored_rep().representation(),
                                     store.stored_rep().write_barrier_kind(),
                                     store.memory_order().value(),
                                     store.access_kind());
  MachineRepresentation rep = store_params.representation();
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
  }

  VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}

void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
                         ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGeneratorT g(selector);
  using OpIndex = turboshaft::OpIndex;
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex value = atomic_op.value();

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void VisitAtomicCompareExchange(InstructionSelectorT* selector, OpIndex node,
                                ArchOpcode opcode, AtomicWidth width) {
  using OpIndex = turboshaft::OpIndex;
  RiscvOperandGeneratorT g(selector);
  const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
  OpIndex base = atomic_op.base();
  OpIndex index = atomic_op.index();
  OpIndex old_value = atomic_op.expected().value();
  OpIndex new_value = atomic_op.value();

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
  ArchOpcode opcode;

  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
  ArchOpcode opcode;

  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
    OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  ArchOpcode opcode;

  const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
  if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
    opcode = int8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
    opcode = uint8_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
    opcode = int16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
    opcode = uint16_op;
  } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
             atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
  }

  VisitAtomicBinop(this, node, opcode);
}

#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) {       \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
  UNREACHABLE();
}

void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
  UNREACHABLE();
}

template <unsigned N>
static void VisitInt32PairBinop(InstructionSelectorT* selector,
                                InstructionCode pair_opcode,
                                InstructionCode single_opcode, OpIndex node) {
  static_assert(N == 3 || N == 4,
                "Pair operations can only have 3 or 4 inputs");

  RiscvOperandGeneratorT g(selector);
  OptionalOpIndex projection1 = selector->FindProjection(node, 1);

  if (projection1.valid()) {
    InstructionOperand outputs[] = {g.DefineAsRegister(node),
                                    g.DefineAsRegister(projection1.value())};

    if constexpr (N == 3) {
      // We use UseUniqueRegister here to avoid register sharing with the
      // output register.
      InstructionOperand inputs[] = {
          g.UseUniqueRegister(selector->input_at(node, 0)),
          g.UseUniqueRegister(selector->input_at(node, 1)),
          g.UseUniqueRegister(selector->input_at(node, 2))};

      selector->Emit(pair_opcode, 2, outputs, N, inputs);

    } else if constexpr (N == 4) {
      // We use UseUniqueRegister here to avoid register sharing with the
      // output register.
      InstructionOperand inputs[] = {
          g.UseUniqueRegister(selector->input_at(node, 0)),
          g.UseUniqueRegister(selector->input_at(node, 1)),
          g.UseUniqueRegister(selector->input_at(node, 2)),
          g.UseUniqueRegister(selector->input_at(node, 3))};

      selector->Emit(pair_opcode, 2, outputs, N, inputs);
    }

  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    selector->Emit(single_opcode, g.DefineSameAsFirst(node),
                   g.UseRegister(selector->input_at(node, 0)),
                   g.UseRegister(selector->input_at(node, 2)));
  }
}
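
// Background: on RV32 a 64-bit value lives in a (low, high) register pair,
// so a pair binop receives its operands as inputs 0/1 (left low/high) and
// 2/3 (right low/high). When projection 1 (the high word of the result) is
// unused, inputs 0 and 2 are exactly the two low words, which is why the
// single 32-bit opcode in the else-branch reads those two inputs.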

void InstructionSelectorT::VisitInt32PairAdd(OpIndex node) {
  VisitInt32PairBinop<4>(this, kRiscvAddPair, kRiscvAdd32, node);
}

void InstructionSelectorT::VisitInt32PairSub(OpIndex node) {
  VisitInt32PairBinop<4>(this, kRiscvSubPair, kRiscvSub32, node);
}

void InstructionSelectorT::VisitInt32PairMul(OpIndex node) {
  VisitInt32PairBinop<4>(this, kRiscvMulPair, kRiscvMul32, node);
}

void InstructionSelectorT::VisitI64x2SplatI32Pair(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand low = g.UseRegister(this->input_at(node, 0));
  InstructionOperand high = g.UseRegister(this->input_at(node, 1));
  Emit(kRiscvI64x2SplatI32Pair, g.DefineAsRegister(node), low, high);
}

void InstructionSelectorT::VisitI64x2ReplaceLaneI32Pair(OpIndex node) {
  // In turboshaft it gets lowered to an I32x4ReplaceLane.
  UNREACHABLE();
}

// Shared routine for multiple shift operations.
static void VisitWord32PairShift(InstructionSelectorT* selector,
                                 InstructionCode opcode, OpIndex node) {
  RiscvOperandGeneratorT g(selector);
  InstructionOperand shift_operand;
  OpIndex shift_by = selector->input_at(node, 2);
  if (g.IsIntegerConstant(shift_by)) {
    shift_operand = g.UseImmediate(shift_by);
  } else {
    shift_operand = g.UseUniqueRegister(shift_by);
  }

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {
      g.UseUniqueRegister(selector->input_at(node, 0)),
      g.UseUniqueRegister(selector->input_at(node, 1)), shift_operand};

  OptionalOpIndex projection1 = selector->FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsRegister(projection1.value());
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}

void InstructionSelectorT::VisitWord32PairShl(OpIndex node) {
  VisitWord32PairShift(this, kRiscvShlPair, node);
}

void InstructionSelectorT::VisitWord32PairShr(OpIndex node) {
  VisitWord32PairShift(this, kRiscvShrPair, node);
}

void InstructionSelectorT::VisitWord32PairSar(OpIndex node) {
  VisitWord32PairShift(this, kRiscvSarPair, node);
}

void InstructionSelectorT::VisitWord32AtomicPairLoad(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  OpIndex base = this->input_at(node, 0);
  OpIndex index = this->input_at(node, 1);

  ArchOpcode opcode = kRiscvWord32AtomicPairLoad;
  AddressingMode addressing_mode = kMode_MRI;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(t0);
  InstructionOperand outputs[2];
  size_t output_count = 0;

  OptionalOpIndex projection0 = this->FindProjection(node, 0);
  OptionalOpIndex projection1 = this->FindProjection(node, 1);
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), a0);
  } else {
    temps[temp_count++] = g.TempRegister(a0);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), a1);
  } else {
    temps[temp_count++] = g.TempRegister(a1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
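
// Note: RV32 has no native 64-bit atomic load, so the result is pinned to
// the fixed register pair a0/a1 (with t0 as an address temp), matching the
// fixed-register contract of the kRiscvWord32AtomicPairLoad sequence;
// presumably this is implemented via an out-of-line helper call, which is
// also why unused result halves still occupy a0/a1 as temps.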

void InstructionSelectorT::VisitWord32AtomicPairStore(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  const AtomicWord32PairOp& store = Cast<AtomicWord32PairOp>(node);

  OpIndex base = store.base();
  OpIndex index = store.index().value();
  OpIndex value_low = store.value_low().value();
  OpIndex value_high = store.value_high().value();

  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value_low, a1),
                                 g.UseFixed(value_high, a2)};
  InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
                                g.TempRegister()};
  Emit(kRiscvWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
       nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
}

void VisitPairAtomicBinop(InstructionSelectorT* selector, OpIndex node,
                          ArchOpcode opcode) {
  using OpIndex = turboshaft::OpIndex;
  RiscvOperandGeneratorT g(selector);
  OpIndex base = selector->input_at(node, 0);
  OpIndex index = selector->input_at(node, 1);
  OpIndex value = selector->input_at(node, 2);
  OpIndex value_high = selector->input_at(node, 3);

  AddressingMode addressing_mode = kMode_None;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value, a1),
                                 g.UseFixed(value_high, a2)};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(t0);

  OptionalOpIndex projection0 = selector->FindProjection(node, 0);
  OptionalOpIndex projection1 = selector->FindProjection(node, 1);
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), a0);
  } else {
    temps[temp_count++] = g.TempRegister(a0);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), a1);
  } else {
    temps[temp_count++] = g.TempRegister(a1);
  }
  selector->Emit(code, output_count, outputs, arraysize(inputs), inputs,
                 temp_count, temps);
}

void InstructionSelectorT::VisitWord32AtomicPairAdd(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairAdd);
}

void InstructionSelectorT::VisitWord32AtomicPairSub(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairSub);
}

void InstructionSelectorT::VisitWord32AtomicPairAnd(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairAnd);
}

void InstructionSelectorT::VisitWord32AtomicPairOr(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairOr);
}

void InstructionSelectorT::VisitWord32AtomicPairXor(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairXor);
}

void InstructionSelectorT::VisitWord32AtomicPairExchange(OpIndex node) {
  VisitPairAtomicBinop(this, node, kRiscvWord32AtomicPairExchange);
}

void InstructionSelectorT::VisitWord32AtomicPairCompareExchange(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  // In both the Turbofan and the Turboshaft graph, the order of the expected
  // value and the new value is swapped.
  const size_t expected_offset = 4;
  const size_t value_offset = 2;
  InstructionOperand inputs[] = {
      g.UseRegister(this->input_at(node, 0)),
      g.UseRegister(this->input_at(node, 1)),
      g.UseFixed(this->input_at(node, expected_offset), a1),
      g.UseFixed(this->input_at(node, expected_offset + 1), a2),
      g.UseFixed(this->input_at(node, value_offset), a3),
      g.UseFixed(this->input_at(node, value_offset + 1), a4)};

  InstructionCode code = kRiscvWord32AtomicPairCompareExchange |
                         AddressingModeField::encode(kMode_MRI);
  OptionalOpIndex projection0 = this->FindProjection(node, 0);
  OptionalOpIndex projection1 = this->FindProjection(node, 1);
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(t0);
  if (projection0.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection0.value(), a0);
  } else {
    temps[temp_count++] = g.TempRegister(a0);
  }
  if (projection1.valid()) {
    outputs[output_count++] = g.DefineAsFixed(projection1.value(), a1);
  } else {
    temps[temp_count++] = g.TempRegister(a1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}

void InstructionSelectorT::VisitF64x2Min(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);
  const int32_t kNaN = 0x7ff80000L, kNaNShift = 32;
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
             g.UseImmediate(m1));

  InstructionOperand temp3 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp4 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp5 = g.TempFpRegister(kSimd128ScratchReg);
  this->Emit(kRiscvVmv, temp3, g.UseImmediate(kNaN), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVsll, temp4, temp3, g.UseImmediate(kNaNShift),
             g.UseImmediate(E64), g.UseImmediate(m1));
  this->Emit(kRiscvVfminVv, temp5, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1), g.UseImmediate(Mask));
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), temp5, g.UseImmediate(E64),
             g.UseImmediate(m1));
}
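
// How the NaN propagation above works: vmfeq.vv of a register with itself
// yields 0 exactly in NaN lanes, so ANDing the two self-comparisons leaves
// a mask of lanes where both inputs are ordered. The scratch register is
// pre-filled with the canonical quiet NaN pattern (0x7ff80000 splatted,
// then shifted left by 32 to form 0x7ff8000000000000), and the masked
// vfmin.vv overwrites only the ordered lanes, so NaN lanes come out as
// canonical NaNs. VisitF64x2Max below is identical except for vfmax.vv.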

void InstructionSelectorT::VisitF64x2Max(OpIndex node) {
  RiscvOperandGeneratorT g(this);
  InstructionOperand temp1 = g.TempFpRegister(v0);
  InstructionOperand mask_reg = g.TempFpRegister(v0);
  InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);
  const int32_t kNaN = 0x7ff80000L, kNaNShift = 32;
  this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
             g.UseImmediate(m1));

  InstructionOperand temp3 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp4 = g.TempFpRegister(kSimd128ScratchReg);
  InstructionOperand temp5 = g.TempFpRegister(kSimd128ScratchReg);
  this->Emit(kRiscvVmv, temp3, g.UseImmediate(kNaN), g.UseImmediate(E64),
             g.UseImmediate(m1));
  this->Emit(kRiscvVsll, temp4, temp3, g.UseImmediate(kNaNShift),
             g.UseImmediate(E64), g.UseImmediate(m1));
  this->Emit(kRiscvVfmaxVv, temp5, g.UseRegister(this->input_at(node, 1)),
             g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
             g.UseImmediate(m1), g.UseImmediate(Mask));
  this->Emit(kRiscvVmv, g.DefineAsRegister(node), temp5, g.UseImmediate(E64),
             g.UseImmediate(m1));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  flags |= MachineOperatorBuilder::kInt32DivIsSafe |
           MachineOperatorBuilder::kUint32DivIsSafe |
           MachineOperatorBuilder::kFloat32RoundDown |
           MachineOperatorBuilder::kFloat32RoundUp |
           MachineOperatorBuilder::kFloat32RoundTruncate |
           MachineOperatorBuilder::kFloat32RoundTiesEven;
  if (CpuFeatures::IsSupported(ZBB)) {
    flags |= MachineOperatorBuilder::kWord32Ctz |
             MachineOperatorBuilder::kWord32Popcnt;
  }
  return flags;
}
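
// Background on kInt32DivIsSafe/kUint32DivIsSafe: RISC-V integer division
// never traps; div/divu by zero simply produce an all-ones quotient and
// INT_MIN / -1 wraps, so the compiler may use the division instructions
// without emitting explicit zero checks. The Float32Round* flags mirror the
// kRiscvFloat32Round* instructions selected above, and Ctz/Popcnt are only
// advertised when Zbb provides ctz/cpop.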

#undef TRACE
}  // namespace compiler
}  // namespace internal
}  // namespace v8