v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-selector-riscv64.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/bits.h"
6#include "src/base/logging.h"
15
16namespace v8 {
17namespace internal {
18namespace compiler {
19
21 InstructionCode opcode) {
23 case kRiscvShl32:
24 case kRiscvSar32:
25 case kRiscvShr32:
26 return is_uint5(value);
27 case kRiscvShl64:
28 case kRiscvSar64:
29 case kRiscvShr64:
30 return is_uint6(value);
31 case kRiscvAdd32:
32 case kRiscvAnd32:
33 case kRiscvAnd:
34 case kRiscvAdd64:
35 case kRiscvOr32:
36 case kRiscvOr:
37 case kRiscvTst64:
38 case kRiscvTst32:
39 case kRiscvXor:
40 return is_int12(value);
41 case kRiscvLb:
42 case kRiscvLbu:
43 case kRiscvSb:
44 case kRiscvLh:
45 case kRiscvLhu:
46 case kRiscvSh:
47 case kRiscvLw:
48 case kRiscvSw:
49 case kRiscvLd:
50 case kRiscvSd:
51 case kRiscvLoadFloat:
52 case kRiscvStoreFloat:
53 case kRiscvLoadDouble:
54 case kRiscvStoreDouble:
55 return is_int32(value);
56 default:
57 return is_int12(value);
58 }
59}
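// Note: as an assumption drawn from the RISC-V ISA rather than from this
// file, the helper above reflects these encoding limits:
//   - I-type ALU ops and scalar loads/stores take a 12-bit signed immediate,
//     i.e. values in [-2048, 2047] (is_int12).
//   - 32-bit shift amounts are 5 bits (is_uint5, 0..31); 64-bit shift
//     amounts are 6 bits (is_uint6, 0..63).
// Values outside these ranges must first be materialized into a register.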
60
61struct ExtendingLoadMatcher {
63 : matches_(false), selector_(selector), immediate_(0) {
64 Initialize(node);
65 }
66
67 bool Matches() const { return matches_; }
68
69 OpIndex base() const {
70 DCHECK(Matches());
71 return base_;
72 }
73 int64_t immediate() const {
74 DCHECK(Matches());
75 return immediate_;
76 }
78 DCHECK(Matches());
79 return opcode_;
80 }
81
82 private:
83 bool matches_;
84 InstructionSelectorT* selector_;
85 OpIndex base_{};
86 int64_t immediate_;
87 ArchOpcode opcode_;
88
90 const ShiftOp& shift = selector_->Get(node).template Cast<ShiftOp>();
91 DCHECK(shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
92 shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros);
93 // When loading a 64-bit value and shifting by 32, we should
94 // just load and sign-extend the interesting 4 bytes instead.
95 // This happens, for example, when we're loading and untagging SMIs.
96 const Operation& lhs = selector_->Get(shift.left());
97 int64_t constant_rhs;
98
99 if (lhs.Is<LoadOp>() &&
100 selector_->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
101 constant_rhs == 32 && selector_->CanCover(node, shift.left())) {
103 const LoadOp& load = lhs.Cast<LoadOp>();
104 base_ = load.base();
105 opcode_ = kRiscvLw;
106 if (load.index().has_value()) {
107 int64_t index_constant;
108 if (selector_->MatchIntegralWord64Constant(load.index().value(),
109 &index_constant)) {
110 DCHECK_EQ(load.element_size_log2, 0);
111 immediate_ = index_constant + 4;
112 matches_ = g.CanBeImmediate(immediate_, kRiscvLw);
113 }
114 } else {
115 immediate_ = load.offset + 4;
116 matches_ = g.CanBeImmediate(immediate_, kRiscvLw);
117 }
118 }
119 }
120};
121
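// A rough sketch of the pattern ExtendingLoadMatcher recognizes, assuming
// V8's little-endian layout on RISC-V:
//   Word64Sar(Load[Int64](base, offset), 32)
// can be replaced by a sign-extending 32-bit load of the upper half:
//   lw rd, offset + 4(base)
// TryEmitExtendingLoad below emits exactly that when the adjusted offset
// still fits in a 12-bit immediate.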
122bool TryEmitExtendingLoad(InstructionSelectorT* selector, OpIndex node,
123 OpIndex output_node) {
124 ExtendingLoadMatcher m(node, selector);
125 RiscvOperandGeneratorT g(selector);
126 if (m.Matches()) {
127 InstructionOperand inputs[2];
128 inputs[0] = g.UseRegister(m.base());
129 InstructionCode opcode =
130 m.opcode() | AddressingModeField::encode(kMode_MRI);
131 DCHECK(is_int32(m.immediate()));
132 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
133 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
134 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
135 inputs);
136 return true;
137 }
138 return false;
139}
140
141void EmitLoad(InstructionSelectorT* selector, OpIndex node,
142 InstructionCode opcode, OpIndex output = OpIndex()) {
143 RiscvOperandGeneratorT g(selector);
144
145 const Operation& op = selector->Get(node);
146 const LoadOp& load = op.Cast<LoadOp>();
147
148 // The LoadStoreSimplificationReducer transforms all loads into
149 // *(base + index).
150 OpIndex base = load.base();
151 OpIndex index = load.index().value();
152 DCHECK_EQ(load.offset, 0);
153 DCHECK_EQ(load.element_size_log2, 0);
154
155 InstructionOperand inputs[3];
156 size_t input_count = 0;
157 InstructionOperand output_op;
158
159 // If output is valid, use that as the output register. This is used when we
160 // merge a conversion into the load.
161 output_op = g.DefineAsRegister(output.valid() ? output : node);
162 int64_t index_value;
163 const Operation& base_op = selector->Get(base);
164 if (base_op.Is<Opmask::kExternalConstant>() &&
165 selector->MatchSignedIntegralConstant(index, &index_value)) {
166 const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
167 if (selector->CanAddressRelativeToRootsRegister(
168 constant_base.external_reference())) {
169 ptrdiff_t const delta =
170 index_value +
172 selector->isolate(), constant_base.external_reference());
173 input_count = 1;
174 // Check that the delta is a 32-bit integer due to the limitations of
175 // immediate operands.
176 if (is_int32(delta)) {
177 inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
178 opcode |= AddressingModeField::encode(kMode_Root);
179 selector->Emit(opcode, 1, &output_op, input_count, inputs);
180 return;
181 }
182 }
183 }
184
185 if (base_op.Is<LoadRootRegisterOp>()) {
186 int64_t index_value;
187 selector->MatchSignedIntegralConstant(index, &index_value);
188 input_count = 1;
189 inputs[0] = g.UseImmediate64(index_value);
190 opcode |= AddressingModeField::encode(kMode_Root);
191 selector->Emit(opcode, 1, &output_op, input_count, inputs);
192 return;
193 }
194
195 if (g.CanBeImmediate(index, opcode)) {
196 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
197 g.DefineAsRegister(output.valid() ? output : node),
198 g.UseRegister(base), g.UseImmediate(index));
199 } else {
200 InstructionOperand addr_reg = g.TempRegister();
201 selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
202 addr_reg, g.UseRegister(index), g.UseRegister(base));
203 // Emit desired load opcode, using temp addr_reg.
204 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
205 g.DefineAsRegister(output.valid() ? output : node), addr_reg,
206 g.TempImmediate(0));
207 }
208}
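// A sketch of the addressing forms EmitLoad can select, assuming the opcodes
// map onto the usual RISC-V encodings:
//   kMode_Root: lw/ld rd, delta(root_register)   // external refs near roots
//   kMode_MRI:  lw/ld rd, imm12(base)            // index fits in 12 bits
//   otherwise:  add tmp, base, index; lw/ld rd, 0(tmp)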
209
210void EmitS128Load(InstructionSelectorT* selector, OpIndex node,
211 InstructionCode opcode, VSew sew, Vlmul lmul) {
212 RiscvOperandGeneratorT g(selector);
213 OpIndex base = selector->input_at(node, 0);
214 OpIndex index = selector->input_at(node, 1);
215 if (g.CanBeImmediate(index, opcode)) {
216 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
217 g.DefineAsRegister(node), g.UseRegister(base),
218 g.UseImmediate(index), g.UseImmediate(sew),
219 g.UseImmediate(lmul));
220 } else {
221 InstructionOperand addr_reg = g.TempRegister();
222 selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
223 addr_reg, g.UseRegister(index), g.UseRegister(base));
224 // Emit desired load opcode, using temp addr_reg.
225 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
226 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
227 g.UseImmediate(sew), g.UseImmediate(lmul));
228 }
229}
230
231void InstructionSelectorT::VisitStoreLane(OpIndex node) {
232 const Simd128LaneMemoryOp& store = Get(node).Cast<Simd128LaneMemoryOp>();
233 InstructionCode opcode = kRiscvS128StoreLane;
234 opcode |= LaneSizeField::encode(store.lane_size() * kBitsPerByte);
235 if (store.kind.with_trap_handler) {
237 }
238
239 RiscvOperandGeneratorT g(this);
240 OpIndex base = this->input_at(node, 0);
241 OpIndex index = this->input_at(node, 1);
242 InstructionOperand addr_reg = g.TempRegister();
243 Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
244 InstructionOperand inputs[4] = {
245 g.UseRegister(input_at(node, 2)),
246 g.UseImmediate(store.lane),
247 addr_reg,
248 g.TempImmediate(0),
249 };
251 Emit(opcode, 0, nullptr, 4, inputs);
252}
253
254void InstructionSelectorT::VisitLoadLane(OpIndex node) {
255 const Simd128LaneMemoryOp& load = this->Get(node).Cast<Simd128LaneMemoryOp>();
256 InstructionCode opcode = kRiscvS128LoadLane;
257 opcode |= LaneSizeField::encode(load.lane_size() * kBitsPerByte);
258 if (load.kind.with_trap_handler) {
260 }
261
262 RiscvOperandGeneratorT g(this);
263 OpIndex base = this->input_at(node, 0);
264 OpIndex index = this->input_at(node, 1);
265 InstructionOperand addr_reg = g.TempRegister();
266 Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
268 Emit(opcode, g.DefineSameAsFirst(node),
269 g.UseRegister(this->input_at(node, 2)), g.UseImmediate(load.lane),
270 addr_reg, g.TempImmediate(0));
271}
272
273namespace {
274ArchOpcode GetLoadOpcode(MemoryRepresentation loaded_rep,
275 RegisterRepresentation result_rep) {
276 // NOTE: The meaning of `loaded_rep` = `MemoryRepresentation::AnyTagged()` is
277 // we are loading a compressed tagged field, while `result_rep` =
278 // `RegisterRepresentation::Tagged()` refers to an uncompressed tagged value.
279
280 switch (loaded_rep) {
282 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
283 return kRiscvLb;
285 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
286 return kRiscvLbu;
288 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
289 return kRiscvLh;
291 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
292 return kRiscvLhu;
294 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
295 return kRiscvLw;
297 DCHECK_EQ(result_rep, RegisterRepresentation::Word32());
298 return kRiscvLwu;
300 case MemoryRepresentation::Uint64():
301 DCHECK_EQ(result_rep, RegisterRepresentation::Word64());
302 return kRiscvLd;
306 DCHECK_EQ(result_rep, RegisterRepresentation::Float32());
307 return kRiscvLoadFloat;
309 DCHECK_EQ(result_rep, RegisterRepresentation::Float64());
310 return kRiscvLoadDouble;
311#ifdef V8_COMPRESS_POINTERS
313 case MemoryRepresentation::TaggedPointer():
314 if (result_rep == RegisterRepresentation::Compressed()) {
315 return kRiscvLwu;
316 }
318 return kRiscvLoadDecompressTagged;
320 if (result_rep == RegisterRepresentation::Compressed()) {
321 return kRiscvLwu;
322 }
324 return kRiscvLoadDecompressTaggedSigned;
325#else
327 case MemoryRepresentation::TaggedPointer():
328 case MemoryRepresentation::TaggedSigned():
329 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
330 return kRiscvLd;
331#endif
333 case MemoryRepresentation::UncompressedTaggedPointer():
334 case MemoryRepresentation::UncompressedTaggedSigned():
335 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
336 return kRiscvLd;
339 return kRiscvLoadDecompressProtected;
341 UNREACHABLE();
343 return kRiscvLoadDecodeSandboxedPointer;
345 return kRiscvRvvLd;
347 UNREACHABLE();
348 }
349}
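// Note on the compressed-pointer cases above: with V8_COMPRESS_POINTERS,
// tagged fields are 32 bits wide, so a compressed result only needs a
// zero-extending lwu, while an uncompressed Tagged() result also needs one of
// the decompression opcodes. Without pointer compression, tagged fields are
// full 64-bit words and a plain ld suffices.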
350
351ArchOpcode GetStoreOpcode(MemoryRepresentation stored_rep) {
352 switch (stored_rep) {
354 case MemoryRepresentation::Uint8():
355 return kRiscvSb;
357 case MemoryRepresentation::Uint16():
358 return kRiscvSh;
360 case MemoryRepresentation::Uint32():
361 return kRiscvSw;
363 case MemoryRepresentation::Uint64():
364 return kRiscvSd;
368 return kRiscvStoreFloat;
370 return kRiscvStoreDouble;
372 case MemoryRepresentation::TaggedPointer():
373 case MemoryRepresentation::TaggedSigned():
374 return kRiscvStoreCompressTagged;
376 case MemoryRepresentation::UncompressedTaggedPointer():
377 case MemoryRepresentation::UncompressedTaggedSigned():
378 return kRiscvSd;
380 // We never store directly to protected pointers from generated code.
381 UNREACHABLE();
383 return kRiscvStoreIndirectPointer;
385 return kRiscvStoreEncodeSandboxedPointer;
387 return kRiscvRvvSt;
389 UNREACHABLE();
390 }
391}
392} // namespace
393
395 auto load = this->load_view(node);
396 InstructionCode opcode = kArchNop;
397 opcode = GetLoadOpcode(load.ts_loaded_rep(), load.ts_result_rep());
398 bool traps_on_null;
399 if (load.is_protected(&traps_on_null)) {
400 if (traps_on_null) {
402 } else {
404 }
405 }
406 EmitLoad(this, node, opcode);
407}
408
409void InstructionSelectorT::VisitStorePair(OpIndex node) { UNREACHABLE(); }
410
411void InstructionSelectorT::VisitProtectedLoad(OpIndex node) { VisitLoad(node); }
412
413void InstructionSelectorT::VisitStore(OpIndex node) {
414 RiscvOperandGeneratorT g(this);
415 StoreView store_view = this->store_view(node);
418 OpIndex index = this->value(store_view.index());
419 OpIndex value = store_view.value();
420
421 WriteBarrierKind write_barrier_kind =
424
425 // TODO(riscv): I guess this could be done in a better way.
426 if (write_barrier_kind != kNoWriteBarrier &&
427 V8_LIKELY(!v8_flags.disable_write_barriers)) {
429 InstructionOperand inputs[4];
430 size_t input_count = 0;
431 inputs[input_count++] = g.UseUniqueRegister(base);
432 inputs[input_count++] = g.UseUniqueRegister(index);
433 inputs[input_count++] = g.UseUniqueRegister(value);
434 RecordWriteMode record_write_mode =
435 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
436 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
437 size_t const temp_count = arraysize(temps);
440 DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier);
441 // In this case we need to add the IndirectPointerTag as additional input.
442 code = kArchStoreIndirectWithWriteBarrier;
444 inputs[input_count++] = g.UseImmediate64(static_cast<int64_t>(tag));
445 } else {
446 code = kArchStoreWithWriteBarrier;
447 }
448 code |= RecordWriteModeField::encode(record_write_mode);
451 }
452 Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
453 return;
454 }
455
457 code = GetStoreOpcode(store_view.ts_stored_rep());
458
459 if (this->is_load_root_register(base)) {
460 Emit(code | AddressingModeField::encode(kMode_Root), g.NoOutput(),
461 g.UseRegisterOrImmediateZero(value), g.UseImmediate(index));
462 return;
463 }
464
467 } else if (store_view.access_kind() ==
470 }
471
472 if (g.CanBeImmediate(index, code)) {
473 Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
474 g.UseRegisterOrImmediateZero(value), g.UseRegister(base),
475 g.UseImmediate(index));
476 } else {
477 InstructionOperand addr_reg = g.TempRegister();
478 Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
479 g.UseRegister(index), g.UseRegister(base));
480 // Emit desired store opcode, using temp addr_reg.
481 Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
482 g.UseRegisterOrImmediateZero(value), addr_reg, g.TempImmediate(0));
483 }
484}
485
486void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
487 VisitStore(node);
488}
489
490void InstructionSelectorT::VisitWord32And(OpIndex node) {
491 VisitBinop<Int32BinopMatcher>(this, node, kRiscvAnd32, true, kRiscvAnd32);
492}
493
494void InstructionSelectorT::VisitWord64And(OpIndex node) {
495 VisitBinop<Int64BinopMatcher>(this, node, kRiscvAnd, true, kRiscvAnd);
496}
497
498void InstructionSelectorT::VisitWord32Or(OpIndex node) {
499 VisitBinop<Int32BinopMatcher>(this, node, kRiscvOr32, true, kRiscvOr32);
500}
501
502void InstructionSelectorT::VisitWord64Or(OpIndex node) {
503 VisitBinop<Int64BinopMatcher>(this, node, kRiscvOr, true, kRiscvOr);
504}
505
506void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
507 VisitBinop<Int32BinopMatcher>(this, node, kRiscvXor32, true, kRiscvXor32);
508}
509
510void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
511 VisitBinop<Int64BinopMatcher>(this, node, kRiscvXor, true, kRiscvXor);
512}
513
514void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
515 const ShiftOp& shift_op = this->Get(node).template Cast<ShiftOp>();
516 const Operation& lhs = this->Get(shift_op.left());
517 const Operation& rhs = this->Get(shift_op.right());
518 if ((lhs.Is<Opmask::kChangeInt32ToInt64>() ||
520 rhs.Is<Opmask::kWord32Constant>()) {
521 int64_t shift_by = rhs.Cast<ConstantOp>().signed_integral();
522 if (base::IsInRange(shift_by, 32, 63) && CanCover(node, shift_op.left())) {
523 RiscvOperandGeneratorT g(this);
524 // There's no need to sign/zero-extend to 64-bit if we shift out the
525 // upper 32 bits anyway.
526 Emit(kRiscvShl64, g.DefineSameAsFirst(node),
527 g.UseRegister(lhs.Cast<ChangeOp>().input()),
528 g.UseImmediate64(shift_by));
529 return;
530 }
531 }
532 VisitRRO(this, kRiscvShl64, node);
533}
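// Example of the combine above: for Word64Shl(ChangeInt32ToInt64(x), 40),
// only bits 0..23 of x survive the shift (they land in bits 40..63), so the
// sign-extension bits never reach the result and the ChangeInt32ToInt64 can
// be skipped. The same holds for any shift amount in [32, 63].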
534
535void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
536 VisitRRO(this, kRiscvShr64, node);
537}
538
539void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
540 if (TryEmitExtendingLoad(this, node, node)) return;
541 // Select a 32-bit arithmetic shift for Word64Sar(ChangeInt32ToInt64(x), imm)
542 // where possible.
543 const ShiftOp& shiftop = Get(node).Cast<ShiftOp>();
544 const Operation& lhs = Get(shiftop.left());
545
546 int64_t constant_rhs;
547 if (lhs.Is<Opmask::kChangeInt32ToInt64>() &&
548 MatchIntegralWord64Constant(shiftop.right(), &constant_rhs) &&
549 is_uint5(constant_rhs) && CanCover(node, shiftop.left())) {
550 // Don't do this if a sign-extending load combined with the shift can be
551 // selected instead for Word64Sar(ChangeInt32ToInt64(Load(x)), imm).
552 OpIndex input = lhs.Cast<ChangeOp>().input();
553 if (!Get(input).Is<LoadOp>() || !CanCover(shiftop.left(), input)) {
554 RiscvOperandGeneratorT g(this);
555 int right = static_cast<int>(constant_rhs);
556 Emit(kRiscvSar32, g.DefineAsRegister(node), g.UseRegister(input),
557 g.UseImmediate(right));
558 return;
559 }
560 }
561 VisitRRO(this, kRiscvSar64, node);
562}
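// The combine above relies on the identity
//   Word64Sar(ChangeInt32ToInt64(x), k) == ChangeInt32ToInt64(Word32Sar(x, k))
// for k in [0, 31], so a single 32-bit arithmetic shift yields the correctly
// sign-extended 64-bit result. The load case is excluded because fusing a
// sign-extending lw with the shift is preferable there.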
563
564void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNIMPLEMENTED(); }
565
566void InstructionSelectorT::VisitWord64Rol(OpIndex node) { UNREACHABLE(); }
567
568void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
569 VisitRRO(this, kRiscvRor32, node);
570}
571
572void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
573 UNREACHABLE();
574}
575
576void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {
577 UNREACHABLE();
578}
579
580void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
581 RiscvOperandGeneratorT g(this);
582 if (CpuFeatures::IsSupported(ZBB)) {
583 Emit(kRiscvRev8, g.DefineAsRegister(node),
584 g.UseRegister(this->input_at(node, 0)));
585 } else {
586 Emit(kRiscvByteSwap64, g.DefineAsRegister(node),
587 g.UseRegister(this->input_at(node, 0)));
588 }
589}
590
591void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
592 RiscvOperandGeneratorT g(this);
593 if (CpuFeatures::IsSupported(ZBB)) {
594 InstructionOperand temp = g.TempRegister();
595 Emit(kRiscvRev8, temp, g.UseRegister(this->input_at(node, 0)));
596 Emit(kRiscvShr64, g.DefineAsRegister(node), temp, g.TempImmediate(32));
597 } else {
598 Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
599 g.UseRegister(this->input_at(node, 0)));
600 }
601}
602
603void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
604 UNREACHABLE();
605}
606
607void InstructionSelectorT::VisitWord32Ctz(OpIndex node) {
608 RiscvOperandGeneratorT g(this);
609 Emit(kRiscvCtzw, g.DefineAsRegister(node),
610 g.UseRegister(this->input_at(node, 0)));
611}
612
613void InstructionSelectorT::VisitWord64Ctz(OpIndex node) {
614 RiscvOperandGeneratorT g(this);
615 Emit(kRiscvCtz, g.DefineAsRegister(node),
616 g.UseRegister(this->input_at(node, 0)));
617}
618
619void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
620 RiscvOperandGeneratorT g(this);
621 Emit(kRiscvCpopw, g.DefineAsRegister(node),
622 g.UseRegister(this->input_at(node, 0)));
623}
624
625void InstructionSelectorT::VisitWord64Popcnt(OpIndex node) {
626 RiscvOperandGeneratorT g(this);
627 Emit(kRiscvCpop, g.DefineAsRegister(node),
628 g.UseRegister(this->input_at(node, 0)));
629}
630
631void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
632 VisitRRO(this, kRiscvRor64, node);
633}
634
635void InstructionSelectorT::VisitWord64Clz(OpIndex node) {
636 VisitRR(this, kRiscvClz64, node);
637}
638
639void InstructionSelectorT::VisitInt32Add(OpIndex node) {
640 VisitBinop<Int32BinopMatcher>(this, node, kRiscvAdd32, true, kRiscvAdd32);
641}
642
643void InstructionSelectorT::VisitInt64Add(OpIndex node) {
644 VisitBinop<Int64BinopMatcher>(this, node, kRiscvAdd64, true, kRiscvAdd64);
645}
646
647void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
648 VisitBinop<Int32BinopMatcher>(this, node, kRiscvSub32);
649}
650
651void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
652 VisitBinop<Int64BinopMatcher>(this, node, kRiscvSub64);
653}
654
655void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
656 OpIndex left = this->input_at(node, 0);
657 OpIndex right = this->input_at(node, 1);
658 if (CanCover(node, left) && CanCover(node, right)) {
659 const Operation& left_op = this->Get(left);
660 const Operation& right_op = this->Get(right);
661 if (left_op.Is<Opmask::kWord64ShiftRightLogical>() &&
662 right_op.Is<Opmask::kWord64ShiftRightLogical>()) {
663 RiscvOperandGeneratorT g(this);
664 int64_t constant_left;
665 MatchSignedIntegralConstant(this->input_at(left, 1), &constant_left);
666 int64_t constant_right;
667 MatchSignedIntegralConstant(this->input_at(right, 1), &constant_right);
 668      if (constant_left == 32 && constant_right == 32) {
669 // Combine untagging shifts with Dmul high.
670 Emit(kRiscvMulHigh64, g.DefineSameAsFirst(node),
671 g.UseRegister(this->input_at(left, 0)),
672 g.UseRegister(this->input_at(right, 0)));
673 return;
674 }
675 }
676 }
677 VisitRRR(this, kRiscvMul32, node);
678}
679
680void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
681 VisitRRR(this, kRiscvMulHigh32, node);
682}
683
684void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
685 return VisitRRR(this, kRiscvMulHigh64, node);
686}
687
688void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
689 VisitRRR(this, kRiscvMulHighU32, node);
690}
691
692void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
693 VisitRRR(this, kRiscvMulHighU64, node);
694}
695
696void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
697 VisitRRR(this, kRiscvMul64, node);
698}
699
700void InstructionSelectorT::VisitInt32Div(OpIndex node) {
701 VisitRRR(this, kRiscvDiv32, node,
703}
704
705void InstructionSelectorT::VisitUint32Div(OpIndex node) {
706 VisitRRR(this, kRiscvDivU32, node,
708}
709
710void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
711 VisitRRR(this, kRiscvMod32, node);
712}
713
714void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
715 VisitRRR(this, kRiscvModU32, node);
716}
717
718void InstructionSelectorT::VisitInt64Div(OpIndex node) {
719 VisitRRR(this, kRiscvDiv64, node,
721}
722
723void InstructionSelectorT::VisitUint64Div(OpIndex node) {
724 VisitRRR(this, kRiscvDivU64, node,
726}
727
728void InstructionSelectorT::VisitInt64Mod(OpIndex node) {
729 VisitRRR(this, kRiscvMod64, node);
730}
731
732void InstructionSelectorT::VisitUint64Mod(OpIndex node) {
733 VisitRRR(this, kRiscvModU64, node);
734}
735
736void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
737 VisitRR(this, kRiscvCvtDS, node);
738}
739
740void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
741 VisitRR(this, kRiscvCvtSW, node);
742}
743
744void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
745 VisitRR(this, kRiscvCvtSUw, node);
746}
747
748void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
749 VisitRR(this, kRiscvCvtDW, node);
750}
751
752void InstructionSelectorT::VisitChangeInt64ToFloat64(OpIndex node) {
753 VisitRR(this, kRiscvCvtDL, node);
754}
755
756void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
757 VisitRR(this, kRiscvCvtDUw, node);
758}
759
760void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
761 RiscvOperandGeneratorT g(this);
762
763 const Operation& op = this->Get(node);
764 InstructionCode opcode = kRiscvTruncWS;
765 opcode |=
767 Emit(opcode, g.DefineAsRegister(node),
768 g.UseRegister(this->input_at(node, 0)));
769}
770
771void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
772 RiscvOperandGeneratorT g(this);
773
774 const Operation& op = this->Get(node);
775 InstructionCode opcode = kRiscvTruncUwS;
777 opcode |= MiscField::encode(true);
778 }
779
780 Emit(opcode, g.DefineAsRegister(node),
781 g.UseRegister(this->input_at(node, 0)));
782}
783
784void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
785 RiscvOperandGeneratorT g(this);
786 auto value = this->input_at(node, 0);
787 if (CanCover(node, value)) {
788 const Operation& op = this->Get(value);
789 if (const FloatUnaryOp* load = op.TryCast<FloatUnaryOp>()) {
790 DCHECK(load->rep == FloatRepresentation::Float64());
791 switch (load->kind) {
792 case FloatUnaryOp::Kind::kRoundDown:
793 Emit(kRiscvFloorWD, g.DefineAsRegister(node),
794 g.UseRegister(this->input_at(value, 0)));
795 return;
796 case FloatUnaryOp::Kind::kRoundUp:
797 Emit(kRiscvCeilWD, g.DefineAsRegister(node),
798 g.UseRegister(this->input_at(value, 0)));
799 return;
800 case FloatUnaryOp::Kind::kRoundToZero:
801 Emit(kRiscvTruncWD, g.DefineAsRegister(node),
802 g.UseRegister(this->input_at(value, 0)));
803 return;
804 case FloatUnaryOp::Kind::kRoundTiesEven:
805 Emit(kRiscvRoundWD, g.DefineAsRegister(node),
806 g.UseRegister(this->input_at(value, 0)));
807 return;
808 default:
809 break;
810 }
811 }
812 if (op.Is<ChangeOp>()) {
813 const ChangeOp& change = op.Cast<ChangeOp>();
814 using Rep = turboshaft::RegisterRepresentation;
815 if (change.from == Rep::Float32() && change.to == Rep::Float64()) {
816 auto next = this->input_at(value, 0);
817 if (CanCover(value, next)) {
818 const Operation& next_op = this->Get(next);
819 if (const FloatUnaryOp* round = next_op.TryCast<FloatUnaryOp>()) {
820 DCHECK(round->rep == FloatRepresentation::Float32());
821 switch (round->kind) {
822 case FloatUnaryOp::Kind::kRoundDown:
823 Emit(kRiscvFloorWS, g.DefineAsRegister(node),
824 g.UseRegister(this->input_at(next, 0)));
825 return;
826 case FloatUnaryOp::Kind::kRoundUp:
827 Emit(kRiscvCeilWS, g.DefineAsRegister(node),
828 g.UseRegister(this->input_at(next, 0)));
829 return;
830 case FloatUnaryOp::Kind::kRoundToZero:
831 Emit(kRiscvTruncWS, g.DefineAsRegister(node),
832 g.UseRegister(this->input_at(next, 0)));
833 return;
834 case FloatUnaryOp::Kind::kRoundTiesEven:
835 Emit(kRiscvRoundWS, g.DefineAsRegister(node),
836 g.UseRegister(this->input_at(next, 0)));
837 return;
838 default:
839 break;
840 }
841 }
842 }
843 // Match float32 -> float64 -> int32 representation change path.
844 Emit(kRiscvTruncWS, g.DefineAsRegister(node),
845 g.UseRegister(this->input_at(value, 0)));
846 return;
847 }
848 }
849 }
850 VisitRR(this, kRiscvTruncWD, node);
851}
852
853void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
854 RiscvOperandGeneratorT g(this);
855 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
856 InstructionOperand outputs[2];
857 size_t output_count = 0;
858 outputs[output_count++] = g.DefineAsRegister(node);
859
860 OptionalOpIndex success_output = FindProjection(node, 1);
861 if (success_output.valid()) {
862 outputs[output_count++] = g.DefineAsRegister(success_output.value());
863 }
864
865 this->Emit(kRiscvTruncWD, output_count, outputs, 1, inputs);
866}
867
868void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
869 RiscvOperandGeneratorT g(this);
870 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
871 InstructionOperand outputs[2];
872 size_t output_count = 0;
873 outputs[output_count++] = g.DefineAsRegister(node);
874
875 OptionalOpIndex success_output = FindProjection(node, 1);
876 if (success_output.valid()) {
877 outputs[output_count++] = g.DefineAsRegister(success_output.value());
878 }
879
880 Emit(kRiscvTruncUwD, output_count, outputs, 1, inputs);
881}
882
883void InstructionSelectorT::VisitChangeFloat64ToInt64(OpIndex node) {
884 VisitRR(this, kRiscvTruncLD, node);
885}
886
887void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
888 VisitRR(this, kRiscvTruncUwD, node);
889}
890
891void InstructionSelectorT::VisitChangeFloat64ToUint64(OpIndex node) {
892 VisitRR(this, kRiscvTruncUlD, node);
893}
894
895void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
896 VisitRR(this, kRiscvTruncUwD, node);
897}
898
899void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
900 RiscvOperandGeneratorT g(this);
901
902 InstructionCode opcode = kRiscvTruncLD;
903 const Operation& op = this->Get(node);
905 opcode |= MiscField::encode(true);
906 }
907
908 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
909}
910
911void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
912 RiscvOperandGeneratorT g(this);
913 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
914 InstructionOperand outputs[2];
915 size_t output_count = 0;
916 outputs[output_count++] = g.DefineAsRegister(node);
917
918 OptionalOpIndex success_output = FindProjection(node, 1);
919 if (success_output.valid()) {
920 outputs[output_count++] = g.DefineAsRegister(success_output.value());
921 }
922
923 this->Emit(kRiscvTruncLS, output_count, outputs, 1, inputs);
924}
925
926void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
927 RiscvOperandGeneratorT g(this);
928 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
929 InstructionOperand outputs[2];
930 size_t output_count = 0;
931 outputs[output_count++] = g.DefineAsRegister(node);
932
933 OptionalOpIndex success_output = FindProjection(node, 1);
934 if (success_output.valid()) {
935 outputs[output_count++] = g.DefineAsRegister(success_output.value());
936 }
937
938 Emit(kRiscvTruncLD, output_count, outputs, 1, inputs);
939}
940
941void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
942 RiscvOperandGeneratorT g(this);
943 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
944 InstructionOperand outputs[2];
945 size_t output_count = 0;
946 outputs[output_count++] = g.DefineAsRegister(node);
947
948 OptionalOpIndex success_output = FindProjection(node, 1);
949 if (success_output.valid()) {
950 outputs[output_count++] = g.DefineAsRegister(success_output.value());
951 }
952
953 Emit(kRiscvTruncUlS, output_count, outputs, 1, inputs);
954}
955
956void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
957 RiscvOperandGeneratorT g(this);
958
959 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
960 InstructionOperand outputs[2];
961 size_t output_count = 0;
962 outputs[output_count++] = g.DefineAsRegister(node);
963
964 OptionalOpIndex success_output = FindProjection(node, 1);
965 if (success_output.valid()) {
966 outputs[output_count++] = g.DefineAsRegister(success_output.value());
967 }
968
969 Emit(kRiscvTruncUlD, output_count, outputs, 1, inputs);
970}
971
972void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
975 RiscvOperandGeneratorT g(this);
976 Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
977 g.UseRegister(this->input_at(node, 0)));
978}
979
981 RiscvOperandGeneratorT g(selector);
982 OpIndex value = selector->input_at(node, 0);
983 selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
984 g.UseRegister(value));
985}
986
987void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
988 const ChangeOp& change_op = this->Get(node).template Cast<ChangeOp>();
989 const Operation& input_op = this->Get(change_op.input());
990 if (input_op.Is<LoadOp>() && CanCover(node, change_op.input())) {
991 // Generate sign-extending load.
992 LoadRepresentation load_rep =
993 this->load_view(change_op.input()).loaded_rep();
994 MachineRepresentation rep = load_rep.representation();
995 InstructionCode opcode = kArchNop;
996 switch (rep) {
997 case MachineRepresentation::kBit: // Fall through.
999 opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
1000 break;
1002 opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
1003 break;
1006 // Since BitcastElider may remove nodes of
1007 // IrOpcode::kTruncateInt64ToInt32 and directly use the inputs, values
1008 // with kWord64 can also reach this line.
1012 opcode = kRiscvLw;
1013 break;
1014 default:
1015 UNREACHABLE();
1016 }
1017 EmitLoad(this, change_op.input(), opcode, node);
1018 return;
1019 }
1020 EmitSignExtendWord(this, node);
1021}
1022
1023void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
1024 RiscvOperandGeneratorT g(this);
1025 OpIndex value = this->input_at(node, 0);
1026 if (ZeroExtendsWord32ToWord64(value)) {
1027 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
1028 return;
1029 }
1030 Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node), g.UseRegister(value));
1031}
1032
1033bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
1034 DCHECK(!this->Get(node).Is<PhiOp>());
1035 const Operation& op = this->Get(node);
1036 if (op.opcode == Opcode::kLoad) {
1037 auto load = this->load_view(node);
1038 LoadRepresentation load_rep = load.loaded_rep();
1039 if (load_rep.IsUnsigned()) {
1040 switch (load_rep.representation()) {
1043 return true;
1044 default:
1045 return false;
1046 }
1047 }
1048 }
1049  // All other 32-bit operations sign-extend their result into the upper 32 bits.
1050 return false;
1051}
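// Note: the assumption here is the RV64 convention that 32-bit operations
// leave their result sign-extended in a 64-bit register, so only the unsigned
// narrow loads (lbu/lhu), whose upper bits are known to be zero, can report
// that they already zero-extend to 64 bits.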
1052
1053void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
1054 RiscvOperandGeneratorT g(this);
1055 auto value = input_at(node, 0);
1056 if (CanCover(node, value)) {
1058 auto shift_value = input_at(value, 1);
1059 if (CanCover(value, input_at(value, 0)) &&
1060 TryEmitExtendingLoad(this, value, node)) {
1061 return;
1062 } else if (int64_t constant;
1063 MatchSignedIntegralConstant(shift_value, &constant)) {
1064 if (constant <= 63 && constant >= 32) {
1065 // After smi untagging no need for truncate. Combine sequence.
1066 Emit(kRiscvSar64, g.DefineSameAsFirst(node),
1067 g.UseRegister(input_at(value, 0)), g.UseImmediate64(constant));
1068 return;
1069 }
1070 }
1071 }
1072 }
1073  // The semantics of this machine IR are not well defined. For example, x86
1074  // zero-extends the truncated value, while arm treats the truncation as a
1075  // nop and leaves the upper 32 bits undefined; on RISC-V we sign-extend the
1076  // truncated value.
1077 Emit(kRiscvSignExtendWord, g.DefineAsRegister(node), g.UseRegister(value),
1078 g.TempImmediate(0));
1079}
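// The kRiscvSignExtendWord emitted above presumably lowers to the canonical
// RV64 truncation idiom sext.w rd, rs (addiw rd, rs, 0), which keeps the low
// 32 bits and sign-extends them into the upper half.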
1080
1081void InstructionSelectorT::VisitRoundInt64ToFloat32(OpIndex node) {
1082 VisitRR(this, kRiscvCvtSL, node);
1083}
1084
1085void InstructionSelectorT::VisitRoundInt64ToFloat64(OpIndex node) {
1086 VisitRR(this, kRiscvCvtDL, node);
1087}
1088
1089void InstructionSelectorT::VisitRoundUint64ToFloat32(OpIndex node) {
1090 VisitRR(this, kRiscvCvtSUl, node);
1091}
1092
1093void InstructionSelectorT::VisitRoundUint64ToFloat64(OpIndex node) {
1094 VisitRR(this, kRiscvCvtDUl, node);
1095}
1096
1097void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
1098 VisitRR(this, kRiscvBitcastFloat32ToInt32, node);
1099}
1100
1101void InstructionSelectorT::VisitBitcastFloat64ToInt64(OpIndex node) {
1102 VisitRR(this, kRiscvBitcastDL, node);
1103}
1104
1105void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
1106 VisitRR(this, kRiscvBitcastInt32ToFloat32, node);
1107}
1108
1109void InstructionSelectorT::VisitBitcastInt64ToFloat64(OpIndex node) {
1110 VisitRR(this, kRiscvBitcastLD, node);
1111}
1112
1113void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
1114 VisitRR(this, kRiscvFloat64RoundDown, node);
1115}
1116
1117void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
1118 VisitRR(this, kRiscvFloat32RoundUp, node);
1119}
1120
1121void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
1122 VisitRR(this, kRiscvFloat64RoundUp, node);
1123}
1124
1125void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
1126 VisitRR(this, kRiscvFloat32RoundTruncate, node);
1127}
1128
1129void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
1130 VisitRR(this, kRiscvFloat64RoundTruncate, node);
1131}
1132
1133void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {
1134 UNREACHABLE();
1135}
1136
1137void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
1138 VisitRR(this, kRiscvFloat32RoundTiesEven, node);
1139}
1140
1141void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
1142 VisitRR(this, kRiscvFloat64RoundTiesEven, node);
1143}
1144
1145void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
1146 VisitRR(this, kRiscvNegS, node);
1147}
1148
1149void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
1150 VisitRR(this, kRiscvNegD, node);
1151}
1152
1154 InstructionCode opcode) {
1155 RiscvOperandGeneratorT g(this);
1156 Emit(opcode, g.DefineAsFixed(node, fa0),
1157 g.UseFixed(this->input_at(node, 0), fa0),
1158 g.UseFixed(this->input_at(node, 1), fa1))
1159 ->MarkAsCall();
1160}
1161
1163 InstructionCode opcode) {
1164 RiscvOperandGeneratorT g(this);
1165 Emit(opcode, g.DefineAsFixed(node, fa0),
1166 g.UseFixed(this->input_at(node, 0), fa1))
1167 ->MarkAsCall();
1168}
1169
1171 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1172 OpIndex node) {
1173 RiscvOperandGeneratorT g(this);
1174
1175 // Prepare for C function call.
1176 if (call_descriptor->IsCFunctionCall()) {
1177 int gp_param_count = static_cast<int>(call_descriptor->GPParameterCount());
1178 int fp_param_count = static_cast<int>(call_descriptor->FPParameterCount());
1179 Emit(kArchPrepareCallCFunction | ParamField::encode(gp_param_count) |
1180 FPParamField::encode(fp_param_count),
1181 0, nullptr, 0, nullptr);
1182
1183 // Poke any stack arguments.
1184 int slot = kCArgSlotCount;
1185 for (PushParameter input : (*arguments)) {
1186 Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1187 g.TempImmediate(slot << kSystemPointerSizeLog2));
1188 ++slot;
1189 }
1190 } else {
1191 int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
1192 if (push_count > 0) {
1193 // Calculate needed space
1194 int stack_size = 0;
1195 for (PushParameter input : (*arguments)) {
1196 if (input.node.valid()) {
1197 stack_size += input.location.GetSizeInPointers();
1198 }
1199 }
1200 Emit(kRiscvStackClaim, g.NoOutput(),
1201 g.TempImmediate(stack_size << kSystemPointerSizeLog2));
1202 }
1203 for (size_t n = 0; n < arguments->size(); ++n) {
1204 PushParameter input = (*arguments)[n];
1205 if (input.node.valid()) {
1206 Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1207 g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
1208 }
1209 }
1210 }
1211}
1212
1213void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) {
1214 auto load = this->load_view(node);
1215 LoadRepresentation load_rep = load.loaded_rep();
1216 RiscvOperandGeneratorT g(this);
1217 OpIndex base = load.base();
1218 OpIndex index = load.index();
1219
1220 InstructionCode opcode = kArchNop;
1221 switch (load_rep.representation()) {
1223 opcode = kRiscvULoadFloat;
1224 break;
1226 opcode = kRiscvULoadDouble;
1227 break;
1229 opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
1230 break;
1232 opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
1233 break;
1235 opcode = kRiscvUlw;
1236 break;
1237 case MachineRepresentation::kTaggedSigned: // Fall through.
1238 case MachineRepresentation::kTaggedPointer: // Fall through.
1239 case MachineRepresentation::kTagged: // Fall through.
1241 opcode = kRiscvUld;
1242 break;
1244 opcode = kRiscvRvvLd;
1245 break;
1246 case MachineRepresentation::kSimd256: // Fall through.
1247 case MachineRepresentation::kBit: // Fall through.
1248 case MachineRepresentation::kCompressedPointer: // Fall through.
1249 case MachineRepresentation::kCompressed: // Fall through.
1250 case MachineRepresentation::kSandboxedPointer: // Fall through.
1251 case MachineRepresentation::kMapWord: // Fall through.
1252 case MachineRepresentation::kIndirectPointer: // Fall through.
1253 case MachineRepresentation::kProtectedPointer: // Fall through.
1254 case MachineRepresentation::kFloat16: // Fall through.
1255 case MachineRepresentation::kFloat16RawBits: // Fall through.
1257 UNREACHABLE();
1258 }
1259 bool traps_on_null;
1260 if (load.is_protected(&traps_on_null)) {
1261 if (traps_on_null) {
1263 } else {
1265 }
1266 }
1267 if (g.CanBeImmediate(index, opcode)) {
1269 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
1270 } else {
1271 InstructionOperand addr_reg = g.TempRegister();
1272 Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
1273 g.UseRegister(index), g.UseRegister(base));
1274 // Emit desired load opcode, using temp addr_reg.
1276 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1277 }
1278}
1279
1280void InstructionSelectorT::VisitUnalignedStore(OpIndex node) {
1281 RiscvOperandGeneratorT g(this);
1282 auto store_view = this->store_view(node);
1285 OpIndex index = this->value(store_view.index());
1286 OpIndex value = store_view.value();
1287
1289
1291 switch (rep) {
1293 opcode = kRiscvUStoreFloat;
1294 break;
1296 opcode = kRiscvUStoreDouble;
1297 break;
1299 opcode = kRiscvSb;
1300 break;
1302 opcode = kRiscvUsh;
1303 break;
1305 opcode = kRiscvUsw;
1306 break;
1307 case MachineRepresentation::kTaggedSigned: // Fall through.
1308 case MachineRepresentation::kTaggedPointer: // Fall through.
1309 case MachineRepresentation::kTagged: // Fall through.
1311 opcode = kRiscvUsd;
1312 break;
1314 opcode = kRiscvRvvSt;
1315 break;
1316 case MachineRepresentation::kSimd256: // Fall through.
1317 case MachineRepresentation::kBit: // Fall through.
1318 case MachineRepresentation::kCompressedPointer: // Fall through.
1319 case MachineRepresentation::kCompressed: // Fall through.
1320 case MachineRepresentation::kSandboxedPointer: // Fall through.
1321 case MachineRepresentation::kMapWord: // Fall through.
1322 case MachineRepresentation::kIndirectPointer: // Fall through.
1323 case MachineRepresentation::kProtectedPointer: // Fall through.
1324 case MachineRepresentation::kFloat16: // Fall through.
1325 case MachineRepresentation::kFloat16RawBits: // Fall through.
1327 UNREACHABLE();
1328 }
1329
1330 if (g.CanBeImmediate(index, opcode)) {
1331 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1332 g.UseRegister(base), g.UseImmediate(index),
1333 g.UseRegisterOrImmediateZero(value));
1334 } else {
1335 InstructionOperand addr_reg = g.TempRegister();
1336 Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
1337 g.UseRegister(index), g.UseRegister(base));
1338 // Emit desired store opcode, using temp addr_reg.
1339 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1340 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1341 }
1342}
1343
1344namespace {
1345
1346bool IsNodeUnsigned(InstructionSelectorT* selector, OpIndex n) {
1347 const Operation& op = selector->Get(n);
1348 if (op.Is<LoadOp>()) {
1349 const LoadOp& load = op.Cast<LoadOp>();
1350 return load.machine_type().IsUnsigned() ||
1351 load.machine_type().IsCompressed();
1352 } else if (op.Is<WordBinopOp>()) {
1353 const WordBinopOp& binop = op.Cast<WordBinopOp>();
1354 switch (binop.kind) {
1355 case WordBinopOp::Kind::kUnsignedDiv:
1356 case WordBinopOp::Kind::kUnsignedMod:
1357 case WordBinopOp::Kind::kUnsignedMulOverflownBits:
1358 return true;
1359 default:
1360 return false;
1361 }
1362 } else if (op.Is<ChangeOrDeoptOp>()) {
1363 const ChangeOrDeoptOp& change = op.Cast<ChangeOrDeoptOp>();
1364 return change.kind == ChangeOrDeoptOp::Kind::kFloat64ToUint32;
1365 } else if (op.Is<ConvertJSPrimitiveToUntaggedOp>()) {
1366 const ConvertJSPrimitiveToUntaggedOp& convert =
1367 op.Cast<ConvertJSPrimitiveToUntaggedOp>();
1368 return convert.kind ==
1369 ConvertJSPrimitiveToUntaggedOp::UntaggedKind::kUint32;
1370 } else if (op.Is<ConstantOp>()) {
1371 const ConstantOp& constant = op.Cast<ConstantOp>();
1372 return constant.kind == ConstantOp::Kind::kCompressedHeapObject;
1373 } else {
1374 return false;
1375 }
1376}
1377
1378bool CanUseOptimizedWord32Compare(InstructionSelectorT* selector,
1379 OpIndex node) {
1381 return false;
1382 }
1383 if (IsNodeUnsigned(selector, selector->input_at(node, 0)) ==
1384 IsNodeUnsigned(selector, selector->input_at(node, 1))) {
1385 return true;
1386 }
1387 return false;
1388}
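// The "optimized" 32-bit compare below reuses the 64-bit compare directly.
// That is only sound when both operands follow the same extension convention
// in their 64-bit registers, which is what the IsNodeUnsigned check above
// approximates; mixed sign/zero-extended operands fall back to the full
// compare.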
1389
1390// Shared routine for multiple word compare operations.
1391
1392void VisitFullWord32Compare(InstructionSelectorT* selector, OpIndex node,
1393 InstructionCode opcode, FlagsContinuationT* cont) {
1394 RiscvOperandGeneratorT g(selector);
1395 InstructionOperand leftOp = g.TempRegister();
1396 InstructionOperand rightOp = g.TempRegister();
1397
1398 selector->Emit(kRiscvShl64, leftOp,
1399 g.UseRegister(selector->input_at(node, 0)),
1400 g.TempImmediate(32));
1401 selector->Emit(kRiscvShl64, rightOp,
1402 g.UseRegister(selector->input_at(node, 1)),
1403 g.TempImmediate(32));
1404
1405 Instruction* instr = VisitCompare(selector, opcode, leftOp, rightOp, cont);
1406 selector->UpdateSourcePosition(instr, node);
1407}
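// The full compare shifts both operands left by 32 so that whatever happens
// to be in the upper 32 bits of each register is discarded before the 64-bit
// comparison. For example, 0x00000001'80000000 and 0xffffffff'80000000
// compare equal as Word32 values once both are shifted to
// 0x80000000'00000000.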
1408
1409void VisitOptimizedWord32Compare(InstructionSelectorT* selector, OpIndex node,
1410 InstructionCode opcode,
1411 FlagsContinuationT* cont) {
1412 if (v8_flags.debug_code) {
1413 RiscvOperandGeneratorT g(selector);
1414 InstructionOperand leftOp = g.TempRegister();
1415 InstructionOperand rightOp = g.TempRegister();
1416 InstructionOperand optimizedResult = g.TempRegister();
1417 InstructionOperand fullResult = g.TempRegister();
1418 FlagsCondition condition = cont->condition();
1419 InstructionCode testOpcode = opcode |
1422
1423 selector->Emit(testOpcode, optimizedResult,
1424 g.UseRegister(selector->input_at(node, 0)),
1425 g.UseRegister(selector->input_at(node, 1)));
1426 selector->Emit(kRiscvShl64, leftOp,
1427 g.UseRegister(selector->input_at(node, 0)),
1428 g.TempImmediate(32));
1429 selector->Emit(kRiscvShl64, rightOp,
1430 g.UseRegister(selector->input_at(node, 1)),
1431 g.TempImmediate(32));
1432 selector->Emit(testOpcode, fullResult, leftOp, rightOp);
1433
1434 selector->Emit(kRiscvAssertEqual, g.NoOutput(), optimizedResult, fullResult,
1435 g.TempImmediate(static_cast<int>(
1436 AbortReason::kUnsupportedNonPrimitiveCompare)));
1437 }
1438
1439 Instruction* instr = VisitWordCompare(selector, node, opcode, cont, false);
1440 selector->UpdateSourcePosition(instr, node);
1441}
1442
1443void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
1444 FlagsContinuationT* cont) {
1445#ifdef USE_SIMULATOR
1446 const Operation& lhs = selector->Get(selector->input_at(node, 0));
1447 const Operation& rhs = selector->Get(selector->input_at(node, 1));
1448 if (lhs.Is<DidntThrowOp>() || rhs.Is<DidntThrowOp>()) {
1449 VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
1450 } else if (!CanUseOptimizedWord32Compare(selector, node)) {
1451#else
1452 if (!CanUseOptimizedWord32Compare(selector, node)) {
1453#endif
1454 VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
1455 } else {
1456 VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont);
1457 }
1458}
1459
1460void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
1461 FlagsContinuationT* cont) {
1462 VisitWordCompare(selector, node, kRiscvCmp, cont, false);
1463}
1464
1465void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
1466 AtomicWidth width) {
1467 using OpIndex = OpIndex;
1468 RiscvOperandGeneratorT g(selector);
1469 auto load = selector->load_view(node);
1470 OpIndex base = load.base();
1471 OpIndex index = load.index();
1472
1473  // The memory order is ignored: acquire and sequentially consistent loads
1474  // are lowered to the same instruction sequence here.
1475  // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
1476 LoadRepresentation load_rep = load.loaded_rep();
1478 switch (load_rep.representation()) {
1480 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
1481 code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
1482 break;
1484 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
1485 code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
1486 break;
1488 code = kAtomicLoadWord32;
1489 break;
1491 code = kRiscvWord64AtomicLoadUint64;
1492 break;
1493#ifdef V8_COMPRESS_POINTERS
1495 code = kRiscvAtomicLoadDecompressTaggedSigned;
1496 break;
1499 code = kRiscvAtomicLoadDecompressTagged;
1500 break;
1501#else
1502 case MachineRepresentation::kTaggedSigned: // Fall through.
1503 case MachineRepresentation::kTaggedPointer: // Fall through.
1505 if (kTaggedSize == 8) {
1506 code = kRiscvWord64AtomicLoadUint64;
1507 } else {
1508 code = kAtomicLoadWord32;
1509 }
1510 break;
1511#endif
1512 case MachineRepresentation::kCompressedPointer: // Fall through.
1515 code = kAtomicLoadWord32;
1516 break;
1517 default:
1518 UNREACHABLE();
1519 }
1520
1521 bool traps_on_null;
1522 if (load.is_protected(&traps_on_null)) {
1523 // Atomic loads and null dereference are mutually exclusive. This might
1524 // change with multi-threaded wasm-gc in which case the access mode should
1525 // probably be kMemoryAccessProtectedNullDereference.
1526 DCHECK(!traps_on_null);
1528 }
1529
1530 if (g.CanBeImmediate(index, code)) {
1531 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1533 g.DefineAsRegister(node), g.UseRegister(base),
1534 g.UseImmediate(index));
1535 } else {
1536 InstructionOperand addr_reg = g.TempRegister();
1537 selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
1538 addr_reg, g.UseRegister(base), g.UseRegister(index));
1539 // Emit desired load opcode, using temp addr_reg.
1540 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1542 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1543 }
1544}
1545
1546AtomicStoreParameters AtomicStoreParametersOf(InstructionSelectorT* selector,
1547 OpIndex node) {
1548 auto store = selector->store_view(node);
1549 return AtomicStoreParameters(store.stored_rep().representation(),
1550 store.stored_rep().write_barrier_kind(),
1551 store.memory_order().value(),
1552 store.access_kind());
1553}
1554
1555void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
1556 AtomicWidth width) {
1557 using OpIndex = OpIndex;
1558 RiscvOperandGeneratorT g(selector);
1559 auto store = selector->store_view(node);
1560 OpIndex base = store.base();
1561 OpIndex index = selector->value(store.index());
1562 OpIndex value = store.value();
1563 DCHECK_EQ(store.displacement(), 0);
1564
1565 // The memory order is ignored.
1566 AtomicStoreParameters store_params = AtomicStoreParametersOf(selector, node);
1567 WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
1568 MachineRepresentation rep = store_params.representation();
1569
1570 if (v8_flags.enable_unconditional_write_barriers &&
1572 write_barrier_kind = kFullWriteBarrier;
1573 }
1574
1576
1577 if (write_barrier_kind != kNoWriteBarrier &&
1578 !v8_flags.disable_write_barriers) {
1581
1582 InstructionOperand inputs[3];
1583 size_t input_count = 0;
1584 inputs[input_count++] = g.UseUniqueRegister(base);
1585 inputs[input_count++] = g.UseUniqueRegister(index);
1586 inputs[input_count++] = g.UseUniqueRegister(value);
1587 RecordWriteMode record_write_mode =
1588 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
1589 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1590 size_t const temp_count = arraysize(temps);
1591 code = kArchAtomicStoreWithWriteBarrier;
1592 code |= RecordWriteModeField::encode(record_write_mode);
1593 selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
1594 } else {
1595 switch (rep) {
1597 code = kAtomicStoreWord8;
1598 break;
1600 code = kAtomicStoreWord16;
1601 break;
1603 code = kAtomicStoreWord32;
1604 break;
1607 code = kRiscvWord64AtomicStoreWord64;
1608 break;
1609 case MachineRepresentation::kTaggedSigned: // Fall through.
1610 case MachineRepresentation::kTaggedPointer: // Fall through.
1613 code = kRiscvStoreCompressTagged;
1614 break;
1615 default:
1616 UNREACHABLE();
1617 }
1618 code |= AtomicWidthField::encode(width);
1619
1620 if (store_params.kind() == MemoryAccessKind::kProtectedByTrapHandler) {
1622 }
1623 if (g.CanBeImmediate(index, code)) {
1624 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1626 g.NoOutput(), g.UseRegisterOrImmediateZero(value),
1627 g.UseRegister(base), g.UseImmediate(index));
1628 } else {
1629 InstructionOperand addr_reg = g.TempRegister();
1630 selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
1631 addr_reg, g.UseRegister(index), g.UseRegister(base));
1632 // Emit desired store opcode, using temp addr_reg.
1633 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1635 g.NoOutput(), g.UseRegisterOrImmediateZero(value),
1636 addr_reg, g.TempImmediate(0));
1637 }
1638 }
1639}
1640
1641void VisitAtomicBinop(InstructionSelectorT* selector, OpIndex node,
1642 ArchOpcode opcode, AtomicWidth width,
1643 MemoryAccessKind access_kind) {
1644 using OpIndex = OpIndex;
1645 RiscvOperandGeneratorT g(selector);
1646 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1647 OpIndex base = atomic_op.base();
1648 OpIndex index = atomic_op.index();
1649 OpIndex value = atomic_op.value();
1650
1651 AddressingMode addressing_mode = kMode_MRI;
1652 InstructionOperand inputs[3];
1653 size_t input_count = 0;
1654 inputs[input_count++] = g.UseUniqueRegister(base);
1655 inputs[input_count++] = g.UseUniqueRegister(index);
1656 inputs[input_count++] = g.UseUniqueRegister(value);
1657 InstructionOperand outputs[1];
1658 outputs[0] = g.UseUniqueRegister(node);
1659 InstructionOperand temps[4];
1660 temps[0] = g.TempRegister();
1661 temps[1] = g.TempRegister();
1662 temps[2] = g.TempRegister();
1663 temps[3] = g.TempRegister();
1664 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1666 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
1668 }
1669 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
1670}
1671
1672} // namespace
1673
1675 OpIndex node, FlagsContinuationT* cont) {
1677 OpIndex value;
1678 const auto& op = this->turboshaft_graph()
1679 ->Get(node)
1681 kind = op.kind;
1682 value = op.stack_limit();
1684 kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
1685
1686 RiscvOperandGeneratorT g(this);
1687
1688 // No outputs.
1689 InstructionOperand* const outputs = nullptr;
1690 const int output_count = 0;
1691
1692 // Applying an offset to this stack check requires a temp register. Offsets
1693 // are only applied to the first stack check. If applying an offset, we must
1694 // ensure the input and temp registers do not alias, thus kUniqueRegister.
1695 InstructionOperand temps[] = {g.TempRegister()};
1696 const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
1697 const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
1700
1701 InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1702 static constexpr int input_count = arraysize(inputs);
1703
1704 EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
1705 temp_count, temps, cont);
1706}
1707
1709 FlagsContinuation* cont) {
1710 // Try to combine with comparisons against 0 by simply inverting the branch.
1711 while (const ComparisonOp* equal =
1712 this->TryCast<Opmask::kWord32Equal>(value)) {
1713 if (!CanCover(user, value)) break;
1714 if (!MatchIntegralZero(equal->right())) break;
1715
1716 user = value;
1717 value = equal->left();
1718 cont->Negate();
1719 }
1720
1721 const Operation& value_op = Get(value);
1722 if (CanCover(user, value)) {
1723 if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
1724 switch (comparison->rep.value()) {
1726 cont->OverwriteAndNegateIfEqual(
1727 GetComparisonFlagCondition(*comparison));
1728 return VisitWord32Compare(this, value, cont);
1729
1731 cont->OverwriteAndNegateIfEqual(
1732 GetComparisonFlagCondition(*comparison));
1733 return VisitWord64Compare(this, value, cont);
1734
1736 switch (comparison->kind) {
1737 case ComparisonOp::Kind::kEqual:
1738 cont->OverwriteAndNegateIfEqual(kEqual);
1739 return VisitFloat32Compare(this, value, cont);
1740 case ComparisonOp::Kind::kSignedLessThan:
1741 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1742 return VisitFloat32Compare(this, value, cont);
1743 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1744 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1745 return VisitFloat32Compare(this, value, cont);
1746 default:
1747 UNREACHABLE();
1748 }
1750 switch (comparison->kind) {
1751 case ComparisonOp::Kind::kEqual:
1752 cont->OverwriteAndNegateIfEqual(kEqual);
1753 return VisitFloat64Compare(this, value, cont);
1754 case ComparisonOp::Kind::kSignedLessThan:
1755 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1756 return VisitFloat64Compare(this, value, cont);
1757 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1758 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1759 return VisitFloat64Compare(this, value, cont);
1760 default:
1761 UNREACHABLE();
1762 }
1763 default:
1764 break;
1765 }
1766 } else if (const ProjectionOp* projection =
1767 value_op.TryCast<ProjectionOp>()) {
1768 // Check if this is the overflow output projection of an
1769 // <Operation>WithOverflow node.
1770 if (projection->index == 1u) {
1771        // We cannot combine the <Operation>WithOverflow with this branch
1772        // unless the 0th projection (the use of the actual value of the
1773        // <Operation>) is either nullptr, which means there is no use of the
1774        // actual value, or was already defined, which means it is scheduled
1775        // *AFTER* this branch.
1776 OpIndex node = projection->input();
1777        if (const OverflowCheckedBinopOp* binop =
1778                TryCast<OverflowCheckedBinopOp>(node);
1779            binop && CanDoBranchIfOverflowFusion(node)) {
1780 const bool is64 = binop->rep == WordRepresentation::Word64();
1781 switch (binop->kind) {
1782 case OverflowCheckedBinopOp::Kind::kSignedAdd:
1783 cont->OverwriteAndNegateIfEqual(kOverflow);
1784 return VisitBinop<Int32BinopMatcher>(
1785 this, node, is64 ? kRiscvAddOvf64 : kRiscvAdd64, cont);
1786 case OverflowCheckedBinopOp::Kind::kSignedSub:
1787 cont->OverwriteAndNegateIfEqual(kOverflow);
1788 return VisitBinop<Int32BinopMatcher>(
1789 this, node, is64 ? kRiscvSubOvf64 : kRiscvSub64, cont);
1790 case OverflowCheckedBinopOp::Kind::kSignedMul:
1791 cont->OverwriteAndNegateIfEqual(kOverflow);
1792 return VisitBinop<Int32BinopMatcher>(
1793 this, node, is64 ? kRiscvMulOvf64 : kRiscvMulOvf32, cont);
1794 }
1795 }
1796 }
1797 }
1798 }
1799
1800 // Continuation could not be combined with a compare, emit compare against
1801 // 0.
1802 const ComparisonOp* comparison = this->Get(user).TryCast<ComparisonOp>();
1803#ifdef V8_COMPRESS_POINTERS
1804 if (comparison &&
1805 comparison->rep.value() == RegisterRepresentation::Word64()) {
1806 return EmitWordCompareZero(this, value, cont);
1807 } else {
1808 return EmitWord32CompareZero(this, value, cont);
1809 }
1810#else
1811 if (comparison &&
1812 comparison->rep.value() == RegisterRepresentation::Word32()) {
1813 return EmitWord32CompareZero(this, value, cont);
1814 } else {
1815 return EmitWordCompareZero(this, value, cont);
1816 }
1817#endif
1818}
1819
1820void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
1821 const Operation& equal = Get(node);
1822 DCHECK(equal.Is<ComparisonOp>());
1823 OpIndex left = equal.input(0);
1824 OpIndex right = equal.input(1);
1825 OpIndex user = node;
1826 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1827
1828 if (MatchZero(right)) {
1829 return VisitWordCompareZero(user, left, &cont);
1830 }
1831
1832 if (isolate() && (V8_STATIC_ROOTS_BOOL ||
1833 (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) {
1834 RiscvOperandGeneratorT g(this);
1835 const RootsTable& roots_table = isolate()->roots_table();
1836 RootIndex root_index;
1837 Handle<HeapObject> right;
1838 // HeapConstants and CompressedHeapConstants can be treated the same when
1839 // using them as an input to a 32-bit comparison. Check whether either is
1840 // present.
1841 if (MatchHeapConstant(node, &right) && !right.is_null() &&
1842 roots_table.IsRootHandle(right, &root_index)) {
1843 if (RootsTable::IsReadOnly(root_index)) {
1844 Tagged_t ptr =
1845 MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate());
1846 if (g.CanBeImmediate(ptr, kRiscvCmp32)) {
1847 VisitCompare(this, kRiscvCmp32, g.UseRegister(left),
1848 g.TempImmediate(int32_t(ptr)), &cont);
1849 return;
1850 }
1851 }
1852 }
1853 }
1854 VisitWord32Compare(this, node, &cont);
1855}
1856
1857void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
1858 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1859 VisitWord32Compare(this, node, &cont);
1860}
1861
1862void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
1863 FlagsContinuation cont =
1864 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1865 VisitWord32Compare(this, node, &cont);
1866}
1867
1868void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
1869 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1870 VisitWord32Compare(this, node, &cont);
1871}
1872
1873void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
1874 FlagsContinuation cont =
1875 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1876 VisitWord32Compare(this, node, &cont);
1877}
1878
1879void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
1880 OptionalOpIndex ovf = FindProjection(node, 1);
1881 if (ovf.valid() && IsUsed(ovf.value())) {
1882 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1883 return VisitBinop<Int32BinopMatcher>(this, node, kRiscvAdd64, &cont);
1884 }
1885 FlagsContinuation cont;
1886 VisitBinop<Int32BinopMatcher>(this, node, kRiscvAdd64, &cont);
1887}
1888
1889void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
1890 OptionalOpIndex ovf = FindProjection(node, 1);
1891 if (ovf.valid()) {
1892 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1893 return VisitBinop<Int32BinopMatcher>(this, node, kRiscvSub64, &cont);
1894 }
1895 FlagsContinuation cont;
1896 VisitBinop<Int32BinopMatcher>(this, node, kRiscvSub64, &cont);
1897}
1898
1899void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
1900 OptionalOpIndex ovf = FindProjection(node, 1);
1901 if (ovf.valid()) {
1902 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1903 return VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
1904 }
1905 FlagsContinuation cont;
1906 VisitBinop<Int32BinopMatcher>(this, node, kRiscvMulOvf32, &cont);
1907}
1908
1909void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
1910 OptionalOpIndex ovf = FindProjection(node, 1);
1911 if (ovf.valid()) {
1912 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1913 return VisitBinop<Int64BinopMatcher>(this, node, kRiscvAddOvf64, &cont);
1914 }
1915 FlagsContinuation cont;
1916 VisitBinop<Int64BinopMatcher>(this, node, kRiscvAddOvf64, &cont);
1917}
1918
1919void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
1920 OptionalOpIndex ovf = FindProjection(node, 1);
1921 if (ovf.valid()) {
1922 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1923 return VisitBinop<Int64BinopMatcher>(this, node, kRiscvSubOvf64, &cont);
1924 }
1925 FlagsContinuation cont;
1926 VisitBinop<Int64BinopMatcher>(this, node, kRiscvSubOvf64, &cont);
1927}
1928
1929void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
1930 OptionalOpIndex ovf = FindProjection(node, 1);
1931 if (ovf.valid()) {
1932 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1933 return VisitBinop<Int64BinopMatcher>(this, node, kRiscvMulOvf64, &cont);
1934 }
1935 FlagsContinuation cont;
1936 VisitBinop<Int64BinopMatcher>(this, node, kRiscvMulOvf64, &cont);
1937}
1938
1939void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
1940 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1941 const ComparisonOp& equal = this->Get(node).template Cast<ComparisonOp>();
1942 DCHECK_EQ(equal.kind, ComparisonOp::Kind::kEqual);
1943 if (this->MatchIntegralZero(equal.right())) {
1944 return VisitWordCompareZero(node, equal.left(), &cont);
1945 }
1946 VisitWord64Compare(this, node, &cont);
1947}
1948
1949void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
1950 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1951 VisitWord64Compare(this, node, &cont);
1952}
1953
1954void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
1955 FlagsContinuation cont =
1956 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1957 VisitWord64Compare(this, node, &cont);
1958}
1959
1960void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
1961 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1962 VisitWord64Compare(this, node, &cont);
1963}
1964
1965void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
1966 FlagsContinuation cont =
1967 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1968 VisitWord64Compare(this, node, &cont);
1969}
1970
1971void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
1972 VisitAtomicLoad(this, node, AtomicWidth::kWord32);
1973}
1974
1975void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
1976 VisitAtomicStore(this, node, AtomicWidth::kWord32);
1977}
1978
1979void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
1980 VisitAtomicLoad(this, node, AtomicWidth::kWord64);
1981}
1982
1983void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
1984 VisitAtomicStore(this, node, AtomicWidth::kWord64);
1985}
1986
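// Emits an atomic exchange: base, index, and value are placed in unique
// registers so they cannot alias the three temporaries reserved for the
// generated sequence.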
1987void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
1988                         ArchOpcode opcode, AtomicWidth width,
1989 MemoryAccessKind access_kind) {
1990 using OpIndex = OpIndex;
1991 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1992 RiscvOperandGeneratorT g(selector);
1993 OpIndex base = atomic_op.base();
1994 OpIndex index = atomic_op.index();
1995 OpIndex value = atomic_op.value();
1996
1997 InstructionOperand inputs[3];
1998 size_t input_count = 0;
1999 inputs[input_count++] = g.UseUniqueRegister(base);
2000 inputs[input_count++] = g.UseUniqueRegister(index);
2001 inputs[input_count++] = g.UseUniqueRegister(value);
2002 InstructionOperand outputs[1];
2003 outputs[0] = g.UseUniqueRegister(node);
2004 InstructionOperand temp[3];
2005 temp[0] = g.TempRegister();
2006 temp[1] = g.TempRegister();
2007 temp[2] = g.TempRegister();
2008
2009 InstructionCode code = opcode | AddressingModeField::encode(kMode_MRI) |
2010 AtomicWidthField::encode(width);
2011 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
2012 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
2013 }
2014 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2015}
2016
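// Emits an atomic compare-exchange: the expected (old) and replacement (new)
// values are additional inputs, and all operands use unique registers so they
// do not alias the three temporaries.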
2017void VisitAtomicCompareExchange(InstructionSelectorT* selector, OpIndex node,
2018                                ArchOpcode opcode, AtomicWidth width,
2019 MemoryAccessKind access_kind) {
2020 RiscvOperandGeneratorT g(selector);
2021 using OpIndex = OpIndex;
2022 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
2023 OpIndex base = atomic_op.base();
2024 OpIndex index = atomic_op.index();
2025 OpIndex old_value = atomic_op.expected().value();
2026 OpIndex new_value = atomic_op.value();
2027
2028 AddressingMode addressing_mode = kMode_MRI;
2029 InstructionOperand inputs[4];
2030 size_t input_count = 0;
2031 inputs[input_count++] = g.UseUniqueRegister(base);
2032 inputs[input_count++] = g.UseUniqueRegister(index);
2033 inputs[input_count++] = g.UseUniqueRegister(old_value);
2034 inputs[input_count++] = g.UseUniqueRegister(new_value);
2035 InstructionOperand outputs[1];
2036 outputs[0] = g.UseUniqueRegister(node);
2037 InstructionOperand temp[3];
2038 temp[0] = g.TempRegister();
2039 temp[1] = g.TempRegister();
2040 temp[2] = g.TempRegister();
2041 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
2042 AtomicWidthField::encode(width);
2043 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
2044 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
2045 }
2046 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2047}
2048
2049void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
2050 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2051 ArchOpcode opcode;
2052 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2053 opcode = kAtomicExchangeInt8;
2054 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2055 opcode = kAtomicExchangeUint8;
2056 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2057 opcode = kAtomicExchangeInt16;
2058 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2059 opcode = kAtomicExchangeUint16;
2060 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2061 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2062 opcode = kAtomicExchangeWord32;
2063 } else {
2064 UNREACHABLE();
2065 }
2066 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32,
2067 atomic_op.memory_access_kind);
2068}
2069
2070void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
2071 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2072 ArchOpcode opcode;
2073 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2074 opcode = kAtomicExchangeUint8;
2075 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2076 opcode = kAtomicExchangeUint16;
2077 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2078 opcode = kAtomicExchangeWord32;
2079 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2080 opcode = kRiscvWord64AtomicExchangeUint64;
2081 } else {
2082 UNREACHABLE();
2083 }
2084 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64,
2085 atomic_op.memory_access_kind);
2086}
2087
2088void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
2089 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2090 ArchOpcode opcode;
2091 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2092 opcode = kAtomicCompareExchangeInt8;
2093 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2094 opcode = kAtomicCompareExchangeUint8;
2095 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2096 opcode = kAtomicCompareExchangeInt16;
2097 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2098 opcode = kAtomicCompareExchangeUint16;
2099 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2100 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2101 opcode = kAtomicCompareExchangeWord32;
2102 } else {
2103 UNREACHABLE();
2104 }
2105 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32,
2106 atomic_op.memory_access_kind);
2107}
2108
2109void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
2110 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2111 ArchOpcode opcode;
2112 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2113 opcode = kAtomicCompareExchangeUint8;
2114 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2115 opcode = kAtomicCompareExchangeUint16;
2116 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2117 opcode = kAtomicCompareExchangeWord32;
2118 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2119 opcode = kRiscvWord64AtomicCompareExchangeUint64;
2120 } else {
2121 UNREACHABLE();
2122 }
2123 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64,
2124 atomic_op.memory_access_kind);
2125}
2126
2127void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
2128 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2129 ArchOpcode uint16_op, ArchOpcode word32_op) {
2130 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2131 ArchOpcode opcode;
2132 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2133 opcode = int8_op;
2134 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2135 opcode = uint8_op;
2136 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2137 opcode = int16_op;
2138 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2139 opcode = uint16_op;
2140 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2141 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2142 opcode = word32_op;
2143 } else {
2144 UNREACHABLE();
2145 }
2146 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32,
2147 atomic_op.memory_access_kind);
2148}
2149
2150#define VISIT_ATOMIC_BINOP(op) \
2151 \
2152 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
2153 VisitWord32AtomicBinaryOperation( \
2154 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
2155 kAtomic##op##Uint16, kAtomic##op##Word32); \
2156  }
2157VISIT_ATOMIC_BINOP(Add)
2158VISIT_ATOMIC_BINOP(Sub)
2159VISIT_ATOMIC_BINOP(And)
2160VISIT_ATOMIC_BINOP(Or)
2161VISIT_ATOMIC_BINOP(Xor)
2162#undef VISIT_ATOMIC_BINOP
2163
2164void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
2165 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
2166 ArchOpcode uint32_op, ArchOpcode uint64_op) {
2167 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2168 ArchOpcode opcode;
2169 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2170 opcode = uint8_op;
2171 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2172 opcode = uint16_op;
2173 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2174 opcode = uint32_op;
2175 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2176 opcode = uint64_op;
2177 } else {
2178 UNREACHABLE();
2179 }
2180 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64,
2181 atomic_op.memory_access_kind);
2182}
2183
2184#define VISIT_ATOMIC_BINOP(op) \
2185 \
2186 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
2187 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
2188 kAtomic##op##Uint16, kAtomic##op##Word32, \
2189 kRiscvWord64Atomic##op##Uint64); \
2190  }
2191VISIT_ATOMIC_BINOP(Add)
2192VISIT_ATOMIC_BINOP(Sub)
2193VISIT_ATOMIC_BINOP(And)
2194VISIT_ATOMIC_BINOP(Or)
2195VISIT_ATOMIC_BINOP(Xor)
2196#undef VISIT_ATOMIC_BINOP
2197
2198void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
2199 UNREACHABLE();
2200}
2201
2202void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
2203 UNREACHABLE();
2204}
2205
2206void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
2207 RiscvOperandGeneratorT g(this);
2208 Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
2209 g.UseRegister(this->input_at(node, 0)));
2210}
2211
2212void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
2213 RiscvOperandGeneratorT g(this);
2214 Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
2215 g.UseRegister(this->input_at(node, 0)));
2216}
2217
2218void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
2219 EmitSignExtendWord(this, node);
2220}
2221
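// NaN-propagating lane-wise minimum: vmfeq of each input with itself marks the
// non-NaN lanes, the two masks are ANDed into v0, the scratch destination is
// pre-filled with the canonical quiet NaN (0x7ff8000000000000), and the masked
// vfmin only overwrites lanes where both inputs are ordered.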
2222void InstructionSelectorT::VisitF64x2Min(OpIndex node) {
2223 RiscvOperandGeneratorT g(this);
2224 InstructionOperand temp1 = g.TempFpRegister(v0);
2225 InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);
2226 InstructionOperand mask_reg = g.TempFpRegister(v0);
2227 this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
2228 g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
2229 g.UseImmediate(m1));
2230 this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
2231 g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
2232 g.UseImmediate(m1));
2233 this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
2234 g.UseImmediate(m1));
2235
2236 InstructionOperand NaN = g.TempFpRegister(kSimd128ScratchReg);
2237 InstructionOperand result = g.TempFpRegister(kSimd128ScratchReg);
2238 this->Emit(kRiscvVmv, NaN, g.UseImmediate64(0x7ff8000000000000L),
2239 g.UseImmediate(E64), g.UseImmediate(m1));
2240 this->Emit(kRiscvVfminVv, result, g.UseRegister(this->input_at(node, 1)),
2241 g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
2242 g.UseImmediate(m1), g.UseImmediate(MaskType::Mask));
2243 this->Emit(kRiscvVmv, g.DefineAsRegister(node), result, g.UseImmediate(E64),
2244 g.UseImmediate(m1));
2245}
2246
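// Same NaN-propagation scheme as F64x2Min above, with vfmax selecting the
// larger value in the ordered lanes.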
2247void InstructionSelectorT::VisitF64x2Max(OpIndex node) {
2248 RiscvOperandGeneratorT g(this);
2249 InstructionOperand temp1 = g.TempFpRegister(v0);
2250 InstructionOperand temp2 = g.TempFpRegister(kSimd128ScratchReg);
2251 InstructionOperand mask_reg = g.TempFpRegister(v0);
2252 this->Emit(kRiscvVmfeqVv, temp1, g.UseRegister(this->input_at(node, 0)),
2253 g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
2254 g.UseImmediate(m1));
2255 this->Emit(kRiscvVmfeqVv, temp2, g.UseRegister(this->input_at(node, 1)),
2256 g.UseRegister(this->input_at(node, 1)), g.UseImmediate(E64),
2257 g.UseImmediate(m1));
2258 this->Emit(kRiscvVandVv, mask_reg, temp2, temp1, g.UseImmediate(E64),
2259 g.UseImmediate(m1));
2260
2261 InstructionOperand NaN = g.TempFpRegister(kSimd128ScratchReg);
2262 InstructionOperand result = g.TempFpRegister(kSimd128ScratchReg);
2263 this->Emit(kRiscvVmv, NaN, g.UseImmediate64(0x7ff8000000000000L),
2264 g.UseImmediate(E64), g.UseImmediate(m1));
2265 this->Emit(kRiscvVfmaxVv, result, g.UseRegister(this->input_at(node, 1)),
2266 g.UseRegister(this->input_at(node, 0)), g.UseImmediate(E64),
2267 g.UseImmediate(m1), g.UseImmediate(MaskType::Mask));
2268 this->Emit(kRiscvVmv, g.DefineAsRegister(node), result, g.UseImmediate(E64),
2269 g.UseImmediate(m1));
2270}
2271
2272//
2273// void InstructionSelectorT::Comment(const std::string msg){
2274// RiscvOperandGeneratorT g(this);
2275// if (!v8_flags.code_comments) return;
2276// int64_t length = msg.length() + 1;
2277// char* zone_buffer =
2278// reinterpret_cast<char*>(this->isolate()->array_buffer_allocator()->Allocate(length));
2279// memset(zone_buffer, '\0', length);
2280// MemCopy(zone_buffer, msg.c_str(), length);
2281// using ptrsize_int_t =
2282// std::conditional<kSystemPointerSize == 8, int64_t, int32_t>::type;
2283// InstructionOperand operand = this->sequence()->AddImmediate(
2284// Constant{reinterpret_cast<ptrsize_int_t>(zone_buffer)});
2285// InstructionOperand inputs[2];
2286// inputs[0] = operand;
2287// inputs[1] = g.UseImmediate64(length);
2288// this->Emit(kArchComment, 0, nullptr, 1, inputs);
2289// }
2290
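// The flags below advertise which machine operators this back end lowers
// directly; count-trailing-zeros and population-count are only claimed when
// the Zbb bit-manipulation extension is available.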
2291// static
2292MachineOperatorBuilder::Flags
2293InstructionSelector::SupportedMachineOperatorFlags() {
2294 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2295 flags |= MachineOperatorBuilder::kWord32ShiftIsSafe |
2296 MachineOperatorBuilder::kInt32DivIsSafe |
2297 MachineOperatorBuilder::kUint32DivIsSafe |
2298 MachineOperatorBuilder::kFloat64RoundDown |
2299 MachineOperatorBuilder::kFloat32RoundDown |
2300 MachineOperatorBuilder::kFloat64RoundUp |
2301 MachineOperatorBuilder::kFloat32RoundUp |
2302 MachineOperatorBuilder::kFloat64RoundTruncate |
2303 MachineOperatorBuilder::kFloat32RoundTruncate |
2304 MachineOperatorBuilder::kFloat64RoundTiesEven |
2305 MachineOperatorBuilder::kFloat32RoundTiesEven;
2306 if (CpuFeatures::IsSupported(ZBB)) {
2307 flags |= MachineOperatorBuilder::kWord32Ctz |
2308 MachineOperatorBuilder::kWord64Ctz |
2309 MachineOperatorBuilder::kWord32Popcnt |
2310 MachineOperatorBuilder::kWord64Popcnt;
2311 }
2312 return flags;
2313}
2314
2315} // namespace compiler
2316} // namespace internal
2317} // namespace v8