v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-selector-mips64.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <optional>
6
7#include "src/base/bits.h"
8#include "src/base/logging.h"
13
14namespace v8 {
15namespace internal {
16namespace compiler {
17
18using namespace turboshaft; // NOLINT(build/namespaces)
19
20#define TRACE(...) PrintF(__VA_ARGS__)
21
22// Adds Mips-specific methods for generating InstructionOperands.
24 public:
27
29 if (CanBeImmediate(node, opcode)) {
30 return UseImmediate(node);
31 }
32 return UseRegister(node);
33 }
34
35 // Use the zero register if the node has the immediate value zero, otherwise
36 // assign a register.
38 if (const ConstantOp* constant =
39 selector()->Get(node).TryCast<ConstantOp>()) {
40 if ((constant->IsIntegral() && constant->integral() == 0) ||
41 (constant->kind == ConstantOp::Kind::kFloat32 &&
42 constant->float32().get_bits() == 0) ||
43 (constant->kind == ConstantOp::Kind::kFloat64 &&
44 constant->float64().get_bits() == 0)) {
45 return UseImmediate(node);
46 }
47 }
48 return UseRegister(node);
49 }
50
52 int64_t unused;
53 return selector()->MatchSignedIntegralConstant(node, &unused);
54 }
55
56 std::optional<int64_t> GetOptionalIntegerConstant(OpIndex operation) {
57 if (int64_t constant; MatchSignedIntegralConstant(operation, &constant)) {
58 return constant;
59 }
60 return std::nullopt;
61 }
62
64 const ConstantOp* constant = selector()->Get(node).TryCast<ConstantOp>();
65 if (!constant) return false;
66
67 int64_t value;
68 return selector()->MatchSignedIntegralConstant(node, &value) &&
69 CanBeImmediate(value, mode);
70 }
71
72 bool CanBeImmediate(int64_t value, InstructionCode opcode) {
74 case kMips64Shl:
75 case kMips64Sar:
76 case kMips64Shr:
77 return is_uint5(value);
78 case kMips64Dshl:
79 case kMips64Dsar:
80 case kMips64Dshr:
81 return is_uint6(value);
82 case kMips64Add:
83 case kMips64And32:
84 case kMips64And:
85 case kMips64Dadd:
86 case kMips64Or32:
87 case kMips64Or:
88 case kMips64Tst:
89 case kMips64Xor:
90 return is_uint16(value);
91 case kMips64Lb:
92 case kMips64Lbu:
93 case kMips64Sb:
94 case kMips64Lh:
95 case kMips64Lhu:
96 case kMips64Sh:
97 case kMips64Lw:
98 case kMips64Sw:
99 case kMips64Ld:
100 case kMips64Sd:
101 case kMips64Lwc1:
102 case kMips64Swc1:
103 case kMips64Ldc1:
104 case kMips64Sdc1:
105 return is_int32(value);
106 default:
107 return is_int16(value);
108 }
109 }
110
111 private:
112 bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
113 TRACE("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__);
114 return false;
115 }
116};
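As an aside, the CanBeImmediate() dispatch above mirrors the immediate-field widths of the corresponding MIPS64 encodings: 5-bit shift amounts for 32-bit shifts, 6-bit amounts for 64-bit shifts, a 16-bit unsigned range for the listed arithmetic and logical forms, 32-bit signed offsets for the load/store group, and a 16-bit signed immediate by default. A minimal standalone sketch of such range predicates, in plain C++ and not taken from V8 (base::is_uint5, base::is_uint16 and base::is_int16 are the real helpers):

#include <cstdint>
#include <iostream>

constexpr bool fits_uint5(int64_t v) { return v >= 0 && v <= 31; }
constexpr bool fits_uint16(int64_t v) { return v >= 0 && v <= 0xFFFF; }
constexpr bool fits_int16(int64_t v) { return v >= -32768 && v <= 32767; }

int main() {
  // Shift amounts for 32-bit shifts are 5 bits wide.
  std::cout << fits_uint5(31) << ' ' << fits_uint5(32) << '\n';        // 1 0
  // kMips64And & co. accept a zero-extended 16-bit immediate, so 0xFFFF
  // encodes directly while -1 has to go through a register.
  std::cout << fits_uint16(0xFFFF) << ' ' << fits_uint16(-1) << '\n';  // 1 0
  // The default case is a sign-extended 16-bit immediate.
  std::cout << fits_int16(-32768) << ' ' << fits_int16(40000) << '\n'; // 1 0
}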
117
118static void VisitRR(InstructionSelectorT* selector, ArchOpcode opcode,
119 OpIndex node) {
120 Mips64OperandGeneratorT g(selector);
121 selector->Emit(opcode, g.DefineAsRegister(node),
122 g.UseRegister(selector->input_at(node, 0)));
123}
124
125static void VisitRRI(InstructionSelectorT* selector, ArchOpcode opcode,
126 OpIndex node) {
128}
129
130static void VisitSimdShift(InstructionSelectorT* selector, ArchOpcode opcode,
131 OpIndex node) {
133}
134
135static void VisitRRIR(InstructionSelectorT* selector, ArchOpcode opcode,
136 OpIndex node) {
138}
139
140void VisitRRR(InstructionSelectorT* selector, ArchOpcode opcode, OpIndex node) {
141 Mips64OperandGeneratorT g(selector);
142 selector->Emit(opcode, g.DefineAsRegister(node),
143 g.UseRegister(selector->input_at(node, 0)),
144 g.UseRegister(selector->input_at(node, 1)));
145}
146
147static void VisitUniqueRRR(InstructionSelectorT* selector, ArchOpcode opcode,
148 OpIndex node) {
150}
151
152void VisitRRRR(InstructionSelectorT* selector, ArchOpcode opcode,
153 OpIndex node) {
155}
156
157static void VisitRRO(InstructionSelectorT* selector, ArchOpcode opcode,
158 OpIndex node) {
159 Mips64OperandGeneratorT g(selector);
160 selector->Emit(opcode, g.DefineAsRegister(node),
161 g.UseRegister(selector->input_at(node, 0)),
162 g.UseOperand(selector->input_at(node, 1), opcode));
163}
164
165struct ExtendingLoadMatcher {
167 : matches_(false), selector_(selector), immediate_(0) {
168 Initialize(node);
169 }
170
171 bool Matches() const { return matches_; }
172
173 OpIndex base() const {
174 DCHECK(Matches());
175 return base_;
176 }
177 int64_t immediate() const {
178 DCHECK(Matches());
179 return immediate_;
180 }
182 DCHECK(Matches());
183 return opcode_;
184 }
185
186 private:
187 bool matches_;
188 InstructionSelectorT* selector_;
189 OpIndex base_{};
190 int64_t immediate_;
191 ArchOpcode opcode_;
192
194 const ShiftOp& shift = selector_->Get(node).template Cast<ShiftOp>();
195 DCHECK(shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
196 shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros);
197 // When loading a 64-bit value and shifting by 32, we should
198 // just load and sign-extend the interesting 4 bytes instead.
199 // This happens, for example, when we're loading and untagging SMIs.
200 const Operation& lhs = selector_->Get(shift.left());
201 int64_t constant_rhs;
202
203 if (lhs.Is<LoadOp>() &&
204 selector_->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
205 constant_rhs == 32 && selector_->CanCover(node, shift.left())) {
207
208 const LoadOp& load = lhs.Cast<LoadOp>();
209 base_ = load.base();
210 opcode_ = kMips64Lw;
211 if (load.index().has_value()) {
212 int64_t index_constant;
213 if (selector_->MatchIntegralWord64Constant(load.index().value(),
214 &index_constant)) {
215 DCHECK_EQ(load.element_size_log2, 0);
216 immediate_ = index_constant + 4;
217 matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
218 }
219 } else {
220 immediate_ = load.offset + 4;
221 matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
222 }
223 }
224 }
225};
226
227bool TryEmitExtendingLoad(InstructionSelectorT* selector, OpIndex node,
228 OpIndex output_node) {
229 ExtendingLoadMatcher m(node, selector);
230 Mips64OperandGeneratorT g(selector);
231 if (m.Matches()) {
232 InstructionOperand inputs[2];
233 inputs[0] = g.UseRegister(m.base());
234 InstructionCode opcode =
235 m.opcode() | AddressingModeField::encode(kMode_MRI);
236 DCHECK(is_int32(m.immediate()));
237 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
238 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
239 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
240 inputs);
241 return true;
242 }
243 return false;
244}
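For intuition, the rewrite above relies on the memory layout of a 64-bit value on a little-endian target such as mips64el: after an arithmetic shift right by 32, the surviving word is exactly the 32-bit word stored four bytes past the base address, so the load-plus-shift pair can become a single lw at offset + 4. A standalone sketch in plain C++, not V8 code; it assumes a little-endian host and arithmetic right shifts of negative values (true on all mainstream compilers):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t bits = 0xFFFFFFF912345678ull;  // upper 32 bits: -7, lower 32 bits: arbitrary
  int64_t value;
  std::memcpy(&value, &bits, sizeof bits);

  unsigned char memory[8];
  std::memcpy(memory, &value, sizeof value);      // the value as it sits in memory

  int32_t upper;
  std::memcpy(&upper, memory + 4, sizeof upper);  // models "lw rt, offset + 4(base)"
  assert(int64_t{upper} == value >> 32);          // same result as load64 + dsra 32
  return 0;
}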
245
246bool TryMatchImmediate(InstructionSelectorT* selector,
247 InstructionCode* opcode_return, OpIndex node,
248 size_t* input_count_return, InstructionOperand* inputs) {
249 Mips64OperandGeneratorT g(selector);
250 if (g.CanBeImmediate(node, *opcode_return)) {
251 *opcode_return |= AddressingModeField::encode(kMode_MRI);
252 inputs[0] = g.UseImmediate(node);
253 *input_count_return = 1;
254 return true;
255 }
256 return false;
257}
258
259static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
260 InstructionCode opcode, bool has_reverse_opcode,
261 InstructionCode reverse_opcode,
262 FlagsContinuationT* cont) {
263 Mips64OperandGeneratorT g(selector);
264 InstructionOperand inputs[2];
265 size_t input_count = 0;
266 InstructionOperand outputs[1];
267 size_t output_count = 0;
268
269 const Operation& binop = selector->Get(node);
270 OpIndex left_node = binop.input(0);
271 OpIndex right_node = binop.input(1);
272
273 if (TryMatchImmediate(selector, &opcode, right_node, &input_count,
274 &inputs[1])) {
275 inputs[0] = g.UseRegister(left_node);
276 input_count++;
277 } else if (has_reverse_opcode &&
278 TryMatchImmediate(selector, &reverse_opcode, left_node,
279 &input_count, &inputs[1])) {
280 inputs[0] = g.UseRegister(right_node);
281 opcode = reverse_opcode;
282 input_count++;
283 } else {
284 inputs[input_count++] = g.UseRegister(left_node);
285 inputs[input_count++] = g.UseOperand(right_node, opcode);
286 }
287
288 outputs[output_count++] = g.DefineAsRegister(node);
289
290 DCHECK_NE(0u, input_count);
291 DCHECK_EQ(1u, output_count);
292 DCHECK_GE(arraysize(inputs), input_count);
293 DCHECK_GE(arraysize(outputs), output_count);
294
295 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
296 inputs, cont);
297}
298
299static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
300 InstructionCode opcode, bool has_reverse_opcode,
301 InstructionCode reverse_opcode) {
303 VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
304}
305
306static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
307 InstructionCode opcode, FlagsContinuationT* cont) {
308 VisitBinop(selector, node, opcode, false, kArchNop, cont);
309}
310
311static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
312 InstructionCode opcode) {
313 VisitBinop(selector, node, opcode, false, kArchNop);
314}
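The VisitBinop overloads above implement a simple operand-selection policy: place a small constant into the immediate slot when it is the right operand, commute it into that slot via the reverse opcode when it is the left operand (for the commutative ops in this file the reverse opcode is simply the same opcode), and otherwise fall back to two registers. A standalone sketch of that policy in plain C++; the names and the 16-bit range are illustrative, not V8 API:

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

struct Operand { std::optional<int64_t> constant; };

std::string Select(std::string op, bool has_reverse, std::string reverse_op,
                   Operand left, Operand right) {
  auto fits = [](int64_t v) { return v >= 0 && v <= 0xFFFF; };  // e.g. andi/ori
  if (right.constant && fits(*right.constant))
    return op + " rd, rs, " + std::to_string(*right.constant);
  if (has_reverse && left.constant && fits(*left.constant))
    return reverse_op + " rd, rs, " + std::to_string(*left.constant);
  return op + " rd, rs, rt";
}

int main() {
  std::cout << Select("andi", true, "andi", {}, {{0xFF}}) << '\n';  // immediate form
  std::cout << Select("andi", true, "andi", {{0xFF}}, {}) << '\n';  // commuted form
  std::cout << Select("andi", true, "andi", {}, {}) << '\n';        // register form
}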
315
316void InstructionSelectorT::VisitStackSlot(OpIndex node) {
317 const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
318 int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
319 stack_slot.is_tagged);
320 OperandGenerator g(this);
321
322 Emit(kArchStackSlot, g.DefineAsRegister(node),
323 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
324}
325
326void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
327 Mips64OperandGeneratorT g(this);
328 Emit(kArchAbortCSADcheck, g.NoOutput(),
329 g.UseFixed(this->input_at(node, 0), a0));
330}
331
332void EmitLoad(InstructionSelectorT* selector, OpIndex node,
333 InstructionCode opcode, OpIndex output = OpIndex{}) {
334 Mips64OperandGeneratorT g(selector);
335 const Operation& op = selector->Get(node);
336 const LoadOp& load = op.Cast<LoadOp>();
337
338 // The LoadStoreSimplificationReducer transforms all loads into
339 // *(base + index).
340 OpIndex base = load.base();
341 OpIndex index = load.index().value();
342 CHECK_EQ(load.offset, 0);
343 DCHECK_EQ(load.element_size_log2, 0);
344
345 InstructionOperand inputs[3];
346 size_t input_count = 0;
347 InstructionOperand output_op;
348
349 // If output is valid, use that as the output register. This is used when we
350 // merge a conversion into the load.
351 output_op = g.DefineAsRegister(output.valid() ? output : node);
352
353 const Operation& base_op = selector->Get(base);
354 int64_t index_value;
355 if (base_op.Is<Opmask::kExternalConstant>() &&
356 selector->MatchSignedIntegralConstant(index, &index_value)) {
357 const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
358 if (selector->CanAddressRelativeToRootsRegister(
359 constant_base.external_reference())) {
360 ptrdiff_t const delta =
361 index_value +
363 selector->isolate(), constant_base.external_reference());
364 input_count = 1;
365 // Check that the delta is a 32-bit integer due to the limitations of
366 // immediate operands.
367 if (is_int32(delta)) {
368 inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
369 opcode |= AddressingModeField::encode(kMode_Root);
370 selector->Emit(opcode, 1, &output_op, input_count, inputs);
371 return;
372 }
373 }
374 }
375
376 if (base_op.Is<LoadRootRegisterOp>()) {
377 int64_t index_value;
378 CHECK(selector->MatchSignedIntegralConstant(index, &index_value));
379 input_count = 1;
380 inputs[0] = g.UseImmediate64(index_value);
381 opcode |= AddressingModeField::encode(kMode_Root);
382 selector->Emit(opcode, 1, &output_op, input_count, inputs);
383 return;
384 }
385
386 if (g.CanBeImmediate(index, opcode)) {
387 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
388 g.DefineAsRegister(output.valid() ? output : node),
389 g.UseRegister(base), g.UseImmediate(index));
390 } else {
391 InstructionOperand addr_reg = g.TempRegister();
392 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
393 addr_reg, g.UseRegister(index), g.UseRegister(base));
394 // Emit desired load opcode, using temp addr_reg.
395 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
396 g.DefineAsRegister(output.valid() ? output : node), addr_reg,
397 g.TempImmediate(0));
398 }
399}
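Note that the root-relative fast path above only fires when the combined displacement, that is the constant index plus the distance from the roots register to the external reference, still fits in a signed 32-bit immediate; otherwise emission falls through to the generic base-plus-index sequence. A standalone sketch of that check in plain C++ (the function name is illustrative, not V8 API):

#include <cstdint>
#include <iostream>
#include <limits>

bool FitsRootRelative(int64_t index, int64_t root_to_external_delta) {
  int64_t delta = index + root_to_external_delta;  // the same sum formed in EmitLoad
  return delta >= std::numeric_limits<int32_t>::min() &&
         delta <= std::numeric_limits<int32_t>::max();
}

int main() {
  std::cout << FitsRootRelative(8, 0x1000) << '\n';            // 1: use kMode_Root
  std::cout << FitsRootRelative(8, int64_t{1} << 40) << '\n';  // 0: fall back
}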
400
401void InstructionSelectorT::VisitStoreLane(OpIndex node) { UNIMPLEMENTED(); }
402
403void InstructionSelectorT::VisitLoadLane(OpIndex node) { UNIMPLEMENTED(); }
404
406
408 auto load = this->load_view(node);
409 LoadRepresentation load_rep = load.loaded_rep();
410
411 InstructionCode opcode = kArchNop;
412 switch (load_rep.representation()) {
414 opcode = kMips64Lwc1;
415 break;
417 opcode = kMips64Ldc1;
418 break;
419 case MachineRepresentation::kBit: // Fall through.
421 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
422 break;
424 opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
425 break;
427 opcode = kMips64Lw;
428 break;
429 case MachineRepresentation::kTaggedSigned: // Fall through.
430 case MachineRepresentation::kTaggedPointer: // Fall through.
431 case MachineRepresentation::kTagged: // Fall through.
433 opcode = kMips64Ld;
434 break;
436 opcode = kMips64MsaLd;
437 break;
440 case MachineRepresentation::kSimd256: // Fall through.
441 case MachineRepresentation::kCompressedPointer: // Fall through.
442 case MachineRepresentation::kProtectedPointer: // Fall through.
443 case MachineRepresentation::kSandboxedPointer: // Fall through.
444 case MachineRepresentation::kCompressed: // Fall through.
445 case MachineRepresentation::kMapWord: // Fall through.
446 case MachineRepresentation::kIndirectPointer: // Fall through.
447 case MachineRepresentation::kFloat16RawBits: // Fall through.
449 UNREACHABLE();
450 }
451
452 EmitLoad(this, node, opcode);
453}
454
455void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
456 // TODO(eholk)
458}
459
460void InstructionSelectorT::VisitStorePair(OpIndex node) { UNREACHABLE(); }
461
462void InstructionSelectorT::VisitStore(OpIndex node) {
463 Mips64OperandGeneratorT g(this);
464 TurboshaftAdapter::StoreView store_view = this->store_view(node);
467 OpIndex index = this->value(store_view.index());
468 OpIndex value = store_view.value();
469
470 WriteBarrierKind write_barrier_kind =
473
474 if (v8_flags.enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
475 write_barrier_kind = kFullWriteBarrier;
476 }
477
478 // TODO(mips): I guess this could be done in a better way.
479 if (write_barrier_kind != kNoWriteBarrier &&
480 !v8_flags.disable_write_barriers) {
482 InstructionOperand inputs[3];
483 size_t input_count = 0;
484 inputs[input_count++] = g.UseUniqueRegister(base);
485 inputs[input_count++] = g.UseUniqueRegister(index);
486 inputs[input_count++] = g.UseUniqueRegister(value);
487 RecordWriteMode record_write_mode =
488 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
489 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
490 size_t const temp_count = arraysize(temps);
491 InstructionCode code = kArchStoreWithWriteBarrier;
492 code |= MiscField::encode(static_cast<int>(record_write_mode));
493 Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
494 } else {
496 switch (rep) {
498 opcode = kMips64Swc1;
499 break;
501 opcode = kMips64Sdc1;
502 break;
503 case MachineRepresentation::kBit: // Fall through.
505 opcode = kMips64Sb;
506 break;
508 opcode = kMips64Sh;
509 break;
511 opcode = kMips64Sw;
512 break;
513 case MachineRepresentation::kTaggedSigned: // Fall through.
514 case MachineRepresentation::kTaggedPointer: // Fall through.
515 case MachineRepresentation::kTagged: // Fall through.
517 opcode = kMips64Sd;
518 break;
520 opcode = kMips64MsaSt;
521 break;
524 case MachineRepresentation::kSimd256: // Fall through.
525 case MachineRepresentation::kCompressedPointer: // Fall through.
526 case MachineRepresentation::kCompressed: // Fall through.
527 case MachineRepresentation::kProtectedPointer: // Fall through.
528 case MachineRepresentation::kSandboxedPointer: // Fall through.
529 case MachineRepresentation::kMapWord: // Fall through.
530 case MachineRepresentation::kIndirectPointer: // Fall through.
531 case MachineRepresentation::kFloat16RawBits: // Fall through.
533 UNREACHABLE();
534 }
535
536 if (this->is_load_root_register(base)) {
537 // This will only work if {index} is a constant.
538 Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
539 g.UseImmediate(index), g.UseRegisterOrImmediateZero(value));
540 return;
541 }
542
543 if (g.CanBeImmediate(index, opcode)) {
544 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
545 g.UseRegister(base), g.UseImmediate(index),
546 g.UseRegisterOrImmediateZero(value));
547 } else {
548 InstructionOperand addr_reg = g.TempRegister();
549 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
550 g.UseRegister(index), g.UseRegister(base));
551 // Emit desired store opcode, using temp addr_reg.
552 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
553 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
554 }
555 }
556}
557
558void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
559 // TODO(eholk)
561}
562
563void InstructionSelectorT::VisitWord32And(turboshaft::OpIndex node) {
 564 // TODO(MIPS_dev): Could be optimized as in TurboFan.
565 VisitBinop(this, node, kMips64And32, true, kMips64And32);
566}
567
568void InstructionSelectorT::VisitWord64And(OpIndex node) {
 569 // TODO(MIPS_dev): Could be optimized as in TurboFan.
570 VisitBinop(this, node, kMips64And, true, kMips64And);
571}
572
573void InstructionSelectorT::VisitWord32Or(OpIndex node) {
574 VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
575}
576
577void InstructionSelectorT::VisitWord64Or(OpIndex node) {
578 VisitBinop(this, node, kMips64Or, true, kMips64Or);
579}
580
581void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
 582 // TODO(MIPS_dev): Could be optimized as in TurboFan.
583 VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
584}
585
586void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
 587 // TODO(MIPS_dev): Could be optimized as in TurboFan.
588 VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
589}
590
591void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
 592 // TODO(MIPS_dev): Could be optimized as in TurboFan.
593 VisitRRO(this, kMips64Shl, node);
594}
595
596void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
 597 // TODO(MIPS_dev): Could be optimized as in TurboFan.
598 VisitRRO(this, kMips64Shr, node);
599}
600
601void InstructionSelectorT::VisitWord32Sar(turboshaft::OpIndex node) {
 602 // TODO(MIPS_dev): Could be optimized as in TurboFan.
603 VisitRRO(this, kMips64Sar, node);
604}
605
606void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
607 const ShiftOp& shift_op = this->Get(node).template Cast<ShiftOp>();
608 const Operation& lhs = this->Get(shift_op.left());
609 const Operation& rhs = this->Get(shift_op.right());
610 if ((lhs.Is<Opmask::kChangeInt32ToInt64>() ||
612 rhs.Is<Opmask::kWord32Constant>()) {
613 int64_t shift_by = rhs.Cast<ConstantOp>().signed_integral();
614 if (base::IsInRange(shift_by, 32, 63) && CanCover(node, shift_op.left())) {
615 Mips64OperandGeneratorT g(this);
616 // There's no need to sign/zero-extend to 64-bit if we shift out the
617 // upper 32 bits anyway.
618 Emit(kMips64Dshl, g.DefineAsRegister(node),
619 g.UseRegister(lhs.Cast<ChangeOp>().input()),
620 g.UseImmediate64(shift_by));
621 return;
622 }
623 }
624 VisitRRO(this, kMips64Dshl, node);
625}
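The special case above is sound because a left shift by 32..63 discards exactly the bits that the preceding sign- or zero-extension would have produced, so the extension can be dropped. A standalone check in plain C++ (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -12345;
  for (int s = 32; s <= 63; ++s) {
    uint64_t sign_extended = static_cast<uint64_t>(static_cast<int64_t>(x));
    uint64_t zero_extended = static_cast<uint64_t>(static_cast<uint32_t>(x));
    assert((sign_extended << s) == (zero_extended << s));  // extension bits shifted out
  }
  return 0;
}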
626
627void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
 628 // TODO(MIPS_dev): Could be optimized as in TurboFan.
629 VisitRRO(this, kMips64Dshr, node);
630}
631
632void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
633 if (TryEmitExtendingLoad(this, node, node)) return;
634
635 const ShiftOp& shiftop = Get(node).Cast<ShiftOp>();
636 const Operation& lhs = Get(shiftop.left());
637
638 int64_t constant_rhs;
639 if (lhs.Is<Opmask::kChangeInt32ToInt64>() &&
640 MatchIntegralWord64Constant(shiftop.right(), &constant_rhs) &&
641 is_uint5(constant_rhs) && CanCover(node, shiftop.left())) {
642 OpIndex input = lhs.Cast<ChangeOp>().input();
643 if (!Get(input).Is<LoadOp>() || !CanCover(shiftop.left(), input)) {
644 Mips64OperandGeneratorT g(this);
645 int right = static_cast<int>(constant_rhs);
646 Emit(kMips64Sar, g.DefineAsRegister(node), g.UseRegister(input),
647 g.UseImmediate(right));
648 return;
649 }
650 }
651
652 VisitRRO(this, kMips64Dsar, node);
653}
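The fold above is valid because, for shift amounts 0..31, arithmetically shifting the sign-extended 64-bit value equals sign-extending the 32-bit arithmetic shift, and on MIPS64 a 32-bit sra already leaves its result sign-extended in the 64-bit register. A standalone check in plain C++ (not V8 code; it assumes arithmetic right shifts of negative values, as on all mainstream compilers):

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -123456789;
  for (int s = 0; s < 32; ++s) {
    int64_t wide = static_cast<int64_t>(x) >> s;    // 64-bit dsra on the widened value
    int64_t narrow = static_cast<int64_t>(x >> s);  // 32-bit sra, then sign-extend
    assert(wide == narrow);
  }
  return 0;
}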
654
655void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNREACHABLE(); }
656
657void InstructionSelectorT::VisitWord64Rol(OpIndex node) { UNREACHABLE(); }
658
659void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
660 VisitRRO(this, kMips64Ror, node);
661}
662
663void InstructionSelectorT::VisitWord32Clz(OpIndex node) {
664 VisitRR(this, kMips64Clz, node);
665}
666
667void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
668 UNREACHABLE();
669}
670
671void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {
672 UNREACHABLE();
673}
674
675void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
676 VisitRR(this, kMips64ByteSwap64, node);
677}
678
679void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
680 VisitRR(this, kMips64ByteSwap32, node);
681}
682
683void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
684 UNREACHABLE();
685}
686
687void InstructionSelectorT::VisitWord32Ctz(OpIndex node) {
688 VisitRR(this, kMips64Ctz, node);
689}
690
691void InstructionSelectorT::VisitWord64Ctz(OpIndex node) {
692 VisitRR(this, kMips64Dctz, node);
693}
694
695void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
696 VisitRR(this, kMips64Popcnt, node);
697}
698
699void InstructionSelectorT::VisitWord64Popcnt(OpIndex node) {
700 VisitRR(this, kMips64Dpopcnt, node);
701}
702
703void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
704 VisitRRO(this, kMips64Dror, node);
705}
706
707void InstructionSelectorT::VisitWord64Clz(OpIndex node) {
708 VisitRR(this, kMips64Dclz, node);
709}
710
711void InstructionSelectorT::VisitInt32Add(OpIndex node) {
 712 // TODO(MIPS_dev): Could be optimized as in TurboFan.
713 VisitBinop(this, node, kMips64Add, true, kMips64Add);
714}
715
716void InstructionSelectorT::VisitInt64Add(OpIndex node) {
 717 // TODO(MIPS_dev): Could be optimized as in TurboFan.
718 VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
719}
720
721void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
722 VisitBinop(this, node, kMips64Sub);
723}
724
725void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
726 VisitBinop(this, node, kMips64Dsub);
727}
728
729void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
 730 // TODO(MIPS_dev): Could be optimized as in TurboFan.
731 VisitBinop(this, node, kMips64Mul, true, kMips64Mul);
732}
733
734void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
735 VisitRRR(this, kMips64MulHigh, node);
736}
737
738void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
739 VisitRRR(this, kMips64DMulHigh, node);
740}
741
742void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
743 VisitRRR(this, kMips64MulHighU, node);
744}
745
746void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
747 VisitRRR(this, kMips64DMulHighU, node);
748}
749
750void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
 751 // TODO(MIPS_dev): Could be optimized as in TurboFan.
752 VisitBinop(this, node, kMips64Dmul, true, kMips64Dmul);
753}
754
755void InstructionSelectorT::VisitInt32Div(OpIndex node) {
756 Mips64OperandGeneratorT g(this);
757
758 auto [left, right] = Inputs<WordBinopOp>(node);
759 Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(left),
760 g.UseRegister(right));
761}
762
763void InstructionSelectorT::VisitUint32Div(OpIndex node) {
764 Mips64OperandGeneratorT g(this);
765
766 auto [left, right] = Inputs<WordBinopOp>(node);
767 Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(left),
768 g.UseRegister(right));
769}
770
771void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
772 Mips64OperandGeneratorT g(this);
773
774 auto [left, right] = Inputs<WordBinopOp>(node);
775 Emit(kMips64Mod, g.DefineSameAsFirst(node), g.UseRegister(left),
776 g.UseRegister(right));
777}
778
779void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
780 VisitRRR(this, kMips64ModU, node);
781}
782
783void InstructionSelectorT::VisitInt64Div(OpIndex node) {
784 Mips64OperandGeneratorT g(this);
785
786 auto [left, right] = Inputs<WordBinopOp>(node);
787 Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(left),
788 g.UseRegister(right));
789}
790
791void InstructionSelectorT::VisitUint64Div(OpIndex node) {
792 Mips64OperandGeneratorT g(this);
793
794 auto [left, right] = Inputs<WordBinopOp>(node);
795 Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(left),
796 g.UseRegister(right));
797}
798
799void InstructionSelectorT::VisitInt64Mod(OpIndex node) {
800 VisitRRR(this, kMips64Dmod, node);
801}
802
803void InstructionSelectorT::VisitUint64Mod(OpIndex node) {
804 VisitRRR(this, kMips64DmodU, node);
805}
806
807void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
808 VisitRR(this, kMips64CvtDS, node);
809}
810
811void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
812 VisitRR(this, kMips64CvtSW, node);
813}
814
815void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
816 VisitRR(this, kMips64CvtSUw, node);
817}
818
819void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
820 VisitRR(this, kMips64CvtDW, node);
821}
822
823void InstructionSelectorT::VisitChangeInt64ToFloat64(OpIndex node) {
824 VisitRR(this, kMips64CvtDL, node);
825}
826
827void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
828 VisitRR(this, kMips64CvtDUw, node);
829}
830
831void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
832 Mips64OperandGeneratorT g(this);
833
834 const Operation& op = this->Get(node);
835 InstructionCode opcode = kMips64TruncWS;
836 opcode |=
838 Emit(opcode, g.DefineAsRegister(node),
839 g.UseRegister(this->input_at(node, 0)));
840}
841
842void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
843 Mips64OperandGeneratorT g(this);
844
845 const Operation& op = this->Get(node);
846 InstructionCode opcode = kMips64TruncUwS;
848 opcode |= MiscField::encode(true);
849 }
850
851 Emit(opcode, g.DefineAsRegister(node),
852 g.UseRegister(this->input_at(node, 0)));
853}
854
855void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
856 VisitRR(this, kMips64TruncWD, node);
857}
858
859void InstructionSelectorT::VisitChangeFloat64ToInt64(OpIndex node) {
860 VisitRR(this, kMips64TruncLD, node);
861}
862
863void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
864 VisitRR(this, kMips64TruncUwD, node);
865}
866
867void InstructionSelectorT::VisitChangeFloat64ToUint64(OpIndex node) {
868 VisitRR(this, kMips64TruncUlD, node);
869}
870
871void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
872 VisitRR(this, kMips64TruncUwD, node);
873}
874
875void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
876 Mips64OperandGeneratorT g(this);
877
878 InstructionCode opcode = kMips64TruncLD;
879 const Operation& op = this->Get(node);
881 opcode |= MiscField::encode(true);
882 }
883
884 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
885}
886
887void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
889}
890
891void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
893}
894
895void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
896 Mips64OperandGeneratorT g(this);
897
898 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
899 InstructionOperand outputs[2];
900 size_t output_count = 0;
901 outputs[output_count++] = g.DefineAsRegister(node);
902
903 OptionalOpIndex success_output = FindProjection(node, 1);
904 if (success_output.valid()) {
905 outputs[output_count++] = g.DefineAsRegister(success_output.value());
906 }
907
908 Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
909}
910
911void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
912 Mips64OperandGeneratorT g(this);
913
914 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
915 InstructionOperand outputs[2];
916 size_t output_count = 0;
917 outputs[output_count++] = g.DefineAsRegister(node);
918
919 OptionalOpIndex success_output = FindProjection(node, 1);
920 if (success_output.valid()) {
921 outputs[output_count++] = g.DefineAsRegister(success_output.value());
922 }
923
924 Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
925}
926
927void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
928 Mips64OperandGeneratorT g(this);
929
930 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
931 InstructionOperand outputs[2];
932 size_t output_count = 0;
933 outputs[output_count++] = g.DefineAsRegister(node);
934
935 OptionalOpIndex success_output = FindProjection(node, 1);
936 if (success_output.valid()) {
937 outputs[output_count++] = g.DefineAsRegister(success_output.value());
938 }
939
940 Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
941}
942
943void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
944 Mips64OperandGeneratorT g(this);
945
946 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
947 InstructionOperand outputs[2];
948 size_t output_count = 0;
949 outputs[output_count++] = g.DefineAsRegister(node);
950
951 OptionalOpIndex success_output = FindProjection(node, 1);
952 if (success_output.valid()) {
953 outputs[output_count++] = g.DefineAsRegister(success_output.value());
954 }
955
956 Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
957}
958
959void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
960 Mips64OperandGeneratorT g(this);
961
962 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
963 InstructionOperand outputs[2];
964 size_t output_count = 0;
965 outputs[output_count++] = g.DefineAsRegister(node);
966
967 OptionalOpIndex success_output = FindProjection(node, 1);
968 if (success_output.valid()) {
969 outputs[output_count++] = g.DefineAsRegister(success_output.value());
970 }
971
972 Emit(kMips64TruncWD, output_count, outputs, 1, inputs);
973}
974
975void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
976 Mips64OperandGeneratorT g(this);
977
978 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
979 InstructionOperand outputs[2];
980 size_t output_count = 0;
981 outputs[output_count++] = g.DefineAsRegister(node);
982
983 OptionalOpIndex success_output = FindProjection(node, 1);
984 if (success_output.valid()) {
985 outputs[output_count++] = g.DefineAsRegister(success_output.value());
986 }
987
988 Emit(kMips64TruncUwD, output_count, outputs, 1, inputs);
989}
990
991void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
993}
994
995void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
996 Mips64OperandGeneratorT g(this);
997 const ChangeOp& change_op = this->Get(node).template Cast<ChangeOp>();
998 const Operation& input_op = this->Get(change_op.input());
999 if (input_op.Is<LoadOp>() && CanCover(node, change_op.input())) {
1000 // Generate sign-extending load.
1001 LoadRepresentation load_rep =
1002 this->load_view(change_op.input()).loaded_rep();
1003 MachineRepresentation rep = load_rep.representation();
1004 InstructionCode opcode = kArchNop;
1005 switch (rep) {
1006 case MachineRepresentation::kBit: // Fall through.
1008 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
1009 break;
1011 opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
1012 break;
1014 opcode = kMips64Lw;
1015 break;
1016 default:
1017 UNREACHABLE();
1018 }
1019 EmitLoad(this, change_op.input(), opcode, node);
1020 return;
1021 } else if (input_op.Is<Opmask::kWord32ShiftRightArithmetic>() &&
1022 CanCover(node, change_op.input())) {
1023 // TODO(MIPS_dev): May also optimize 'TruncateInt64ToInt32' here.
 1024 return EmitIdentity(node);
1025 }
1026 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(change_op.input()),
1027 g.TempImmediate(0));
1028}
1029
1030bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
1031 DCHECK(!this->Get(node).Is<PhiOp>());
1032 const Operation& op = this->Get(node);
1033 switch (op.opcode) {
1034 // Comparisons only emit 0/1, so the upper 32 bits must be zero.
1035 case Opcode::kComparison:
1036 return op.Cast<ComparisonOp>().rep == RegisterRepresentation::Word32();
1037 case Opcode::kOverflowCheckedBinop:
1038 return op.Cast<OverflowCheckedBinopOp>().rep ==
1040 case Opcode::kLoad: {
1041 auto load = this->load_view(node);
1042 LoadRepresentation load_rep = load.loaded_rep();
1043 if (load_rep.IsUnsigned()) {
1044 switch (load_rep.representation()) {
1045 case MachineRepresentation::kBit: // Fall through.
1046 case MachineRepresentation::kWord8: // Fall through.
1048 return true;
1049 default:
1050 return false;
1051 }
1052 }
1053 return false;
1054 }
1055 default:
1056 return false;
1057 }
1058}
1059
1060void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
1061 Mips64OperandGeneratorT g(this);
1062 const ChangeOp& change_op = this->Get(node).template Cast<ChangeOp>();
1063 OpIndex input = change_op.input();
1064 const Operation& input_op = this->Get(input);
1065
1066 if (input_op.Is<LoadOp>() && CanCover(node, input)) {
1067 // Generate zero-extending load.
1068 LoadRepresentation load_rep = this->load_view(input).loaded_rep();
1069 if (load_rep.IsUnsigned() &&
1070 load_rep.representation() == MachineRepresentation::kWord32) {
1071 EmitLoad(this, input, kMips64Lwu, node);
1072 return;
1073 }
1074 }
1075 if (ZeroExtendsWord32ToWord64(input)) {
1076 EmitIdentity(node);
1077 return;
1078 }
1079 Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(input),
1080 g.TempImmediate(0), g.TempImmediate(32));
1081}
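The kMips64Dext emission above with position 0 and size 32 is a plain zero-extension: it keeps the low 32 bits of the source and clears the rest, the same as masking with 0xFFFFFFFF. A standalone sketch in plain C++ using a simplified model of dext (not V8 code):

#include <cassert>
#include <cstdint>

// Simplified model of MIPS64 dext: extract `size` bits starting at bit `pos`.
uint64_t dext(uint64_t value, unsigned pos, unsigned size) {
  uint64_t mask = (size >= 64) ? ~uint64_t{0} : (uint64_t{1} << size) - 1;
  return (value >> pos) & mask;
}

int main() {
  uint64_t v = 0xFFFFFFFF80000001ull;  // stale upper bits, low word 0x80000001
  assert(dext(v, 0, 32) == (v & 0xFFFFFFFFull));
  assert(dext(v, 0, 32) == 0x80000001ull);
  return 0;
}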
1082
1083void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
1084 Mips64OperandGeneratorT g(this);
1085 auto value = input_at(node, 0);
1086 if (CanCover(node, value)) {
1088 auto shift_value = input_at(value, 1);
1089 if (CanCover(value, input_at(value, 0)) &&
1090 TryEmitExtendingLoad(this, value, node)) {
1091 return;
1092 } else if (int64_t constant;
1093 MatchSignedIntegralConstant(shift_value, &constant)) {
1094 if (constant >= 32 && constant <= 63) {
 1095 // After Smi untagging there is no need to truncate; combine the sequence.
1096 Emit(kMips64Dsar, g.DefineAsRegister(node),
1097 g.UseRegister(input_at(value, 0)),
1098 g.UseImmediate(input_at(value, 1)));
1099 return;
1100 }
1101 }
1102 }
1103 }
1104 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
1105 g.TempImmediate(0));
1106}
1107
1108void InstructionSelectorT::VisitTruncateFloat64ToFloat32(OpIndex node) {
1109 VisitRR(this, kMips64CvtSD, node);
1110}
1111
1112void InstructionSelectorT::VisitTruncateFloat64ToWord32(OpIndex node) {
1113 VisitRR(this, kArchTruncateDoubleToI, node);
1114}
1115
1116void InstructionSelectorT::VisitRoundFloat64ToInt32(OpIndex node) {
1117 VisitRR(this, kMips64TruncWD, node);
1118}
1119
1120void InstructionSelectorT::VisitRoundInt64ToFloat32(OpIndex node) {
1121 VisitRR(this, kMips64CvtSL, node);
1122}
1123
1124void InstructionSelectorT::VisitRoundInt64ToFloat64(OpIndex node) {
1125 VisitRR(this, kMips64CvtDL, node);
1126}
1127
1128void InstructionSelectorT::VisitRoundUint64ToFloat32(OpIndex node) {
1129 VisitRR(this, kMips64CvtSUl, node);
1130}
1131
1132void InstructionSelectorT::VisitRoundUint64ToFloat64(OpIndex node) {
1133 VisitRR(this, kMips64CvtDUl, node);
1134}
1135
1136void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
1137 VisitRR(this, kMips64Float64ExtractLowWord32, node);
1138}
1139
1140void InstructionSelectorT::VisitBitcastFloat64ToInt64(OpIndex node) {
1141 VisitRR(this, kMips64BitcastDL, node);
1142}
1143
1144void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
 1145 // When moving the lower 32 bits of a general register to a 64-bit FPU
 1146 // register on mips64, the upper 32 bits of the FPU register are undefined,
 1147 // so we can just move the whole 64 bits to the FPU register.
1148 VisitRR(this, kMips64BitcastLD, node);
1149}
1150
1151void InstructionSelectorT::VisitBitcastInt64ToFloat64(OpIndex node) {
1152 VisitRR(this, kMips64BitcastLD, node);
1153}
1154
1155void InstructionSelectorT::VisitFloat32Add(OpIndex node) {
1156 // Optimization with Madd.S(z, x, y) is intentionally removed.
1157 // See explanation for madd_s in assembler-mips64.cc.
1158 VisitRRR(this, kMips64AddS, node);
1159}
1160
1161void InstructionSelectorT::VisitFloat64Add(OpIndex node) {
1162 // Optimization with Madd.D(z, x, y) is intentionally removed.
1163 // See explanation for madd_d in assembler-mips64.cc.
1164 VisitRRR(this, kMips64AddD, node);
1165}
1166
1167void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {
1168 // Optimization with Msub.S(z, x, y) is intentionally removed.
1169 // See explanation for madd_s in assembler-mips64.cc.
1170 VisitRRR(this, kMips64SubS, node);
1171}
1172
1173void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {
1174 // Optimization with Msub.D(z, x, y) is intentionally removed.
1175 // See explanation for madd_d in assembler-mips64.cc.
1176 VisitRRR(this, kMips64SubD, node);
1177}
1178
1179void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {
1180 VisitRRR(this, kMips64MulS, node);
1181}
1182
1183void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {
1184 VisitRRR(this, kMips64MulD, node);
1185}
1186
1187void InstructionSelectorT::VisitFloat32Div(OpIndex node) {
1188 VisitRRR(this, kMips64DivS, node);
1189}
1190
1191void InstructionSelectorT::VisitFloat64Div(OpIndex node) {
1192 VisitRRR(this, kMips64DivD, node);
1193}
1194
1195void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
1196 Mips64OperandGeneratorT g(this);
1197 Emit(kMips64ModD, g.DefineAsFixed(node, f0),
1198 g.UseFixed(this->input_at(node, 0), f12),
1199 g.UseFixed(this->input_at(node, 1), f14))
1200 ->MarkAsCall();
1201}
1202
1203void InstructionSelectorT::VisitFloat32Max(OpIndex node) {
1204 VisitRRR(this, kMips64Float32Max, node);
1205}
1206
1207void InstructionSelectorT::VisitFloat64Max(OpIndex node) {
1208 VisitRRR(this, kMips64Float64Max, node);
1209}
1210
1211void InstructionSelectorT::VisitFloat32Min(OpIndex node) {
1212 VisitRRR(this, kMips64Float32Min, node);
1213}
1214
1215void InstructionSelectorT::VisitFloat64Min(OpIndex node) {
1216 VisitRRR(this, kMips64Float64Min, node);
1217}
1218
1219void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
1220 VisitRR(this, kMips64AbsS, node);
1221}
1222
1223void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
1224 VisitRR(this, kMips64AbsD, node);
1225}
1226
1227void InstructionSelectorT::VisitFloat32Sqrt(OpIndex node) {
1228 VisitRR(this, kMips64SqrtS, node);
1229}
1230
1231void InstructionSelectorT::VisitFloat64Sqrt(OpIndex node) {
1232 VisitRR(this, kMips64SqrtD, node);
1233}
1234
1235void InstructionSelectorT::VisitFloat32RoundDown(OpIndex node) {
1236 VisitRR(this, kMips64Float32RoundDown, node);
1237}
1238
1239void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
1240 VisitRR(this, kMips64Float64RoundDown, node);
1241}
1242
1243void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
1244 VisitRR(this, kMips64Float32RoundUp, node);
1245}
1246
1247void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
1248 VisitRR(this, kMips64Float64RoundUp, node);
1249}
1250
1251void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
1252 VisitRR(this, kMips64Float32RoundTruncate, node);
1253}
1254
1255void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
1256 VisitRR(this, kMips64Float64RoundTruncate, node);
1257}
1258
1259void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {
1260 UNREACHABLE();
1261}
1262
1263void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
1264 VisitRR(this, kMips64Float32RoundTiesEven, node);
1265}
1266
1267void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
1268 VisitRR(this, kMips64Float64RoundTiesEven, node);
1269}
1270
1271void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
1272 VisitRR(this, kMips64NegS, node);
1273}
1274
1275void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
1276 VisitRR(this, kMips64NegD, node);
1277}
1278
1280 InstructionCode opcode) {
1281 Mips64OperandGeneratorT g(this);
1282 Emit(opcode, g.DefineAsFixed(node, f0),
1283 g.UseFixed(this->input_at(node, 0), f2),
1284 g.UseFixed(this->input_at(node, 1), f4))
1285 ->MarkAsCall();
1286}
1287
1289 InstructionCode opcode) {
1290 Mips64OperandGeneratorT g(this);
1291 Emit(opcode, g.DefineAsFixed(node, f0),
1292 g.UseFixed(this->input_at(node, 0), f12))
1293 ->MarkAsCall();
1294}
1295
1297
1298void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
1299 LinkageLocation location) {}
1300
1302 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1303 OpIndex node) {
1304 Mips64OperandGeneratorT g(this);
1305
1306 // Prepare for C function call.
1307 if (call_descriptor->IsCFunctionCall()) {
1308 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1309 call_descriptor->ParameterCount())),
1310 0, nullptr, 0, nullptr);
1311
1312 // Poke any stack arguments.
1313 int slot = kCArgSlotCount;
1314 for (PushParameter input : (*arguments)) {
1315 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1316 g.TempImmediate(slot << kSystemPointerSizeLog2));
1317 ++slot;
1318 }
1319 } else {
1320 int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
1321 if (push_count > 0) {
1322 // Calculate needed space
1323 int stack_size = 0;
1324 for (PushParameter input : (*arguments)) {
1325 if (input.node.valid()) {
1326 stack_size += input.location.GetSizeInPointers();
1327 }
1328 }
1329 Emit(kMips64StackClaim, g.NoOutput(),
1330 g.TempImmediate(stack_size << kSystemPointerSizeLog2));
1331 }
1332 for (size_t n = 0; n < arguments->size(); ++n) {
1333 PushParameter input = (*arguments)[n];
1334 if (input.node.valid()) {
1335 Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1336 g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
1337 }
1338 }
1339 }
1340}
1341
1343 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1344 OpIndex node) {
1345 Mips64OperandGeneratorT g(this);
1346
1347 for (PushParameter output : *results) {
1348 if (!output.location.IsCallerFrameSlot()) continue;
1349 // Skip any alignment holes in nodes.
1350 if (output.node.valid()) {
1351 DCHECK(!call_descriptor->IsCFunctionCall());
1352 if (output.location.GetType() == MachineType::Float32()) {
1353 MarkAsFloat32(output.node);
1354 } else if (output.location.GetType() == MachineType::Float64()) {
1355 MarkAsFloat64(output.node);
1356 } else if (output.location.GetType() == MachineType::Simd128()) {
1357 MarkAsSimd128(output.node);
1358 }
1359 int offset = call_descriptor->GetOffsetToReturns();
1360 int reverse_slot = -output.location.GetLocation() - offset;
1361 Emit(kMips64Peek, g.DefineAsRegister(output.node),
1362 g.UseImmediate(reverse_slot));
1363 }
1364 }
1365}
1366
1368
1369void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) {
1370 auto load = this->load_view(node);
1371 LoadRepresentation load_rep = load.loaded_rep();
1372
1373 InstructionCode opcode = kArchNop;
1374 switch (load_rep.representation()) {
1376 opcode = kMips64Ulwc1;
1377 break;
1379 opcode = kMips64Uldc1;
1380 break;
1382 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
1383 break;
1385 opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
1386 break;
1388 opcode = kMips64Ulw;
1389 break;
1390 case MachineRepresentation::kTaggedSigned: // Fall through.
1391 case MachineRepresentation::kTaggedPointer: // Fall through.
1392 case MachineRepresentation::kTagged: // Fall through.
1394 opcode = kMips64Uld;
1395 break;
1397 opcode = kMips64MsaLd;
1398 break;
1400 UNIMPLEMENTED();
1401 case MachineRepresentation::kSimd256: // Fall through.
1402 case MachineRepresentation::kBit: // Fall through.
1403 case MachineRepresentation::kCompressedPointer: // Fall through.
1404 case MachineRepresentation::kCompressed: // Fall through.
1405 case MachineRepresentation::kProtectedPointer: // Fall through.
1406 case MachineRepresentation::kSandboxedPointer: // Fall through.
1407 case MachineRepresentation::kMapWord: // Fall through.
1408 case MachineRepresentation::kIndirectPointer: // Fall through.
1409 case MachineRepresentation::kFloat16RawBits: // Fall through.
1411 UNREACHABLE();
1412 }
1413
1414 EmitLoad(this, node, opcode);
1415}
1416
1417void InstructionSelectorT::VisitUnalignedStore(OpIndex node) {
1418 Mips64OperandGeneratorT g(this);
1419 TurboshaftAdapter::StoreView store_view = this->store_view(node);
1422 OpIndex index = this->value(store_view.index());
1423 OpIndex value = store_view.value();
1424
1426
1428 switch (rep) {
1430 opcode = kMips64Uswc1;
1431 break;
1433 opcode = kMips64Usdc1;
1434 break;
1436 opcode = kMips64Sb;
1437 break;
1439 opcode = kMips64Ush;
1440 break;
1442 opcode = kMips64Usw;
1443 break;
1444 case MachineRepresentation::kTaggedSigned: // Fall through.
1445 case MachineRepresentation::kTaggedPointer: // Fall through.
1446 case MachineRepresentation::kTagged: // Fall through.
1448 opcode = kMips64Usd;
1449 break;
1451 opcode = kMips64MsaSt;
1452 break;
1454 UNIMPLEMENTED();
1455 case MachineRepresentation::kSimd256: // Fall through.
1456 case MachineRepresentation::kBit: // Fall through.
1457 case MachineRepresentation::kCompressedPointer: // Fall through.
1458 case MachineRepresentation::kCompressed: // Fall through.
1459 case MachineRepresentation::kProtectedPointer: // Fall through.
1460 case MachineRepresentation::kSandboxedPointer: // Fall through.
1461 case MachineRepresentation::kMapWord: // Fall through.
1462 case MachineRepresentation::kIndirectPointer: // Fall through.
1463 case MachineRepresentation::kFloat16RawBits: // Fall through.
1465 UNREACHABLE();
1466 }
1467
1468 if (g.CanBeImmediate(index, opcode)) {
1469 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1470 g.UseRegister(base), g.UseImmediate(index),
1471 g.UseRegisterOrImmediateZero(value));
1472 } else {
1473 InstructionOperand addr_reg = g.TempRegister();
1474 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
1475 g.UseRegister(index), g.UseRegister(base));
1476 // Emit desired store opcode, using temp addr_reg.
1477 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1478 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1479 }
1480}
1481
1482namespace {
1483
1484// Shared routine for multiple compare operations.
1485static Instruction* VisitCompare(InstructionSelectorT* selector,
1486 InstructionCode opcode,
1487 InstructionOperand left,
1488 InstructionOperand right,
1489 FlagsContinuationT* cont) {
1490 return selector->EmitWithContinuation(opcode, left, right, cont);
1491}
1492
1493// Shared routine for multiple float32 compare operations.
1494void VisitFloat32Compare(InstructionSelectorT* selector, OpIndex node,
1495 FlagsContinuationT* cont) {
1496 Mips64OperandGeneratorT g(selector);
1497 const ComparisonOp& op = selector->Get(node).template Cast<ComparisonOp>();
1498 OpIndex left = op.left();
1499 OpIndex right = op.right();
1500 InstructionOperand lhs, rhs;
1501
1502 lhs = selector->MatchZero(left) ? g.UseImmediate(left) : g.UseRegister(left);
1503 rhs =
1504 selector->MatchZero(right) ? g.UseImmediate(right) : g.UseRegister(right);
1505 VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
1506}
1507
1508// Shared routine for multiple float64 compare operations.
1509void VisitFloat64Compare(InstructionSelectorT* selector, OpIndex node,
1510 FlagsContinuationT* cont) {
1511 Mips64OperandGeneratorT g(selector);
1512 const Operation& compare = selector->Get(node);
1513 DCHECK(compare.Is<ComparisonOp>());
1514 OpIndex lhs = compare.input(0);
1515 OpIndex rhs = compare.input(1);
1516 if (selector->MatchZero(rhs)) {
1517 VisitCompare(selector, kMips64CmpD, g.UseRegister(lhs), g.UseImmediate(rhs),
1518 cont);
1519 } else if (selector->MatchZero(lhs)) {
1520 VisitCompare(selector, kMips64CmpD, g.UseImmediate(lhs), g.UseRegister(rhs),
1521 cont);
1522 } else {
1523 VisitCompare(selector, kMips64CmpD, g.UseRegister(lhs), g.UseRegister(rhs),
1524 cont);
1525 }
1526}
1527
1528// Shared routine for multiple word compare operations.
1529Instruction* VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
1530 InstructionCode opcode, FlagsContinuationT* cont,
1531 bool commutative) {
1532 Mips64OperandGeneratorT g(selector);
1533 DCHECK_EQ(selector->value_input_count(node), 2);
1534 auto left = selector->input_at(node, 0);
1535 auto right = selector->input_at(node, 1);
1536
1537 // Match immediates on left or right side of comparison.
1538 if (g.CanBeImmediate(right, opcode)) {
1539 if (opcode == kMips64Tst) {
1540 return VisitCompare(selector, opcode, g.UseRegister(left),
1541 g.UseImmediate(right), cont);
1542 } else {
1543 switch (cont->condition()) {
1544 case kEqual:
1545 case kNotEqual:
1546 if (cont->IsSet()) {
1547 return VisitCompare(selector, opcode, g.UseRegister(left),
1548 g.UseImmediate(right), cont);
1549 } else {
1550 return VisitCompare(selector, opcode, g.UseRegister(left),
1551 g.UseRegister(right), cont);
1552 }
1553 case kSignedLessThan:
1555 case kUnsignedLessThan:
1557 return VisitCompare(selector, opcode, g.UseRegister(left),
1558 g.UseImmediate(right), cont);
1559 default:
1560 return VisitCompare(selector, opcode, g.UseRegister(left),
1561 g.UseRegister(right), cont);
1562 }
1563 }
1564 } else if (g.CanBeImmediate(left, opcode)) {
1565 if (!commutative) cont->Commute();
1566 if (opcode == kMips64Tst) {
1567 return VisitCompare(selector, opcode, g.UseRegister(right),
1568 g.UseImmediate(left), cont);
1569 } else {
1570 switch (cont->condition()) {
1571 case kEqual:
1572 case kNotEqual:
1573 if (cont->IsSet()) {
1574 return VisitCompare(selector, opcode, g.UseRegister(right),
1575 g.UseImmediate(left), cont);
1576 } else {
1577 return VisitCompare(selector, opcode, g.UseRegister(right),
1578 g.UseRegister(left), cont);
1579 }
1580 case kSignedLessThan:
1582 case kUnsignedLessThan:
1584 return VisitCompare(selector, opcode, g.UseRegister(right),
1585 g.UseImmediate(left), cont);
1586 default:
1587 return VisitCompare(selector, opcode, g.UseRegister(right),
1588 g.UseRegister(left), cont);
1589 }
1590 }
1591 } else {
1592 return VisitCompare(selector, opcode, g.UseRegister(left),
1593 g.UseRegister(right), cont);
1594 }
1595}
1596
1597// Shared routine for multiple word compare operations.
1598void VisitFullWord32Compare(InstructionSelectorT* selector, OpIndex node,
1599 InstructionCode opcode, FlagsContinuationT* cont) {
1600 Mips64OperandGeneratorT g(selector);
1601 InstructionOperand leftOp = g.TempRegister();
1602 InstructionOperand rightOp = g.TempRegister();
1603
1604 selector->Emit(kMips64Dshl, leftOp,
1605 g.UseRegister(selector->input_at(node, 0)),
1606 g.TempImmediate(32));
1607 selector->Emit(kMips64Dshl, rightOp,
1608 g.UseRegister(selector->input_at(node, 1)),
1609 g.TempImmediate(32));
1610
1611 Instruction* instr = VisitCompare(selector, opcode, leftOp, rightOp, cont);
1612 selector->UpdateSourcePosition(instr, node);
1613}
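Shifting both operands left by 32 before the 64-bit compare discards any stale upper bits, and the signed ordering of the shifted values matches the signed 32-bit ordering of the originals, which is why this full compare is correct even when the inputs are not canonically sign-extended. A standalone check in plain C++ (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // Pretend the registers hold garbage in their upper halves.
  uint64_t a_reg = 0xDEADBEEF00000005ull;  // low 32 bits:  5
  uint64_t b_reg = 0x12345678FFFFFFFEull;  // low 32 bits: -2
  int64_t a_shifted = static_cast<int64_t>(a_reg << 32);
  int64_t b_shifted = static_cast<int64_t>(b_reg << 32);
  int32_t a = static_cast<int32_t>(a_reg);
  int32_t b = static_cast<int32_t>(b_reg);
  assert((a_shifted < b_shifted) == (a < b));
  assert((a_shifted > b_shifted) == (a > b));
  return 0;
}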
1614
1615void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
1616 FlagsContinuationT* cont) {
1617 VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
1618}
1619
1620void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
1621 FlagsContinuationT* cont) {
1622 VisitWordCompare(selector, node, kMips64Cmp, cont, false);
1623}
1624
1625void EmitWordCompareZero(InstructionSelectorT* selector, OpIndex value,
1626 FlagsContinuationT* cont) {
1627 Mips64OperandGeneratorT g(selector);
1628 selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
1629 g.TempImmediate(0), cont);
1630}
1631
1632void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
1633 AtomicWidth width) {
1634 using OpIndex = OpIndex;
1635 Mips64OperandGeneratorT g(selector);
1636 auto load = selector->load_view(node);
1637 OpIndex base = load.base();
1638 OpIndex index = load.index();
1639
1640 // The memory order is ignored.
1641 LoadRepresentation load_rep = load.loaded_rep();
1643 switch (load_rep.representation()) {
1645 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
1646 code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
1647 break;
1649 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
1650 code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
1651 break;
1653 code = kAtomicLoadWord32;
1654 break;
1656 code = kMips64Word64AtomicLoadUint64;
1657 break;
1658 case MachineRepresentation::kTaggedSigned: // Fall through.
1659 case MachineRepresentation::kTaggedPointer: // Fall through.
1662 code = kMips64Word64AtomicLoadUint64;
1663 break;
1664 default:
1665 UNREACHABLE();
1666 }
1667
1668 if (g.CanBeImmediate(index, code)) {
1669 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1671 g.DefineAsRegister(node), g.UseRegister(base),
1672 g.UseImmediate(index));
1673 } else {
1674 InstructionOperand addr_reg = g.TempRegister();
1675 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
1676 addr_reg, g.UseRegister(index), g.UseRegister(base));
1677 // Emit desired load opcode, using temp addr_reg.
1678 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1680 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1681 }
1682}
1683
1684AtomicStoreParameters AtomicStoreParametersOf(InstructionSelectorT* selector,
1685 OpIndex node) {
1686 auto store = selector->store_view(node);
1687 return AtomicStoreParameters(store.stored_rep().representation(),
1688 store.stored_rep().write_barrier_kind(),
1689 store.memory_order().value(),
1690 store.access_kind());
1691}
1692
1693void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
1694 AtomicWidth width) {
1695 using OpIndex = OpIndex;
1696 Mips64OperandGeneratorT g(selector);
1697 auto store = selector->store_view(node);
1698 OpIndex base = store.base();
1699 OpIndex index = selector->value(store.index());
1700 OpIndex value = store.value();
1701 DCHECK_EQ(store.displacement(), 0);
1702
1703 // The memory order is ignored.
1704 AtomicStoreParameters store_params = AtomicStoreParametersOf(selector, node);
1705 WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
1706 MachineRepresentation rep = store_params.representation();
1707
1708 if (v8_flags.enable_unconditional_write_barriers &&
1710 write_barrier_kind = kFullWriteBarrier;
1711 }
1712
1714
1715 if (write_barrier_kind != kNoWriteBarrier &&
1716 !v8_flags.disable_write_barriers) {
1719
1720 InstructionOperand inputs[3];
1721 size_t input_count = 0;
1722 inputs[input_count++] = g.UseUniqueRegister(base);
1723 inputs[input_count++] = g.UseUniqueRegister(index);
1724 inputs[input_count++] = g.UseUniqueRegister(value);
1725 RecordWriteMode record_write_mode =
1726 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
1727 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1728 size_t const temp_count = arraysize(temps);
1729 code = kArchAtomicStoreWithWriteBarrier;
1730 code |= RecordWriteModeField::encode(record_write_mode);
1731 selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
1732 } else {
1733 switch (rep) {
1735 code = kAtomicStoreWord8;
1736 break;
1738 code = kAtomicStoreWord16;
1739 break;
1741 code = kAtomicStoreWord32;
1742 break;
1745 code = kMips64Word64AtomicStoreWord64;
1746 break;
1747 case MachineRepresentation::kTaggedSigned: // Fall through.
1748 case MachineRepresentation::kTaggedPointer: // Fall through.
1751 code = kMips64StoreCompressTagged;
1752 break;
1753 default:
1754 UNREACHABLE();
1755 }
1756 code |= AtomicWidthField::encode(width);
1757
1758 if (g.CanBeImmediate(index, code)) {
1759 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1761 g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
1762 g.UseRegisterOrImmediateZero(value));
1763 } else {
1764 InstructionOperand addr_reg = g.TempRegister();
1765 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
1766 addr_reg, g.UseRegister(index), g.UseRegister(base));
1767 // Emit desired store opcode, using temp addr_reg.
1768 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1769 AtomicWidthField::encode(width),
1770 g.NoOutput(), addr_reg, g.TempImmediate(0),
1771 g.UseRegisterOrImmediateZero(value));
1772 }
1773 }
1774}
1775
1776void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
1777 ArchOpcode opcode, AtomicWidth width) {
1778 using OpIndex = OpIndex;
1779 Mips64OperandGeneratorT g(selector);
1780 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1781 OpIndex base = atomic_op.base();
1782 OpIndex index = atomic_op.index();
1783 OpIndex value = atomic_op.value();
1784
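// Base, index and value are requested as unique registers so the allocator
// cannot alias them with the temps; the emitted exchange sequence (an ll/sc
// retry loop on MIPS64) still needs all three after the temps are written.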
1785 AddressingMode addressing_mode = kMode_MRI;
1786 InstructionOperand inputs[3];
1787 size_t input_count = 0;
1788 inputs[input_count++] = g.UseUniqueRegister(base);
1789 inputs[input_count++] = g.UseUniqueRegister(index);
1790 inputs[input_count++] = g.UseUniqueRegister(value);
1791 InstructionOperand outputs[1];
1792 outputs[0] = g.UseUniqueRegister(node);
1793 InstructionOperand temp[3];
1794 temp[0] = g.TempRegister();
1795 temp[1] = g.TempRegister();
1796 temp[2] = g.TempRegister();
1797 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1798 AtomicWidthField::encode(width);
1799 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
1800}
1801
1802void VisitAtomicCompareExchange(InstructionSelectorT* selector, OpIndex node,
1803 ArchOpcode opcode, AtomicWidth width) {
1804 using OpIndex = OpIndex;
1805 Mips64OperandGeneratorT g(selector);
1806 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1807 OpIndex base = atomic_op.base();
1808 OpIndex index = atomic_op.index();
1809 OpIndex old_value = atomic_op.expected().value();
1810 OpIndex new_value = atomic_op.value();
1811
1812 AddressingMode addressing_mode = kMode_MRI;
1813 InstructionOperand inputs[4];
1814 size_t input_count = 0;
1815 inputs[input_count++] = g.UseUniqueRegister(base);
1816 inputs[input_count++] = g.UseUniqueRegister(index);
1817 inputs[input_count++] = g.UseUniqueRegister(old_value);
1818 inputs[input_count++] = g.UseUniqueRegister(new_value);
1819 InstructionOperand outputs[1];
1820 outputs[0] = g.UseUniqueRegister(node);
1821 InstructionOperand temp[3];
1822 temp[0] = g.TempRegister();
1823 temp[1] = g.TempRegister();
1824 temp[2] = g.TempRegister();
1825 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1826 AtomicWidthField::encode(width);
1827 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
1828}
1829
1830void VisitAtomicBinop(InstructionSelectorT* selector, OpIndex node,
1831 ArchOpcode opcode, AtomicWidth width) {
1832 using OpIndex = OpIndex;
1833 Mips64OperandGeneratorT g(selector);
1834 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1835 OpIndex base = atomic_op.base();
1836 OpIndex index = atomic_op.index();
1837 OpIndex value = atomic_op.value();
1838
1839 AddressingMode addressing_mode = kMode_MRI;
1840 InstructionOperand inputs[3];
1841 size_t input_count = 0;
1842 inputs[input_count++] = g.UseUniqueRegister(base);
1843 inputs[input_count++] = g.UseUniqueRegister(index);
1844 inputs[input_count++] = g.UseUniqueRegister(value);
1845 InstructionOperand outputs[1];
1846 outputs[0] = g.UseUniqueRegister(node);
1847 InstructionOperand temps[4];
1848 temps[0] = g.TempRegister();
1849 temps[1] = g.TempRegister();
1850 temps[2] = g.TempRegister();
1851 temps[3] = g.TempRegister();
1852 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1853 AtomicWidthField::encode(width);
1854 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
1855}
1856
1857} // namespace
1858
1859void InstructionSelectorT::VisitStackPointerGreaterThan(
1860 OpIndex node, FlagsContinuationT* cont) {
1861 StackCheckKind kind;
1862 OpIndex value;
1863 const auto& op = this->turboshaft_graph()
1864 ->Get(node)
1865 .template Cast<StackPointerGreaterThanOp>();
1866 kind = op.kind;
1867 value = op.stack_limit();
1868 InstructionCode opcode =
1869 kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
1870
1871 Mips64OperandGeneratorT g(this);
1872
1873 // No outputs.
1874 InstructionOperand* const outputs = nullptr;
1875 const int output_count = 0;
1876
1877 // TempRegister(0) is used to store the comparison result.
1878 // Applying an offset to this stack check requires a temp register. Offsets
1879 // are only applied to the first stack check. If applying an offset, we must
1880 // ensure the input and temp registers do not alias, thus kUniqueRegister.
1881 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1882 const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
1883 const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
1884 ? OperandGenerator::kUniqueRegister
1885 : OperandGenerator::kRegister;
1886
1887 InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1888 static constexpr int input_count = arraysize(inputs);
1889
1890 EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
1891 temp_count, temps, cont);
1892}
1893
1894 // Shared routine for word comparisons against zero.
1895void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
1896 FlagsContinuation* cont) {
1897 {
1898 Mips64OperandGeneratorT g(this);
1899 // Try to combine with comparisons against 0 by simply inverting the branch.
1900 while (const ComparisonOp* equal =
1901 this->TryCast<Opmask::kWord32Equal>(value)) {
1902 if (!CanCover(user, value)) break;
1903 if (!MatchIntegralZero(equal->right())) break;
1904
1905 user = value;
1906 value = equal->left();
1907 cont->Negate();
1908 }
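// Example: Word32Equal(Word32Equal(x, 0), 0) is stripped twice by the loop
// above, negating the continuation each time, so the branch ends up testing x
// under the original condition.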
1909 const Operation& value_op = Get(value);
1910 if (CanCover(user, value)) {
1911 if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
1912 switch (comparison->rep.value()) {
1913 case RegisterRepresentation::Word32():
1914 cont->OverwriteAndNegateIfEqual(
1915 GetComparisonFlagCondition(*comparison));
1916 return VisitWord32Compare(this, value, cont);
1917
1918 case RegisterRepresentation::Word64():
1919 cont->OverwriteAndNegateIfEqual(
1920 GetComparisonFlagCondition(*comparison));
1921 return VisitWord64Compare(this, value, cont);
1922
1923 case RegisterRepresentation::Float32():
1924 switch (comparison->kind) {
1925 case ComparisonOp::Kind::kEqual:
1926 cont->OverwriteAndNegateIfEqual(kEqual);
1927 return VisitFloat32Compare(this, value, cont);
1928 case ComparisonOp::Kind::kSignedLessThan:
1929 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1930 return VisitFloat32Compare(this, value, cont);
1931 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1932 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1933 return VisitFloat32Compare(this, value, cont);
1934 default:
1935 UNREACHABLE();
1936 }
1937
1938 case RegisterRepresentation::Float64():
1939 switch (comparison->kind) {
1940 case ComparisonOp::Kind::kEqual:
1941 cont->OverwriteAndNegateIfEqual(kEqual);
1942 return VisitFloat64Compare(this, value, cont);
1943 case ComparisonOp::Kind::kSignedLessThan:
1944 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1945 return VisitFloat64Compare(this, value, cont);
1946 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1947 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1948 return VisitFloat64Compare(this, value, cont);
1949 default:
1950 UNREACHABLE();
1951 }
1952
1953 default:
1954 break;
1955 }
1956 } else if (const ProjectionOp* projection =
1957 value_op.TryCast<ProjectionOp>()) {
1958 // Check if this is the overflow output projection of an
1959 // <Operation>WithOverflow node.
1960 if (projection->index == 1u) {
1961 // We cannot combine the <Operation>WithOverflow with this branch
1962 // unless the 0th projection (the use of the actual value of the
1963 // <Operation> is either nullptr, which means there's no use of the
1964 // actual value, or was already defined, which means it is scheduled
1965 // *AFTER* this branch).
1966 OpIndex node = projection->input();
1967 OptionalOpIndex result = FindProjection(node, 0);
1968 if (!result.valid() || IsDefined(result.value())) {
1969 if (const OverflowCheckedBinopOp* binop =
1970 this->TryCast<OverflowCheckedBinopOp>(node)) {
1971 const bool is64 = binop->rep == WordRepresentation::Word64();
1972 switch (binop->kind) {
1973 case OverflowCheckedBinopOp::Kind::kSignedAdd:
1974 cont->OverwriteAndNegateIfEqual(kOverflow);
1975 return VisitBinop(this, node,
1976 is64 ? kMips64DaddOvf : kMips64Dadd, cont);
1977 case OverflowCheckedBinopOp::Kind::kSignedSub:
1978 cont->OverwriteAndNegateIfEqual(kOverflow);
1979 return VisitBinop(this, node,
1980 is64 ? kMips64DsubOvf : kMips64Dsub, cont);
1981 case OverflowCheckedBinopOp::Kind::kSignedMul:
1982 cont->OverwriteAndNegateIfEqual(kOverflow);
1983 return VisitBinop(
1984 this, node, is64 ? kMips64DMulOvf : kMips64MulOvf, cont);
1985 }
1986 }
1987 }
1988 }
1989 } else if (value_op.Is<Opmask::kWord32BitwiseAnd>() ||
1990 value_op.Is<Opmask::kWord64BitwiseAnd>()) {
1991 VisitWordCompare(this, value, kMips64Tst, cont, true);
1992 return;
1993 } else if (value_op.Is<StackPointerGreaterThanOp>()) {
1994 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
1995 return VisitStackPointerGreaterThan(value, cont);
1996 }
1997 }
1998 // Continuation could not be combined with a compare, emit compare against
1999 // 0.
2000 EmitWordCompareZero(this, value, cont);
2001 }
2002}
2003
2004void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
2005 Mips64OperandGeneratorT g(this);
2006 InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));
2007
2008 // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
2009 if (enable_switch_jump_table_ ==
2010 InstructionSelector::kEnableSwitchJumpTable) {
2011 static const size_t kMaxTableSwitchValueRange = 2 << 16;
2012 size_t table_space_cost = 10 + 2 * sw.value_range();
2013 size_t table_time_cost = 3;
2014 size_t lookup_space_cost = 2 + 2 * sw.case_count();
2015 size_t lookup_time_cost = sw.case_count();
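// Example: 20 cases covering a value range of 40 give a table cost of
// 10 + 2*40 = 90 vs. a lookup cost of 2 + 2*20 = 42; with the 3x time weight,
// 90 + 3*3 <= 42 + 3*20 holds, so the jump table is selected.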
2016 if (sw.case_count() > 0 &&
2017 table_space_cost + 3 * table_time_cost <=
2018 lookup_space_cost + 3 * lookup_time_cost &&
2019 sw.min_value() > std::numeric_limits<int32_t>::min() &&
2020 sw.value_range() <= kMaxTableSwitchValueRange) {
2021 InstructionOperand index_operand = value_operand;
2022 if (sw.min_value()) {
2023 index_operand = g.TempRegister();
2024 Emit(kMips64Sub, index_operand, value_operand,
2025 g.TempImmediate(sw.min_value()));
2026 }
2027 // Generate a table lookup.
2028 return EmitTableSwitch(sw, index_operand);
2029 }
2030 }
2031
2032 // Generate a tree of conditional jumps.
2033 return EmitBinarySearchSwitch(sw, value_operand);
2034}
2035
2036void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
2037 const Operation& equal = Get(node);
2038 DCHECK(equal.Is<ComparisonOp>());
2039 OpIndex left = equal.input(0);
2040 OpIndex right = equal.input(1);
2041 OpIndex user = node;
2042 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2043
2044 if (MatchZero(right)) {
2045 return VisitWordCompareZero(user, left, &cont);
2046 }
2047
2048 VisitWord32Compare(this, node, &cont);
2049}
2050
2051void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
2052 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2053 VisitWord32Compare(this, node, &cont);
2054}
2055
2056void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
2057 FlagsContinuation cont =
2058 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2059 VisitWord32Compare(this, node, &cont);
2060}
2061
2062void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
2063 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2064 VisitWord32Compare(this, node, &cont);
2065}
2066
2067void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
2068 FlagsContinuation cont =
2069 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2070 VisitWord32Compare(this, node, &cont);
2071}
2072
2073void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
2074 OptionalOpIndex ovf = FindProjection(node, 1);
2075 if (ovf.valid() && IsUsed(ovf.value())) {
2076 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2077 return VisitBinop(this, node, kMips64Dadd, &cont);
2078 }
2079
2080 FlagsContinuation cont;
2081 VisitBinop(this, node, kMips64Dadd, &cont);
2082}
2083
2084void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
2085 OptionalOpIndex ovf = FindProjection(node, 1);
2086 if (ovf.valid()) {
2087 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2088 return VisitBinop(this, node, kMips64Dsub, &cont);
2089 }
2090
2091 FlagsContinuation cont;
2092 VisitBinop(this, node, kMips64Dsub, &cont);
2093}
2094
2095void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
2096 OptionalOpIndex ovf = FindProjection(node, 1);
2097 if (ovf.valid()) {
2098 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2099 return VisitBinop(this, node, kMips64MulOvf, &cont);
2100 }
2101
2102 FlagsContinuation cont;
2103 VisitBinop(this, node, kMips64MulOvf, &cont);
2104}
2105
2106void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
2107 OptionalOpIndex ovf = FindProjection(node, 1);
2108 if (ovf.valid()) {
2109 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2110 return VisitBinop(this, node, kMips64DMulOvf, &cont);
2111 }
2112
2113 FlagsContinuation cont;
2114 VisitBinop(this, node, kMips64DMulOvf, &cont);
2115}
2116
2117void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
2118 OptionalOpIndex ovf = FindProjection(node, 1);
2119 if (ovf.valid()) {
2120 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2121 return VisitBinop(this, node, kMips64DaddOvf, &cont);
2122 }
2123
2124 FlagsContinuation cont;
2125 VisitBinop(this, node, kMips64DaddOvf, &cont);
2126}
2127
2128void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
2129 OptionalOpIndex ovf = FindProjection(node, 1);
2130 if (ovf.valid()) {
2131 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2132 return VisitBinop(this, node, kMips64DsubOvf, &cont);
2133 }
2134
2135 FlagsContinuation cont;
2136 VisitBinop(this, node, kMips64DsubOvf, &cont);
2137}
2138
2139void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
2140 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2141 VisitWord64Compare(this, node, &cont);
2142}
2143
2144void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
2145 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2146 VisitWord64Compare(this, node, &cont);
2147}
2148
2149void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
2150 FlagsContinuation cont =
2151 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2152 VisitWord64Compare(this, node, &cont);
2153}
2154
2155void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
2156 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2157 VisitWord64Compare(this, node, &cont);
2158}
2159
2160void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
2161 FlagsContinuation cont =
2162 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2163 VisitWord64Compare(this, node, &cont);
2164}
2165
2166void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
2167 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2168 VisitFloat32Compare(this, node, &cont);
2169}
2170
2171void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
2172 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2173 VisitFloat32Compare(this, node, &cont);
2174}
2175
2176void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
2177 FlagsContinuation cont =
2178 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2179 VisitFloat32Compare(this, node, &cont);
2180}
2181
2182void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
2183 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2184 VisitFloat64Compare(this, node, &cont);
2185}
2186
2187void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
2188 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2189 VisitFloat64Compare(this, node, &cont);
2190}
2191
2192void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
2193 FlagsContinuation cont =
2194 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2195 VisitFloat64Compare(this, node, &cont);
2196}
2197
2198void InstructionSelectorT::VisitFloat64ExtractLowWord32(OpIndex node) {
2199 VisitRR(this, kMips64Float64ExtractLowWord32, node);
2200}
2201
2202void InstructionSelectorT::VisitFloat64ExtractHighWord32(OpIndex node) {
2203 VisitRR(this, kMips64Float64ExtractHighWord32, node);
2204}
2205
2206void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
2207 VisitRR(this, kMips64Float64SilenceNaN, node);
2208}
2209
2210void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
2211 Mips64OperandGeneratorT g(this);
2212 const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
2213 OpIndex hi = bitcast.high_word32();
2214 OpIndex lo = bitcast.low_word32();
2215
2216 InstructionOperand temps[] = {g.TempRegister()};
2217 Emit(kMips64Float64FromWord32Pair, g.DefineAsRegister(node), g.Use(hi),
2218 g.Use(lo), arraysize(temps), temps);
2219}
2220
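// Memory barriers are lowered to kMips64Sync, i.e. the MIPS `sync`
// instruction, which acts as a full memory fence.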
2221void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
2222 Mips64OperandGeneratorT g(this);
2223 Emit(kMips64Sync, g.NoOutput());
2224}
2225
2226void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
2227 VisitAtomicLoad(this, node, AtomicWidth::kWord32);
2228}
2229
2230void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
2231 VisitAtomicStore(this, node, AtomicWidth::kWord32);
2232}
2233
2234void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
2235 VisitAtomicLoad(this, node, AtomicWidth::kWord64);
2236}
2237
2238void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
2239 VisitAtomicStore(this, node, AtomicWidth::kWord64);
2240}
2241
2242void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
2243 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2244 ArchOpcode opcode;
2245 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2246 opcode = kAtomicExchangeInt8;
2247 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2248 opcode = kAtomicExchangeUint8;
2249 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2250 opcode = kAtomicExchangeInt16;
2251 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2252 opcode = kAtomicExchangeUint16;
2253 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2254 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2255 opcode = kAtomicExchangeWord32;
2256 } else {
2257 UNREACHABLE();
2258 }
2259 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
2260}
2261
2262void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
2263 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2264 ArchOpcode opcode;
2265 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2266 opcode = kAtomicExchangeUint8;
2267 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2268 opcode = kAtomicExchangeUint16;
2269 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2270 opcode = kAtomicExchangeWord32;
2271 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2272 opcode = kMips64Word64AtomicExchangeUint64;
2273 } else {
2274 UNREACHABLE();
2275 }
2276 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
2277}
2278
2279void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
2280 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2281 ArchOpcode opcode;
2282 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2283 opcode = kAtomicCompareExchangeInt8;
2284 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2285 opcode = kAtomicCompareExchangeUint8;
2286 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2287 opcode = kAtomicCompareExchangeInt16;
2288 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2289 opcode = kAtomicCompareExchangeUint16;
2290 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2291 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2292 opcode = kAtomicCompareExchangeWord32;
2293 } else {
2294 UNREACHABLE();
2295 }
2296 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
2297}
2298
2299void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
2300 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2301 ArchOpcode opcode;
2302 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2303 opcode = kAtomicCompareExchangeUint8;
2304 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2305 opcode = kAtomicCompareExchangeUint16;
2306 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2307 opcode = kAtomicCompareExchangeWord32;
2308 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2309 opcode = kMips64Word64AtomicCompareExchangeUint64;
2310 } else {
2311 UNREACHABLE();
2312 }
2313 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
2314}
2315
2316void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
2317 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2318 ArchOpcode uint16_op, ArchOpcode word32_op) {
2319 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2320 ArchOpcode opcode;
2321 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2322 opcode = int8_op;
2323 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2324 opcode = uint8_op;
2325 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2326 opcode = int16_op;
2327 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2328 opcode = uint16_op;
2329 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2330 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2331 opcode = word32_op;
2332 } else {
2333 UNREACHABLE();
2334 }
2335 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
2336}
2337
2338#define VISIT_ATOMIC_BINOP(op) \
2339 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
2340 VisitWord32AtomicBinaryOperation( \
2341 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
2342 kAtomic##op##Uint16, kAtomic##op##Word32); \
2343 }
2344VISIT_ATOMIC_BINOP(Add)
2345VISIT_ATOMIC_BINOP(Sub)
2346VISIT_ATOMIC_BINOP(And)
2347VISIT_ATOMIC_BINOP(Or)
2348VISIT_ATOMIC_BINOP(Xor)
2349#undef VISIT_ATOMIC_BINOP
2350
2351void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
2352 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
2353 ArchOpcode uint32_op, ArchOpcode uint64_op) {
2354 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2355 ArchOpcode opcode;
2356 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2357 opcode = uint8_op;
2358 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2359 opcode = uint16_op;
2360 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2361 opcode = uint32_op;
2362 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2363 opcode = uint64_op;
2364 } else {
2365 UNREACHABLE();
2366 }
2367 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
2368}
2369
2370#define VISIT_ATOMIC_BINOP(op) \
2371 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
2372 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
2373 kAtomic##op##Uint16, kAtomic##op##Word32, \
2374 kMips64Word64Atomic##op##Uint64); \
2375 }
2376VISIT_ATOMIC_BINOP(Add)
2377VISIT_ATOMIC_BINOP(Sub)
2378VISIT_ATOMIC_BINOP(And)
2379VISIT_ATOMIC_BINOP(Or)
2380VISIT_ATOMIC_BINOP(Xor)
2381#undef VISIT_ATOMIC_BINOP
2382
2383void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
2384 UNREACHABLE();
2385}
2386
2387void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
2388 UNREACHABLE();
2389}
2390
2391#define SIMD_TYPE_LIST(V) \
2392 V(F64x2) \
2393 V(F32x4) \
2394 V(I64x2) \
2395 V(I32x4) \
2396 V(I16x8) \
2397 V(I8x16)
2398
2399#define SIMD_UNOP_LIST(V) \
2400 V(F64x2Abs, kMips64F64x2Abs) \
2401 V(F64x2Neg, kMips64F64x2Neg) \
2402 V(F64x2Sqrt, kMips64F64x2Sqrt) \
2403 V(F64x2Ceil, kMips64F64x2Ceil) \
2404 V(F64x2Floor, kMips64F64x2Floor) \
2405 V(F64x2Trunc, kMips64F64x2Trunc) \
2406 V(F64x2NearestInt, kMips64F64x2NearestInt) \
2407 V(I64x2Neg, kMips64I64x2Neg) \
2408 V(I64x2BitMask, kMips64I64x2BitMask) \
2409 V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S) \
2410 V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U) \
2411 V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4) \
2412 V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
2413 V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
2414 V(F32x4Abs, kMips64F32x4Abs) \
2415 V(F32x4Neg, kMips64F32x4Neg) \
2416 V(F32x4Sqrt, kMips64F32x4Sqrt) \
2417 V(F32x4Ceil, kMips64F32x4Ceil) \
2418 V(F32x4Floor, kMips64F32x4Floor) \
2419 V(F32x4Trunc, kMips64F32x4Trunc) \
2420 V(F32x4NearestInt, kMips64F32x4NearestInt) \
2421 V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero) \
2422 V(I64x2Abs, kMips64I64x2Abs) \
2423 V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
2424 V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
2425 V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
2426 V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
2427 V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
2428 V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
2429 V(I32x4Neg, kMips64I32x4Neg) \
2430 V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
2431 V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
2432 V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
2433 V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
2434 V(I32x4Abs, kMips64I32x4Abs) \
2435 V(I32x4BitMask, kMips64I32x4BitMask) \
2436 V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
2437 V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
2438 V(I16x8Neg, kMips64I16x8Neg) \
2439 V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
2440 V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
2441 V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
2442 V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
2443 V(I16x8Abs, kMips64I16x8Abs) \
2444 V(I16x8BitMask, kMips64I16x8BitMask) \
2445 V(I8x16Neg, kMips64I8x16Neg) \
2446 V(I8x16Abs, kMips64I8x16Abs) \
2447 V(I8x16Popcnt, kMips64I8x16Popcnt) \
2448 V(I8x16BitMask, kMips64I8x16BitMask) \
2449 V(S128Not, kMips64S128Not) \
2450 V(I64x2AllTrue, kMips64I64x2AllTrue) \
2451 V(I32x4AllTrue, kMips64I32x4AllTrue) \
2452 V(I16x8AllTrue, kMips64I16x8AllTrue) \
2453 V(I8x16AllTrue, kMips64I8x16AllTrue) \
2454 V(V128AnyTrue, kMips64V128AnyTrue)
2455
2456#define SIMD_SHIFT_OP_LIST(V) \
2457 V(I64x2Shl) \
2458 V(I64x2ShrS) \
2459 V(I64x2ShrU) \
2460 V(I32x4Shl) \
2461 V(I32x4ShrS) \
2462 V(I32x4ShrU) \
2463 V(I16x8Shl) \
2464 V(I16x8ShrS) \
2465 V(I16x8ShrU) \
2466 V(I8x16Shl) \
2467 V(I8x16ShrS) \
2468 V(I8x16ShrU)
2469
2470#define SIMD_BINOP_LIST(V) \
2471 V(F64x2Add, kMips64F64x2Add) \
2472 V(F64x2Sub, kMips64F64x2Sub) \
2473 V(F64x2Mul, kMips64F64x2Mul) \
2474 V(F64x2Div, kMips64F64x2Div) \
2475 V(F64x2Min, kMips64F64x2Min) \
2476 V(F64x2Max, kMips64F64x2Max) \
2477 V(F64x2Eq, kMips64F64x2Eq) \
2478 V(F64x2Ne, kMips64F64x2Ne) \
2479 V(F64x2Lt, kMips64F64x2Lt) \
2480 V(F64x2Le, kMips64F64x2Le) \
2481 V(I64x2Eq, kMips64I64x2Eq) \
2482 V(I64x2Ne, kMips64I64x2Ne) \
2483 V(I64x2Add, kMips64I64x2Add) \
2484 V(I64x2Sub, kMips64I64x2Sub) \
2485 V(I64x2Mul, kMips64I64x2Mul) \
2486 V(I64x2GtS, kMips64I64x2GtS) \
2487 V(I64x2GeS, kMips64I64x2GeS) \
2488 V(F32x4Add, kMips64F32x4Add) \
2489 V(F32x4Sub, kMips64F32x4Sub) \
2490 V(F32x4Mul, kMips64F32x4Mul) \
2491 V(F32x4Div, kMips64F32x4Div) \
2492 V(F32x4Max, kMips64F32x4Max) \
2493 V(F32x4Min, kMips64F32x4Min) \
2494 V(F32x4Eq, kMips64F32x4Eq) \
2495 V(F32x4Ne, kMips64F32x4Ne) \
2496 V(F32x4Lt, kMips64F32x4Lt) \
2497 V(F32x4Le, kMips64F32x4Le) \
2498 V(I32x4Add, kMips64I32x4Add) \
2499 V(I32x4Sub, kMips64I32x4Sub) \
2500 V(I32x4Mul, kMips64I32x4Mul) \
2501 V(I32x4MaxS, kMips64I32x4MaxS) \
2502 V(I32x4MinS, kMips64I32x4MinS) \
2503 V(I32x4MaxU, kMips64I32x4MaxU) \
2504 V(I32x4MinU, kMips64I32x4MinU) \
2505 V(I32x4Eq, kMips64I32x4Eq) \
2506 V(I32x4Ne, kMips64I32x4Ne) \
2507 V(I32x4GtS, kMips64I32x4GtS) \
2508 V(I32x4GeS, kMips64I32x4GeS) \
2509 V(I32x4GtU, kMips64I32x4GtU) \
2510 V(I32x4GeU, kMips64I32x4GeU) \
2511 V(I32x4DotI16x8S, kMips64I32x4DotI16x8S) \
2512 V(I16x8Add, kMips64I16x8Add) \
2513 V(I16x8AddSatS, kMips64I16x8AddSatS) \
2514 V(I16x8AddSatU, kMips64I16x8AddSatU) \
2515 V(I16x8Sub, kMips64I16x8Sub) \
2516 V(I16x8SubSatS, kMips64I16x8SubSatS) \
2517 V(I16x8SubSatU, kMips64I16x8SubSatU) \
2518 V(I16x8Mul, kMips64I16x8Mul) \
2519 V(I16x8MaxS, kMips64I16x8MaxS) \
2520 V(I16x8MinS, kMips64I16x8MinS) \
2521 V(I16x8MaxU, kMips64I16x8MaxU) \
2522 V(I16x8MinU, kMips64I16x8MinU) \
2523 V(I16x8Eq, kMips64I16x8Eq) \
2524 V(I16x8Ne, kMips64I16x8Ne) \
2525 V(I16x8GtS, kMips64I16x8GtS) \
2526 V(I16x8GeS, kMips64I16x8GeS) \
2527 V(I16x8GtU, kMips64I16x8GtU) \
2528 V(I16x8GeU, kMips64I16x8GeU) \
2529 V(I16x8RoundingAverageU, kMips64I16x8RoundingAverageU) \
2530 V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
2531 V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
2532 V(I16x8Q15MulRSatS, kMips64I16x8Q15MulRSatS) \
2533 V(I8x16Add, kMips64I8x16Add) \
2534 V(I8x16AddSatS, kMips64I8x16AddSatS) \
2535 V(I8x16AddSatU, kMips64I8x16AddSatU) \
2536 V(I8x16Sub, kMips64I8x16Sub) \
2537 V(I8x16SubSatS, kMips64I8x16SubSatS) \
2538 V(I8x16SubSatU, kMips64I8x16SubSatU) \
2539 V(I8x16MaxS, kMips64I8x16MaxS) \
2540 V(I8x16MinS, kMips64I8x16MinS) \
2541 V(I8x16MaxU, kMips64I8x16MaxU) \
2542 V(I8x16MinU, kMips64I8x16MinU) \
2543 V(I8x16Eq, kMips64I8x16Eq) \
2544 V(I8x16Ne, kMips64I8x16Ne) \
2545 V(I8x16GtS, kMips64I8x16GtS) \
2546 V(I8x16GeS, kMips64I8x16GeS) \
2547 V(I8x16GtU, kMips64I8x16GtU) \
2548 V(I8x16GeU, kMips64I8x16GeU) \
2549 V(I8x16RoundingAverageU, kMips64I8x16RoundingAverageU) \
2550 V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
2551 V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
2552 V(S128And, kMips64S128And) \
2553 V(S128Or, kMips64S128Or) \
2554 V(S128Xor, kMips64S128Xor) \
2555 V(S128AndNot, kMips64S128AndNot)
2556
2557void InstructionSelectorT::VisitS128Const(OpIndex node) { UNIMPLEMENTED(); }
2558
2559void InstructionSelectorT::VisitS128Zero(OpIndex node) {
2560 Mips64OperandGeneratorT g(this);
2561 Emit(kMips64S128Zero, g.DefineAsRegister(node));
2562}
2563#define SIMD_VISIT_SPLAT(Type) \
2564 void InstructionSelectorT::Visit##Type##Splat(OpIndex node) { \
2565 VisitRR(this, kMips64##Type##Splat, node); \
2566 }
2567SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
2568#undef SIMD_VISIT_SPLAT
2569
2570#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
2571 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
2572 VisitRRI(this, kMips64##Type##ExtractLane##Sign, node); \
2573 }
2574SIMD_VISIT_EXTRACT_LANE(F64x2, )
2575SIMD_VISIT_EXTRACT_LANE(F32x4, )
2576SIMD_VISIT_EXTRACT_LANE(I64x2, )
2577SIMD_VISIT_EXTRACT_LANE(I32x4, )
2578SIMD_VISIT_EXTRACT_LANE(I16x8, U)
2579SIMD_VISIT_EXTRACT_LANE(I16x8, S)
2580SIMD_VISIT_EXTRACT_LANE(I8x16, U)
2581SIMD_VISIT_EXTRACT_LANE(I8x16, S)
2582#undef SIMD_VISIT_EXTRACT_LANE
2583
2584#define SIMD_VISIT_REPLACE_LANE(Type) \
2585 void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
2586 VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
2587 }
2588SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
2589#undef SIMD_VISIT_REPLACE_LANE
2590
2591#define SIMD_VISIT_UNOP(Name, instruction) \
2592 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2593 VisitRR(this, instruction, node); \
2594 }
2595SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
2596#undef SIMD_VISIT_UNOP
2597
2598#define SIMD_VISIT_SHIFT_OP(Name) \
2599 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2600 VisitSimdShift(this, kMips64##Name, node); \
2601 }
2602SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
2603#undef SIMD_VISIT_SHIFT_OP
2604
2605#define SIMD_VISIT_BINOP(Name, instruction) \
2606 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2607 VisitRRR(this, instruction, node); \
2608 }
2609SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
2610#undef SIMD_VISIT_BINOP
2611
2612#define SIMD_RELAXED_OP_LIST(V) \
2613 V(F64x2RelaxedMin) \
2614 V(F64x2RelaxedMax) \
2615 V(F32x4RelaxedMin) \
2616 V(F32x4RelaxedMax) \
2617 V(I32x4RelaxedTruncF32x4S) \
2618 V(I32x4RelaxedTruncF32x4U) \
2619 V(I32x4RelaxedTruncF64x2SZero) \
2620 V(I32x4RelaxedTruncF64x2UZero) \
2621 V(I16x8RelaxedQ15MulRS) \
2622 V(I8x16RelaxedLaneSelect) \
2623 V(I16x8RelaxedLaneSelect) \
2624 V(I32x4RelaxedLaneSelect) \
2625 V(I64x2RelaxedLaneSelect)
2626
2627#define SIMD_VISIT_RELAXED_OP(Name) \
2628 void InstructionSelectorT::Visit##Name(OpIndex node) { UNREACHABLE(); }
2629SIMD_RELAXED_OP_LIST(SIMD_VISIT_RELAXED_OP)
2630#undef SIMD_VISIT_RELAXED_OP
2631
2632void InstructionSelectorT::VisitS128Select(OpIndex node) {
2633 VisitRRRR(this, kMips64S128Select, node);
2634}
2635
2636#define SIMD_UNIMP_OP_LIST(V) \
2637 V(F64x2Qfma) \
2638 V(F64x2Qfms) \
2639 V(F32x4Qfma) \
2640 V(F32x4Qfms) \
2641 V(I16x8DotI8x16I7x16S) \
2642 V(I32x4DotI8x16I7x16AddS)
2643
2644#define SIMD_VISIT_UNIMP_OP(Name) \
2645 void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }
2646SIMD_UNIMP_OP_LIST(SIMD_VISIT_UNIMP_OP)
2647
2648#undef SIMD_VISIT_UNIMP_OP
2649#undef SIMD_UNIMP_OP_LIST
2650
2651#define UNIMPLEMENTED_SIMD_FP16_OP_LIST(V) \
2652 V(F16x8Splat) \
2653 V(F16x8ExtractLane) \
2654 V(F16x8ReplaceLane) \
2655 V(F16x8Abs) \
2656 V(F16x8Neg) \
2657 V(F16x8Sqrt) \
2658 V(F16x8Floor) \
2659 V(F16x8Ceil) \
2660 V(F16x8Trunc) \
2661 V(F16x8NearestInt) \
2662 V(F16x8Add) \
2663 V(F16x8Sub) \
2664 V(F16x8Mul) \
2665 V(F16x8Div) \
2666 V(F16x8Min) \
2667 V(F16x8Max) \
2668 V(F16x8Pmin) \
2669 V(F16x8Pmax) \
2670 V(F16x8Eq) \
2671 V(F16x8Ne) \
2672 V(F16x8Lt) \
2673 V(F16x8Le) \
2674 V(F16x8SConvertI16x8) \
2675 V(F16x8UConvertI16x8) \
2676 V(I16x8SConvertF16x8) \
2677 V(I16x8UConvertF16x8) \
2678 V(F32x4PromoteLowF16x8) \
2679 V(F16x8DemoteF32x4Zero) \
2680 V(F16x8DemoteF64x2Zero) \
2681 V(F16x8Qfma) \
2682 V(F16x8Qfms)
2683
2684#define SIMD_VISIT_UNIMPL_FP16_OP(Name) \
2685 void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }
2686
2687UNIMPLEMENTED_SIMD_FP16_OP_LIST(SIMD_VISIT_UNIMPL_FP16_OP)
2688#undef SIMD_VISIT_UNIMPL_FP16_OP
2689#undef UNIMPLEMENTED_SIMD_FP16_OP_LIST
2690
2691#if V8_ENABLE_WEBASSEMBLY
2692
2693void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNIMPLEMENTED(); }
2694
2695#endif // V8_ENABLE_WEBASSEMBLY
2696
2697void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) { UNIMPLEMENTED(); }
2698
2699void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
2700 OperandGenerator g(this);
2701 auto input = g.UseRegister(this->input_at(node, 0));
2702 Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
2703}
2704
2705void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
2706 VisitRR(this, kMips64Seb, node);
2707}
2708
2709void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
2710 VisitRR(this, kMips64Seh, node);
2711}
2712
2713void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
2714 VisitRR(this, kMips64Seb, node);
2715}
2716
2717void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
2718 VisitRR(this, kMips64Seh, node);
2719}
2720
2721void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
2722 UNIMPLEMENTED();
2723}
2724
2725void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {
2726 VisitUniqueRRR(this, kMips64F32x4Pmin, node);
2727}
2728
2729void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {
2730 VisitUniqueRRR(this, kMips64F32x4Pmax, node);
2731}
2732
2733void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {
2734 VisitUniqueRRR(this, kMips64F64x2Pmin, node);
2735}
2736
2737void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {
2738 VisitUniqueRRR(this, kMips64F64x2Pmax, node);
2739}
2740
2741#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE) \
2742 void InstructionSelectorT::Visit##OPCODE1##ExtMulLow##OPCODE2( \
2743 OpIndex node) { \
2744 UNIMPLEMENTED(); \
2745 } \
2746 void InstructionSelectorT::Visit##OPCODE1##ExtMulHigh##OPCODE2( \
2747 OpIndex node) { \
2748 UNIMPLEMENTED(); \
2749 }
2750
2751VISIT_EXT_MUL(I64x2, I32x4S, MSAS32)
2752VISIT_EXT_MUL(I64x2, I32x4U, MSAU32)
2753VISIT_EXT_MUL(I32x4, I16x8S, MSAS16)
2754VISIT_EXT_MUL(I32x4, I16x8U, MSAU16)
2755VISIT_EXT_MUL(I16x8, I8x16S, MSAS8)
2756VISIT_EXT_MUL(I16x8, I8x16U, MSAU8)
2757#undef VISIT_EXT_MUL
2758
2759#define VISIT_EXTADD_PAIRWISE(OPCODE, TYPE) \
2760 void InstructionSelectorT::Visit##OPCODE(OpIndex node) { UNIMPLEMENTED(); }
2761VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S, MSAS8)
2762VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U, MSAU8)
2763VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, MSAS16)
2764VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, MSAU16)
2765#undef VISIT_EXTADD_PAIRWISE
2766
2767void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
2768 int first_input_index,
2769 OpIndex node) {
2770 UNREACHABLE();
2771}
2772
2773// static
2774MachineOperatorBuilder::Flags
2775InstructionSelector::SupportedMachineOperatorFlags() {
2776 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2777 return flags | MachineOperatorBuilder::kWord32Ctz |
2778 MachineOperatorBuilder::kWord64Ctz |
2779 MachineOperatorBuilder::kWord32Popcnt |
2780 MachineOperatorBuilder::kWord64Popcnt |
2781 MachineOperatorBuilder::kWord32ShiftIsSafe |
2782 MachineOperatorBuilder::kInt32DivIsSafe |
2783 MachineOperatorBuilder::kUint32DivIsSafe |
2784 MachineOperatorBuilder::kFloat64RoundDown |
2785 MachineOperatorBuilder::kFloat32RoundDown |
2786 MachineOperatorBuilder::kFloat64RoundUp |
2787 MachineOperatorBuilder::kFloat32RoundUp |
2788 MachineOperatorBuilder::kFloat64RoundTruncate |
2789 MachineOperatorBuilder::kFloat32RoundTruncate |
2790 MachineOperatorBuilder::kFloat64RoundTiesEven |
2791 MachineOperatorBuilder::kFloat32RoundTiesEven;
2792}
2793
2794// static
2795MachineOperatorBuilder::AlignmentRequirements
2796InstructionSelector::AlignmentRequirements() {
2797 if (kArchVariant == kMips64r6) {
2798 return MachineOperatorBuilder::AlignmentRequirements::
2799 FullUnalignedAccessSupport();
2800 } else {
2801 DCHECK_EQ(kMips64r2, kArchVariant);
2802 return MachineOperatorBuilder::AlignmentRequirements::
2803 NoUnalignedAccessSupport();
2804 }
2805}
2806
2807#undef SIMD_BINOP_LIST
2808#undef SIMD_SHIFT_OP_LIST
2809#undef SIMD_RELAXED_OP_LIST
2810#undef SIMD_UNOP_LIST
2811#undef SIMD_TYPE_LIST
2812#undef TRACE
2813
2814} // namespace compiler
2815} // namespace internal
2816} // namespace v8