v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-selector-loong64.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <optional>
6
7#include "src/base/bits.h"
8#include "src/base/logging.h"
14
15namespace v8 {
16namespace internal {
17namespace compiler {
18
19using namespace turboshaft; // NOLINT(build/namespaces)
20
21#define TRACE(...) PrintF(__VA_ARGS__)
22
23// Adds loong64-specific methods for generating InstructionOperands.
24class Loong64OperandGeneratorT final : public OperandGeneratorT {
25 public:
26  explicit Loong64OperandGeneratorT(InstructionSelectorT* selector)
27      : OperandGeneratorT(selector) {}
28
29  InstructionOperand UseOperand(OpIndex node, InstructionCode opcode) {
30 if (CanBeImmediate(node, opcode)) {
31 return UseImmediate(node);
32 }
33 return UseRegister(node);
34 }
35
36 // Use the zero register if the node has the immediate value zero, otherwise
37 // assign a register.
38  InstructionOperand UseRegisterOrImmediateZero(OpIndex node) {
39 if (const ConstantOp* constant =
40 selector()->Get(node).TryCast<ConstantOp>()) {
41 if ((constant->IsIntegral() && constant->integral() == 0) ||
42 (constant->kind == ConstantOp::Kind::kFloat32 &&
43 constant->float32().get_bits() == 0) ||
44 (constant->kind == ConstantOp::Kind::kFloat64 &&
45 constant->float64().get_bits() == 0))
46 return UseImmediate(node);
47 }
48 return UseRegister(node);
49 }
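  // For illustration: a constant 0 (or a float32/float64 whose bit pattern is
  // zero, i.e. +0.0) is emitted as an immediate here, so the code generator
  // can read it from the hard-wired zero register rather than materializing
  // it in a scratch register first.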
50
52 int64_t unused;
53 return selector()->MatchSignedIntegralConstant(node, &unused);
54 }
55
56 std::optional<int64_t> GetOptionalIntegerConstant(OpIndex operation) {
57 if (int64_t constant; MatchSignedIntegralConstant(operation, &constant)) {
58 return constant;
59 }
60 return std::nullopt;
61 }
62
63  bool CanBeImmediate(OpIndex node, InstructionCode mode) {
64 const ConstantOp* constant = selector()->Get(node).TryCast<ConstantOp>();
65 if (!constant) return false;
66 if (constant->kind == ConstantOp::Kind::kCompressedHeapObject) {
67 if (!COMPRESS_POINTERS_BOOL) return false;
68 // For builtin code we need static roots
69 if (selector()->isolate()->bootstrapper() && !V8_STATIC_ROOTS_BOOL) {
70 return false;
71 }
72 const RootsTable& roots_table = selector()->isolate()->roots_table();
73 RootIndex root_index;
74 Handle<HeapObject> value = constant->handle();
75 if (roots_table.IsRootHandle(value, &root_index)) {
76 if (!RootsTable::IsReadOnly(root_index)) return false;
77      return CanBeImmediate(MacroAssemblerBase::ReadOnlyRootPtr(
78 root_index, selector()->isolate()),
79 mode);
80 }
81 return false;
82 }
83
84 int64_t value;
85 return selector()->MatchSignedIntegralConstant(node, &value) &&
86 CanBeImmediate(value, mode);
87 }
88
89 bool CanBeImmediate(int64_t value, InstructionCode opcode) {
90    switch (ArchOpcodeField::decode(opcode)) {
91 case kLoong64Cmp32:
92 case kLoong64Cmp64:
93 return true;
94 case kLoong64Sll_w:
95 case kLoong64Srl_w:
96 case kLoong64Sra_w:
97 return is_uint5(value);
98 case kLoong64Sll_d:
99 case kLoong64Srl_d:
100 case kLoong64Sra_d:
101 return is_uint6(value);
102 case kLoong64And:
103 case kLoong64And32:
104 case kLoong64Or:
105 case kLoong64Or32:
106 case kLoong64Xor:
107 case kLoong64Xor32:
108 case kLoong64Tst:
109 return is_uint12(value);
110 case kLoong64Ld_w:
111 case kLoong64St_w:
112 case kLoong64Ld_d:
113 case kLoong64St_d:
114 case kAtomicLoadWord32:
115 case kAtomicStoreWord32:
116 case kLoong64Word64AtomicLoadUint64:
117 case kLoong64Word64AtomicStoreWord64:
118 case kLoong64StoreCompressTagged:
119 return (is_int12(value) || (is_int16(value) && ((value & 0b11) == 0)));
120 default:
121 return is_int12(value);
122 }
123 }
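  // Examples of the ranges checked above (illustrative):
  //   kLoong64Sll_w with value 31   -> is_uint5 holds, use an immediate;
  //                 with value 32   -> out of range, fall back to a register.
  //   kLoong64And32 with value 4095 -> is_uint12 holds; 4096 does not.
  //   kLoong64Ld_d  with value 2047 -> is_int12 holds;
  //                 with value 2048 -> not int12, but int16 and 4-aligned, so
  //                                    it is still accepted as an immediate.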
124
125 private:
126 bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
127 TRACE("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__);
128 return false;
129 }
130};
131
132static void VisitRR(InstructionSelectorT* selector, ArchOpcode opcode,
133 OpIndex node) {
134 Loong64OperandGeneratorT g(selector);
135 selector->Emit(opcode, g.DefineAsRegister(node),
136 g.UseRegister(selector->input_at(node, 0)));
137}
138
139static void VisitRRI(InstructionSelectorT* selector, ArchOpcode opcode,
140 OpIndex node) {
142}
143
144static void VisitSimdShift(InstructionSelectorT* selector, ArchOpcode opcode,
145 OpIndex node) {
146 Loong64OperandGeneratorT g(selector);
147 OpIndex rhs = selector->input_at(node, 1);
148 if (selector->Get(rhs).TryCast<ConstantOp>()) {
149 selector->Emit(opcode, g.DefineAsRegister(node),
150 g.UseRegister(selector->input_at(node, 0)),
151 g.UseImmediate(selector->input_at(node, 1)));
152 } else {
153 selector->Emit(opcode, g.DefineAsRegister(node),
154 g.UseRegister(selector->input_at(node, 0)),
155 g.UseRegister(selector->input_at(node, 1)));
156 }
157}
158
159static void VisitRRIR(InstructionSelectorT* selector, ArchOpcode opcode,
160 OpIndex node) {
162}
163
164void VisitRRR(InstructionSelectorT* selector, ArchOpcode opcode, OpIndex node) {
165 Loong64OperandGeneratorT g(selector);
166 selector->Emit(opcode, g.DefineAsRegister(node),
167 g.UseRegister(selector->input_at(node, 0)),
168 g.UseRegister(selector->input_at(node, 1)));
169}
170
171static void VisitUniqueRRR(InstructionSelectorT* selector, ArchOpcode opcode,
172 OpIndex node) {
173 Loong64OperandGeneratorT g(selector);
174 selector->Emit(opcode, g.DefineAsRegister(node),
175 g.UseUniqueRegister(selector->input_at(node, 0)),
176 g.UseUniqueRegister(selector->input_at(node, 1)));
177}
178
180 OpIndex node) {
182}
183
184static void VisitRRO(InstructionSelectorT* selector, ArchOpcode opcode,
185 OpIndex node) {
186 Loong64OperandGeneratorT g(selector);
187 selector->Emit(opcode, g.DefineAsRegister(node),
188 g.UseRegister(selector->input_at(node, 0)),
189 g.UseOperand(selector->input_at(node, 1), opcode));
190}
191
192struct ExtendingLoadMatcher {
193  ExtendingLoadMatcher(OpIndex node, InstructionSelectorT* selector)
194 : matches_(false), selector_(selector), immediate_(0) {
195 Initialize(node);
196 }
197
198 bool Matches() const { return matches_; }
199
200 OpIndex base() const {
201 DCHECK(Matches());
202 return base_;
203 }
204 int64_t immediate() const {
205 DCHECK(Matches());
206 return immediate_;
207 }
208  ArchOpcode opcode() const {
209 DCHECK(Matches());
210 return opcode_;
211 }
212
213 private:
214  bool matches_;
215  InstructionSelectorT* selector_;
216  OpIndex base_;
217 int64_t immediate_;
218  ArchOpcode opcode_;
219
220  void Initialize(OpIndex node) {
221 const ShiftOp& shift = selector_->Get(node).template Cast<ShiftOp>();
222 DCHECK(shift.kind == ShiftOp::Kind::kShiftRightArithmetic ||
223 shift.kind == ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros);
224 // When loading a 64-bit value and shifting by 32, we should
225 // just load and sign-extend the interesting 4 bytes instead.
226 // This happens, for example, when we're loading and untagging SMIs.
227 const Operation& lhs = selector_->Get(shift.left());
228 int64_t constant_rhs;
229
230 if (lhs.Is<LoadOp>() &&
231 selector_->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
232 constant_rhs == 32 && selector_->CanCover(node, shift.left())) {
233      Loong64OperandGeneratorT g(selector_);
234
235 const LoadOp& load = lhs.Cast<LoadOp>();
236 base_ = load.base();
237 opcode_ = kLoong64Ld_w;
238 if (load.index().has_value()) {
239 int64_t index_constant;
240 if (selector_->MatchIntegralWord64Constant(load.index().value(),
241 &index_constant)) {
242 DCHECK_EQ(load.element_size_log2, 0);
243 immediate_ = index_constant + 4;
244 matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
245 }
246 } else {
247 immediate_ = load.offset + 4;
248 matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
249 }
250 }
251 }
252};
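// For illustration: on little-endian LoongArch64,
//   Word64ShiftRightArithmetic(Load[base + k], 32)
// (e.g. untagging a Smi kept in the upper half of a 64-bit word) is matched
// here and can later be replaced by a single sign-extending 32-bit load,
// Ld_w at offset k + 4, so the 64-bit load and the shift both disappear.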
253
254bool TryEmitExtendingLoad(InstructionSelectorT* selector, OpIndex node,
255 OpIndex output_node) {
256 ExtendingLoadMatcher m(node, selector);
257 Loong64OperandGeneratorT g(selector);
258 if (m.Matches()) {
259 InstructionOperand inputs[2];
260 inputs[0] = g.UseRegister(m.base());
261 InstructionCode opcode =
262 m.opcode() | AddressingModeField::encode(kMode_MRI);
263 DCHECK(is_int32(m.immediate()));
264 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
265 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
266 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
267 inputs);
268 return true;
269 }
270 return false;
271}
272
273static bool TryMatchImmediate(InstructionSelectorT* selector,
274 InstructionCode* opcode_return, OpIndex node,
275 size_t* input_count_return, InstructionOperand* inputs) {
276 Loong64OperandGeneratorT g(selector);
277 if (g.CanBeImmediate(node, *opcode_return)) {
278 *opcode_return |= AddressingModeField::encode(kMode_MRI);
279 inputs[0] = g.UseImmediate(node);
280 *input_count_return = 1;
281 return true;
282 }
283 return false;
284}
285
286static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
287 InstructionCode opcode, bool has_reverse_opcode,
288 InstructionCode reverse_opcode,
289 FlagsContinuationT* cont) {
290 Loong64OperandGeneratorT g(selector);
291 InstructionOperand inputs[2];
292 size_t input_count = 0;
293 InstructionOperand outputs[1];
294 size_t output_count = 0;
295
296 const Operation& binop = selector->Get(node);
297 OpIndex left_node = binop.input(0);
298 OpIndex right_node = binop.input(1);
299
300 if (TryMatchImmediate(selector, &opcode, right_node, &input_count,
301 &inputs[1])) {
302 inputs[0] = g.UseRegister(left_node);
303 input_count++;
304 } else if (has_reverse_opcode &&
305 TryMatchImmediate(selector, &reverse_opcode, left_node,
306 &input_count, &inputs[1])) {
307 inputs[0] = g.UseRegister(right_node);
308 opcode = reverse_opcode;
309 input_count++;
310 } else {
311 inputs[input_count++] = g.UseRegister(left_node);
312 inputs[input_count++] = g.UseOperand(right_node, opcode);
313 }
314
315 outputs[output_count++] = g.DefineAsRegister(node);
316
317 DCHECK_NE(0u, input_count);
318 DCHECK_EQ(1u, output_count);
319 DCHECK_GE(arraysize(inputs), input_count);
320 DCHECK_GE(arraysize(outputs), output_count);
321
322 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
323 inputs, cont);
324}
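// For illustration: Word32BitwiseAnd(x, 0xFF) matches the constant on the
// right and is emitted with an immediate operand; since VisitWord32And passes
// kLoong64And32 as its own reverse opcode (the operation is commutative),
// Word32BitwiseAnd(0xFF, x) is handled identically with the operands swapped.
// Int32Sub passes no reverse opcode, so a constant on its left stays in a
// register.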
325
326static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
327 InstructionCode opcode, bool has_reverse_opcode,
328 InstructionCode reverse_opcode) {
329  FlagsContinuationT cont;
330 VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
331}
332
333static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
334 InstructionCode opcode, FlagsContinuationT* cont) {
335 VisitBinop(selector, node, opcode, false, kArchNop, cont);
336}
337
338static void VisitBinop(InstructionSelectorT* selector, OpIndex node,
339 InstructionCode opcode) {
340 VisitBinop(selector, node, opcode, false, kArchNop);
341}
342
343void InstructionSelectorT::VisitStackSlot(OpIndex node) {
344 const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
345 int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
346 stack_slot.is_tagged);
347 OperandGenerator g(this);
348
349 Emit(kArchStackSlot, g.DefineAsRegister(node),
350 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
351}
352
353void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
354 Loong64OperandGeneratorT g(this);
355 Emit(kArchAbortCSADcheck, g.NoOutput(),
356 g.UseFixed(this->input_at(node, 0), a0));
357}
358
359void EmitLoad(InstructionSelectorT* selector, OpIndex node,
360 InstructionCode opcode, turboshaft::OpIndex output = OpIndex{}) {
361 Loong64OperandGeneratorT g(selector);
362 const Operation& op = selector->Get(node);
363 const LoadOp& load = op.Cast<LoadOp>();
364
365 // The LoadStoreSimplificationReducer transforms all loads into
366 // *(base + index).
367 OpIndex base = load.base();
368 OpIndex index = load.index().value();
369 CHECK_EQ(load.offset, 0);
370 DCHECK_EQ(load.element_size_log2, 0);
371
372 InstructionOperand inputs[3];
373 size_t input_count = 0;
374 InstructionOperand output_op;
375
376 // If output is valid, use that as the output register. This is used when we
377 // merge a conversion into the load.
378 output_op = g.DefineAsRegister(output.valid() ? output : node);
379
380 const Operation& base_op = selector->Get(base);
381 int64_t index_value;
382 if (base_op.Is<Opmask::kExternalConstant>() &&
383 selector->MatchSignedIntegralConstant(index, &index_value)) {
384 const ConstantOp& constant_base = base_op.Cast<ConstantOp>();
385    if (selector->CanAddressRelativeToRootsRegister(
386 constant_base.external_reference())) {
387 ptrdiff_t const delta =
388 index_value +
389          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
390 selector->isolate(), constant_base.external_reference());
391 input_count = 1;
392 // Check that the delta is a 32-bit integer due to the limitations of
393 // immediate operands.
394 if (is_int32(delta)) {
395 inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
396 opcode |= AddressingModeField::encode(kMode_Root);
397 selector->Emit(opcode, 1, &output_op, input_count, inputs);
398 return;
399 }
400 }
401 }
402
403 if (base_op.Is<LoadRootRegisterOp>()) {
404 int64_t index_value;
405 CHECK(selector->MatchSignedIntegralConstant(index, &index_value));
406 input_count = 1;
407 inputs[0] = g.UseImmediate64(index_value);
408 opcode |= AddressingModeField::encode(kMode_Root);
409 selector->Emit(opcode, 1, &output_op, input_count, inputs);
410 return;
411 }
412
413 if (g.CanBeImmediate(index, opcode)) {
414 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
415 g.DefineAsRegister(output.valid() ? output : node),
416 g.UseRegister(base), g.UseImmediate(index));
417 } else {
418 selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
419 g.DefineAsRegister(output.valid() ? output : node),
420 g.UseRegister(base), g.UseRegister(index));
421 }
422}
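// Addressing-mode selection above, by example:
//   load from ExternalConstant + constant index -> kMode_Root with a single
//     32-bit immediate (the delta from the root register), when it fits;
//   load from the root register itself          -> kMode_Root with a 64-bit
//     immediate index;
//   constant index that fits the opcode         -> kMode_MRI (base register
//     plus immediate);
//   anything else                               -> kMode_MRR (base register
//     plus index register).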
423
424void InstructionSelectorT::VisitStoreLane(OpIndex node) { UNREACHABLE(); }
425
426void InstructionSelectorT::VisitLoadLane(OpIndex node) { UNREACHABLE(); }
427
429
430namespace {
431
432ArchOpcode GetLoadOpcode(turboshaft::MemoryRepresentation loaded_rep,
433                         RegisterRepresentation result_rep) {
434 // NOTE: The meaning of `loaded_rep` = `MemoryRepresentation::AnyTagged()` is
435 // that we are loading a compressed tagged field, while `result_rep` =
436 // `RegisterRepresentation::Tagged()` refers to an uncompressed tagged value.
437 switch (loaded_rep) {
440 return kLoong64Ld_b;
443 return kLoong64Ld_bu;
446 return kLoong64Ld_h;
449 return kLoong64Ld_hu;
453 return kLoong64Ld_w;
455 case MemoryRepresentation::Uint64():
457 return kLoong64Ld_d;
462 return kLoong64Fld_s;
465 return kLoong64Fld_d;
466#ifdef V8_COMPRESS_POINTERS
468 case MemoryRepresentation::TaggedPointer():
469 if (result_rep == RegisterRepresentation::Compressed()) {
470 return kLoong64Ld_wu;
471 }
473 return kLoong64LoadDecompressTagged;
475 if (result_rep == RegisterRepresentation::Compressed()) {
476 return kLoong64Ld_wu;
477 }
479 return kLoong64LoadDecompressTaggedSigned;
480#else
482 case MemoryRepresentation::TaggedPointer():
483 case MemoryRepresentation::TaggedSigned():
484 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
485 return kLoong64Ld_d;
486#endif
488 case MemoryRepresentation::UncompressedTaggedPointer():
489 case MemoryRepresentation::UncompressedTaggedSigned():
490 DCHECK_EQ(result_rep, RegisterRepresentation::Tagged());
491 return kLoong64Ld_d;
494 return kLoong64LoadDecompressProtected;
496 UNREACHABLE();
498 return kLoong64LoadDecodeSandboxedPointer;
499 case MemoryRepresentation::Simd128(): // Fall through.
500 case MemoryRepresentation::Simd256():
501 UNREACHABLE();
502 }
503}
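// For example, with pointer compression enabled, a tagged field load whose
// result is only needed in compressed form stays a plain 32-bit
// zero-extending load (kLoong64Ld_wu), while a load that must produce an
// uncompressed tagged value selects one of the decompressing opcodes instead.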
504
505ArchOpcode GetStoreOpcode(turboshaft::MemoryRepresentation stored_rep) {
506 switch (stored_rep) {
508 case MemoryRepresentation::Uint8():
509 return kLoong64St_b;
511 case MemoryRepresentation::Uint16():
512 return kLoong64St_h;
514 case MemoryRepresentation::Uint32():
515 return kLoong64St_w;
517 case MemoryRepresentation::Uint64():
518 return kLoong64St_d;
522 return kLoong64Fst_s;
524 return kLoong64Fst_d;
526 case MemoryRepresentation::TaggedPointer():
527 case MemoryRepresentation::TaggedSigned():
528 return kLoong64StoreCompressTagged;
530 case MemoryRepresentation::UncompressedTaggedPointer():
531 case MemoryRepresentation::UncompressedTaggedSigned():
532 return kLoong64St_d;
534 // We never store directly to protected pointers from generated code.
535 UNREACHABLE();
537 return kLoong64StoreIndirectPointer;
539 return kLoong64StoreEncodeSandboxedPointer;
541 case MemoryRepresentation::Simd256():
542 UNREACHABLE();
543 }
544}
545
546} // namespace
547
548void InstructionSelectorT::VisitLoad(OpIndex node) {
549 auto load = this->load_view(node);
550 InstructionCode opcode = kArchNop;
551
552 opcode = GetLoadOpcode(load.ts_loaded_rep(), load.ts_result_rep());
553
554 bool traps_on_null;
555 if (load.is_protected(&traps_on_null)) {
556 if (traps_on_null) {
558 } else {
560 }
561 }
562
563 EmitLoad(this, node, opcode);
564}
565
566void InstructionSelectorT::VisitProtectedLoad(OpIndex node) { VisitLoad(node); }
567
568void InstructionSelectorT::VisitStorePair(OpIndex node) { UNREACHABLE(); }
569
570void InstructionSelectorT::VisitStore(OpIndex node) {
571 Loong64OperandGeneratorT g(this);
572 TurboshaftAdapter::StoreView store_view = this->store_view(node);
575 OpIndex index = this->value(store_view.index());
576 OpIndex value = store_view.value();
577
578 WriteBarrierKind write_barrier_kind =
581
582 if (v8_flags.enable_unconditional_write_barriers &&
584 write_barrier_kind = kFullWriteBarrier;
585 }
586
587 // TODO(loong64): I guess this could be done in a better way.
588 if (write_barrier_kind != kNoWriteBarrier &&
589 !v8_flags.disable_write_barriers) {
591 AddressingMode addressing_mode;
592 InstructionOperand inputs[4];
593 size_t input_count = 0;
594 inputs[input_count++] = g.UseUniqueRegister(base);
595 // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
596 // must check kArithmeticImm as well as kLoadStoreImm64.
597 if (g.CanBeImmediate(index, kLoong64Add_d)) {
598 inputs[input_count++] = g.UseImmediate(index);
599 addressing_mode = kMode_MRI;
600 } else {
601 inputs[input_count++] = g.UseUniqueRegister(index);
602 addressing_mode = kMode_MRR;
603 }
604 inputs[input_count++] = g.UseUniqueRegister(value);
605 RecordWriteMode record_write_mode =
606 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
609 DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier);
610 // In this case we need to add the IndirectPointerTag as additional input.
611 code = kArchStoreIndirectWithWriteBarrier;
613 inputs[input_count++] = g.UseImmediate64(static_cast<int64_t>(tag));
614 } else {
615 code = kArchStoreWithWriteBarrier;
616 }
617 code |= AddressingModeField::encode(addressing_mode);
618 code |= RecordWriteModeField::encode(record_write_mode);
621 }
622 Emit(code, 0, nullptr, input_count, inputs);
623 return;
624 }
625
627 code = GetStoreOpcode(store_view.ts_stored_rep());
628
629 std::optional<ExternalReference> external_base;
630 {
631 ExternalReference value;
632 if (this->MatchExternalConstant(base, &value)) {
633 external_base = value;
634 }
635 }
636
637 std::optional<int64_t> constant_index;
638 if (store_view.index().valid()) {
639 OpIndex index = this->value(store_view.index());
640 constant_index = g.GetOptionalIntegerConstant(index);
641 }
642 if (external_base.has_value() && constant_index.has_value() &&
643 CanAddressRelativeToRootsRegister(*external_base)) {
644 ptrdiff_t const delta =
645 *constant_index +
646        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
647 isolate(), *external_base);
648 // Check that the delta is a 32-bit integer due to the limitations of
649 // immediate operands.
650 if (is_int32(delta)) {
651 Emit(code | AddressingModeField::encode(kMode_Root), g.NoOutput(),
652 g.UseImmediate(static_cast<int32_t>(delta)),
653 g.UseRegisterOrImmediateZero(value));
654 return;
655 }
656 }
657
658 if (this->is_load_root_register(base)) {
659 // This will only work if {index} is a constant.
660 Emit(code | AddressingModeField::encode(kMode_Root), g.NoOutput(),
661 g.UseImmediate(index), g.UseRegisterOrImmediateZero(value));
662 return;
663 }
664
667 } else if (store_view.access_kind() ==
670 }
671
672 if (g.CanBeImmediate(index, code)) {
673 Emit(code | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
674 g.UseRegister(base), g.UseImmediate(index),
675 g.UseRegisterOrImmediateZero(value));
676 } else {
677 Emit(code | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
678 g.UseRegister(base), g.UseRegister(index),
679 g.UseRegisterOrImmediateZero(value));
680 }
681}
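// For illustration: a tagged store that needs a write barrier becomes
// kArchStoreWithWriteBarrier (or the indirect-pointer variant) with base,
// index and value in unique registers plus the record-write mode; a
// barrier-free store with a small constant index becomes a single store with
// kMode_MRI; and a store relative to an external reference can fold the whole
// address into a root-register delta (kMode_Root) when it fits in 32 bits.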
682
683void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
684 VisitStore(node);
685}
686
687void InstructionSelectorT::VisitWord32And(turboshaft::OpIndex node) {
688  // TODO(LOONG_dev): This may be optimized as in Turbofan.
689 VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
690}
691
692void InstructionSelectorT::VisitWord64And(OpIndex node) {
693  // TODO(LOONG_dev): This may be optimized as in Turbofan.
694 VisitBinop(this, node, kLoong64And, true, kLoong64And);
695}
696
697void InstructionSelectorT::VisitWord32Or(OpIndex node) {
698 VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32);
699}
700
701void InstructionSelectorT::VisitWord64Or(OpIndex node) {
702 VisitBinop(this, node, kLoong64Or, true, kLoong64Or);
703}
704
705void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
706  // TODO(LOONG_dev): This may be optimized as in Turbofan.
707 VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32);
708}
709
710void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
711  // TODO(LOONG_dev): This may be optimized as in Turbofan.
712 VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor);
713}
714
715void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
716  // TODO(LOONG_dev): This may be optimized as in Turbofan.
717 VisitRRO(this, kLoong64Sll_w, node);
718}
719
720void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
721 VisitRRO(this, kLoong64Srl_w, node);
722}
723
724void InstructionSelectorT::VisitWord32Sar(turboshaft::OpIndex node) {
725  // TODO(LOONG_dev): This may be optimized as in Turbofan.
726 VisitRRO(this, kLoong64Sra_w, node);
727}
728
729void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
730 const ShiftOp& shift_op = this->Get(node).template Cast<ShiftOp>();
731 const Operation& lhs = this->Get(shift_op.left());
732 const Operation& rhs = this->Get(shift_op.right());
733 if ((lhs.Is<Opmask::kChangeInt32ToInt64>() ||
734       lhs.Is<Opmask::kChangeUint32ToUint64>()) &&
735 rhs.Is<Opmask::kWord32Constant>()) {
736 int64_t shift_by = rhs.Cast<ConstantOp>().signed_integral();
737 if (base::IsInRange(shift_by, 32, 63) && CanCover(node, shift_op.left())) {
738 Loong64OperandGeneratorT g(this);
739 // There's no need to sign/zero-extend to 64-bit if we shift out the
740 // upper 32 bits anyway.
741 Emit(kLoong64Sll_d, g.DefineAsRegister(node),
742 g.UseRegister(lhs.Cast<ChangeOp>().input()),
743 g.UseImmediate(shift_by));
744 return;
745 }
746 }
747 VisitRRO(this, kLoong64Sll_d, node);
748}
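// Example of the peephole above: Word64ShiftLeft(ChangeInt32ToInt64(x), 40)
// discards the upper 32 bits anyway, so the sign-extension is dropped and a
// single kLoong64Sll_d with immediate 40 is emitted directly on x.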
749
750void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
751  // TODO(LOONG_dev): This may be optimized as in Turbofan.
752 VisitRRO(this, kLoong64Srl_d, node);
753}
754
755void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
756 if (TryEmitExtendingLoad(this, node, node)) return;
757
758  // Replace Word64Sar(ChangeInt32ToInt64(x), imm) with a 32-bit arithmetic
759  // shift (kLoong64Sra_w) where possible.
760 const ShiftOp& shiftop = Get(node).Cast<ShiftOp>();
761 const Operation& lhs = Get(shiftop.left());
762
763 int64_t constant_rhs;
764 if (lhs.Is<Opmask::kChangeInt32ToInt64>() &&
765 MatchIntegralWord64Constant(shiftop.right(), &constant_rhs) &&
766 is_uint5(constant_rhs) && CanCover(node, shiftop.left())) {
767 OpIndex input = lhs.Cast<ChangeOp>().input();
768 if (!Get(input).Is<LoadOp>() || !CanCover(shiftop.left(), input)) {
769 Loong64OperandGeneratorT g(this);
770 int right = static_cast<int>(constant_rhs);
771 Emit(kLoong64Sra_w, g.DefineAsRegister(node), g.UseRegister(input),
772 g.UseImmediate(right));
773 return;
774 }
775 }
776
777 VisitRRO(this, kLoong64Sra_d, node);
778}
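// Example: Word64ShiftRightArithmetic(ChangeInt32ToInt64(x), 3) is emitted as
// a 32-bit kLoong64Sra_w on x with immediate 3, since a 32-bit arithmetic
// shift already yields the correctly sign-extended 64-bit result; the pattern
// is skipped when the input is a coverable load, which can instead be merged
// into a sign-extending load in VisitChangeInt32ToInt64.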
779
780void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNREACHABLE(); }
781
782void InstructionSelectorT::VisitWord64Rol(OpIndex node) { UNREACHABLE(); }
783
784void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
785 VisitRRO(this, kLoong64Rotr_w, node);
786}
787
788void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
789 VisitRRO(this, kLoong64Rotr_d, node);
790}
791
792void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
793 UNREACHABLE();
794}
795
796void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {
797 UNREACHABLE();
798}
799
800void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
801 VisitRR(this, kLoong64ByteSwap32, node);
802}
803
804void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
805 VisitRR(this, kLoong64ByteSwap64, node);
806}
807
808void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
809 UNREACHABLE();
810}
811
812void InstructionSelectorT::VisitWord32Clz(OpIndex node) {
813 VisitRR(this, kLoong64Clz_w, node);
814}
815
816void InstructionSelectorT::VisitWord64Clz(OpIndex node) {
817 VisitRR(this, kLoong64Clz_d, node);
818}
819
820void InstructionSelectorT::VisitWord32Ctz(OpIndex node) { UNREACHABLE(); }
821
822void InstructionSelectorT::VisitWord64Ctz(OpIndex node) { UNREACHABLE(); }
823
824void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) { UNREACHABLE(); }
825
826void InstructionSelectorT::VisitWord64Popcnt(OpIndex node) { UNREACHABLE(); }
827
828void InstructionSelectorT::VisitInt32Add(OpIndex node) {
829  // TODO(LOONG_dev): This may be optimized as in Turbofan.
830 VisitBinop(this, node, kLoong64Add_w, true, kLoong64Add_w);
831}
832
833void InstructionSelectorT::VisitInt64Add(OpIndex node) {
834  // TODO(LOONG_dev): This may be optimized as in Turbofan.
835 VisitBinop(this, node, kLoong64Add_d, true, kLoong64Add_d);
836}
837
838void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
839 VisitBinop(this, node, kLoong64Sub_w);
840}
841
842void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
843 VisitBinop(this, node, kLoong64Sub_d);
844}
845
846void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
847  // TODO(LOONG_dev): This may be optimized as in Turbofan.
848 VisitBinop(this, node, kLoong64Mul_w, true, kLoong64Mul_w);
849}
850
851void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
852 VisitRRR(this, kLoong64Mulh_w, node);
853}
854
855void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
856 VisitRRR(this, kLoong64Mulh_d, node);
857}
858
859void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
860 VisitRRR(this, kLoong64Mulh_wu, node);
861}
862
863void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
864 VisitRRR(this, kLoong64Mulh_du, node);
865}
866
867void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
868  // TODO(LOONG_dev): This may be optimized as in Turbofan.
869 VisitBinop(this, node, kLoong64Mul_d, true, kLoong64Mul_d);
870}
871
872void InstructionSelectorT::VisitInt32Div(OpIndex node) {
873 Loong64OperandGeneratorT g(this);
874
875 auto [left, right] = Inputs<WordBinopOp>(node);
876 Emit(kLoong64Div_w, g.DefineSameAsFirst(node), g.UseRegister(left),
877 g.UseRegister(right));
878}
879
880void InstructionSelectorT::VisitUint32Div(OpIndex node) {
881 Loong64OperandGeneratorT g(this);
882
883 auto [left, right] = Inputs<WordBinopOp>(node);
884 Emit(kLoong64Div_wu, g.DefineSameAsFirst(node), g.UseRegister(left),
885 g.UseRegister(right));
886}
887
888void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
889 Loong64OperandGeneratorT g(this);
890
891 auto [left, right] = Inputs<WordBinopOp>(node);
892 Emit(kLoong64Mod_w, g.DefineSameAsFirst(node), g.UseRegister(left),
893 g.UseRegister(right));
894}
895
896void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
897 VisitRRR(this, kLoong64Mod_wu, node);
898}
899
900void InstructionSelectorT::VisitInt64Div(OpIndex node) {
901 Loong64OperandGeneratorT g(this);
902
903 auto [left, right] = Inputs<WordBinopOp>(node);
904 Emit(kLoong64Div_d, g.DefineSameAsFirst(node), g.UseRegister(left),
905 g.UseRegister(right));
906}
907
908void InstructionSelectorT::VisitUint64Div(OpIndex node) {
909 Loong64OperandGeneratorT g(this);
910
911 auto [left, right] = Inputs<WordBinopOp>(node);
912 Emit(kLoong64Div_du, g.DefineSameAsFirst(node), g.UseRegister(left),
913 g.UseRegister(right));
914}
915
916void InstructionSelectorT::VisitInt64Mod(OpIndex node) {
917 VisitRRR(this, kLoong64Mod_d, node);
918}
919
920void InstructionSelectorT::VisitUint64Mod(OpIndex node) {
921 VisitRRR(this, kLoong64Mod_du, node);
922}
923
924void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
925 VisitRR(this, kLoong64Float32ToFloat64, node);
926}
927
928void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
929 VisitRR(this, kLoong64Int32ToFloat32, node);
930}
931
932void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
933 VisitRR(this, kLoong64Uint32ToFloat32, node);
934}
935
936void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
937 VisitRR(this, kLoong64Int32ToFloat64, node);
938}
939
940void InstructionSelectorT::VisitChangeInt64ToFloat64(OpIndex node) {
941 VisitRR(this, kLoong64Int64ToFloat64, node);
942}
943
944void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
945 VisitRR(this, kLoong64Uint32ToFloat64, node);
946}
947
948void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
949 Loong64OperandGeneratorT g(this);
950
951 const Operation& op = this->Get(node);
952 InstructionCode opcode = kLoong64Float32ToInt32;
953 opcode |=
955 Emit(opcode, g.DefineAsRegister(node),
956 g.UseRegister(this->input_at(node, 0)));
957}
958
959void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
960 Loong64OperandGeneratorT g(this);
961
962 const Operation& op = this->Get(node);
963 InstructionCode opcode = kLoong64Float32ToUint32;
965 opcode |= MiscField::encode(true);
966 }
967
968 Emit(opcode, g.DefineAsRegister(node),
969 g.UseRegister(this->input_at(node, 0)));
970}
971
972void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
973 VisitRR(this, kLoong64Float64ToInt32, node);
974}
975
976void InstructionSelectorT::VisitChangeFloat64ToInt64(OpIndex node) {
977 VisitRR(this, kLoong64Float64ToInt64, node);
978}
979
980void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
981 VisitRR(this, kLoong64Float64ToUint32, node);
982}
983
984void InstructionSelectorT::VisitChangeFloat64ToUint64(OpIndex node) {
985 VisitRR(this, kLoong64Float64ToUint64, node);
986}
987
988void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
989 VisitRR(this, kLoong64Float64ToUint32, node);
990}
991
992void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
993 Loong64OperandGeneratorT g(this);
994 InstructionCode opcode = kLoong64Float64ToInt64;
995 const Operation& op = this->Get(node);
997 opcode |= MiscField::encode(true);
998 }
999
1000 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(op.input(0)));
1001}
1002
1003void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
1004 UNIMPLEMENTED();
1005}
1006
1007void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
1008 UNIMPLEMENTED();
1009}
1010
1011void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
1012 Loong64OperandGeneratorT g(this);
1013
1014 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
1015 InstructionOperand outputs[2];
1016 size_t output_count = 0;
1017 outputs[output_count++] = g.DefineAsRegister(node);
1018
1019 OptionalOpIndex success_output = FindProjection(node, 1);
1020 if (success_output.valid()) {
1021 outputs[output_count++] = g.DefineAsRegister(success_output.value());
1022 }
1023
1024 Emit(kLoong64Float32ToInt64, output_count, outputs, 1, inputs);
1025}
1026
1027void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
1028 Loong64OperandGeneratorT g(this);
1029
1030 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
1031 InstructionOperand outputs[2];
1032 size_t output_count = 0;
1033 outputs[output_count++] = g.DefineAsRegister(node);
1034
1035 OptionalOpIndex success_output = FindProjection(node, 1);
1036 if (success_output.valid()) {
1037 outputs[output_count++] = g.DefineAsRegister(success_output.value());
1038 }
1039
1040 Emit(kLoong64Float64ToInt64, output_count, outputs, 1, inputs);
1041}
1042
1043void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
1044 Loong64OperandGeneratorT g(this);
1045
1046 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
1047 InstructionOperand outputs[2];
1048 size_t output_count = 0;
1049 outputs[output_count++] = g.DefineAsRegister(node);
1050
1051 OptionalOpIndex success_output = FindProjection(node, 1);
1052 if (success_output.valid()) {
1053 outputs[output_count++] = g.DefineAsRegister(success_output.value());
1054 }
1055
1056 Emit(kLoong64Float32ToUint64, output_count, outputs, 1, inputs);
1057}
1058
1059void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
1060 Loong64OperandGeneratorT g(this);
1061
1062 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
1063 InstructionOperand outputs[2];
1064 size_t output_count = 0;
1065 outputs[output_count++] = g.DefineAsRegister(node);
1066
1067 OptionalOpIndex success_output = FindProjection(node, 1);
1068 if (success_output.valid()) {
1069 outputs[output_count++] = g.DefineAsRegister(success_output.value());
1070 }
1071
1072 Emit(kLoong64Float64ToUint64, output_count, outputs, 1, inputs);
1073}
1074
1075void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
1076 Loong64OperandGeneratorT g(this);
1077
1078 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
1079 InstructionOperand outputs[2];
1080 size_t output_count = 0;
1081 outputs[output_count++] = g.DefineAsRegister(node);
1082
1083 OptionalOpIndex success_output = FindProjection(node, 1);
1084 if (success_output.valid()) {
1085 outputs[output_count++] = g.DefineAsRegister(success_output.value());
1086 }
1087
1088 Emit(kLoong64Float64ToInt32, output_count, outputs, 1, inputs);
1089}
1090
1091void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
1092 Loong64OperandGeneratorT g(this);
1093
1094 InstructionOperand inputs[] = {g.UseRegister(this->input_at(node, 0))};
1095 InstructionOperand outputs[2];
1096 size_t output_count = 0;
1097 outputs[output_count++] = g.DefineAsRegister(node);
1098
1099 OptionalOpIndex success_output = FindProjection(node, 1);
1100 if (success_output.valid()) {
1101 outputs[output_count++] = g.DefineAsRegister(success_output.value());
1102 }
1103
1104 Emit(kLoong64Float64ToUint32, output_count, outputs, 1, inputs);
1105}
1106
1107void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
1110 EmitIdentity(node);
1111}
1112
1113void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
1114 Loong64OperandGeneratorT g(this);
1115 const ChangeOp& change_op = this->Get(node).template Cast<ChangeOp>();
1116 const Operation& input_op = this->Get(change_op.input());
1117 if (input_op.Is<LoadOp>() && CanCover(node, change_op.input())) {
1118 // Generate sign-extending load.
1119 LoadRepresentation load_rep =
1120 this->load_view(change_op.input()).loaded_rep();
1121 MachineRepresentation rep = load_rep.representation();
1122 InstructionCode opcode = kArchNop;
1123 switch (rep) {
1124 case MachineRepresentation::kBit: // Fall through.
1126 opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
1127 break;
1129 opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
1130 break;
1135 opcode = kLoong64Ld_w;
1136 break;
1137 default:
1138 UNREACHABLE();
1139 }
1140 EmitLoad(this, change_op.input(), opcode, node);
1141 return;
1142 } else if (input_op.Is<Opmask::kWord32ShiftRightArithmetic>() &&
1143 CanCover(node, change_op.input())) {
1144 // TODO(LOONG_dev): May also optimize 'TruncateInt64ToInt32' here.
1145 EmitIdentity(node);
1146 }
1147 Emit(kLoong64Sll_w, g.DefineAsRegister(node),
1148 g.UseRegister(change_op.input()), g.TempImmediate(0));
1149}
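// For illustration: on LoongArch64 every 32-bit ALU result is written
// sign-extended into the 64-bit register, so the fallback above
// (kLoong64Sll_w with shift amount 0) is effectively a sign-extension of the
// low word; when the input is a coverable memory load, the conversion is
// folded into a sign-extending load (Ld_b/Ld_h/Ld_w) instead.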
1150
1151bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
1152 DCHECK(!this->Get(node).Is<PhiOp>());
1153 const Operation& op = this->Get(node);
1154 switch (op.opcode) {
1155 // Comparisons only emit 0/1, so the upper 32 bits must be zero.
1156 case Opcode::kComparison:
1157 return op.Cast<ComparisonOp>().rep == RegisterRepresentation::Word32();
1158 case Opcode::kOverflowCheckedBinop:
1159 return op.Cast<OverflowCheckedBinopOp>().rep ==
1161 case Opcode::kLoad: {
1162 auto load = this->load_view(node);
1163 LoadRepresentation load_rep = load.loaded_rep();
1164 if (load_rep.IsUnsigned()) {
1165 switch (load_rep.representation()) {
1166 case MachineRepresentation::kBit: // Fall through.
1167 case MachineRepresentation::kWord8: // Fall through.
1169 return true;
1170 default:
1171 return false;
1172 }
1173 }
1174 return false;
1175 }
1176 default:
1177 return false;
1178 }
1179}
1180
1181void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
1182 Loong64OperandGeneratorT g(this);
1183 const ChangeOp& change_op = this->Get(node).template Cast<ChangeOp>();
1184 OpIndex input = change_op.input();
1185 const Operation& input_op = this->Get(input);
1186
1187 if (input_op.Is<LoadOp>() && CanCover(node, input)) {
1188 // Generate zero-extending load.
1189 LoadRepresentation load_rep = this->load_view(input).loaded_rep();
1190 if (load_rep.IsUnsigned() &&
1191 load_rep.representation() == MachineRepresentation::kWord32) {
1192 EmitLoad(this, input, kLoong64Ld_wu, node);
1193 return;
1194 }
1195 }
1196 if (ZeroExtendsWord32ToWord64(input)) {
1197 EmitIdentity(node);
1198 return;
1199 }
1200 Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node), g.UseRegister(input),
1201 g.TempImmediate(0), g.TempImmediate(32));
1202}
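// For illustration: kLoong64Bstrpick_d zero-extends by extracting the low
// 32 bits into the destination and clearing the rest; it is only needed when
// the selector cannot prove the upper 32 bits are already zero
// (ZeroExtendsWord32ToWord64) and the input is not a load that can become
// kLoong64Ld_wu.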
1203
1204void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
1205 Loong64OperandGeneratorT g(this);
1206 auto value = input_at(node, 0);
1207 if (CanCover(node, value)) {
1208    if (Get(value).Is<Opmask::kWord64ShiftRightArithmetic>()) {
1209 auto shift_value = input_at(value, 1);
1210 if (CanCover(value, input_at(value, 0)) &&
1211 TryEmitExtendingLoad(this, value, node)) {
1212 return;
1213 } else if (int64_t constant;
1214 MatchSignedIntegralConstant(shift_value, &constant)) {
1215 if (constant >= 32 && constant <= 63) {
1216        // After Smi untagging there is no need for a truncate; combine the sequence.
1217 Emit(kLoong64Sra_d, g.DefineAsRegister(node),
1218 g.UseRegister(input_at(value, 0)), g.UseImmediate(constant));
1219 return;
1220 }
1221 }
1222 }
1223 }
1224 Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
1225 g.TempImmediate(0));
1226}
1227
1228void InstructionSelectorT::VisitTruncateFloat64ToFloat32(OpIndex node) {
1229 VisitRR(this, kLoong64Float64ToFloat32, node);
1230}
1231
1232void InstructionSelectorT::VisitTruncateFloat64ToWord32(OpIndex node) {
1233 VisitRR(this, kArchTruncateDoubleToI, node);
1234}
1235
1236void InstructionSelectorT::VisitRoundFloat64ToInt32(OpIndex node) {
1237 VisitRR(this, kLoong64Float64ToInt32, node);
1238}
1239
1240void InstructionSelectorT::VisitRoundInt64ToFloat32(OpIndex node) {
1241 VisitRR(this, kLoong64Int64ToFloat32, node);
1242}
1243
1244void InstructionSelectorT::VisitRoundInt64ToFloat64(OpIndex node) {
1245 VisitRR(this, kLoong64Int64ToFloat64, node);
1246}
1247
1248void InstructionSelectorT::VisitRoundUint64ToFloat32(OpIndex node) {
1249 VisitRR(this, kLoong64Uint64ToFloat32, node);
1250}
1251
1252void InstructionSelectorT::VisitRoundUint64ToFloat64(OpIndex node) {
1253 VisitRR(this, kLoong64Uint64ToFloat64, node);
1254}
1255
1256void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
1257 VisitRR(this, kLoong64Float64ExtractLowWord32, node);
1258}
1259
1260void InstructionSelectorT::VisitBitcastFloat64ToInt64(OpIndex node) {
1261 VisitRR(this, kLoong64BitcastDL, node);
1262}
1263
1264void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
1265  // When moving the lower 32 bits of a general register to a 64-bit FPU
1266  // register on LoongArch64, the upper 32 bits of the FPU register are
1267  // undefined, so we can simply move the whole 64 bits to the FPU register.
1268 VisitRR(this, kLoong64BitcastLD, node);
1269}
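// Example: because of the above, BitcastInt32ToFloat32 emits the same 64-bit
// general-to-FPU move (kLoong64BitcastLD) as BitcastInt64ToFloat64; only the
// low 32 bits of the destination are meaningful, which is all a float32
// consumer will read.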
1270
1271void InstructionSelectorT::VisitBitcastInt64ToFloat64(OpIndex node) {
1272 VisitRR(this, kLoong64BitcastLD, node);
1273}
1274
1275void InstructionSelectorT::VisitFloat32Add(OpIndex node) {
1276 VisitRRR(this, kLoong64Float32Add, node);
1277}
1278
1279void InstructionSelectorT::VisitFloat64Add(OpIndex node) {
1280 VisitRRR(this, kLoong64Float64Add, node);
1281}
1282
1283void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {
1284 VisitRRR(this, kLoong64Float32Sub, node);
1285}
1286
1287void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {
1288 VisitRRR(this, kLoong64Float64Sub, node);
1289}
1290
1291void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {
1292 VisitRRR(this, kLoong64Float32Mul, node);
1293}
1294
1295void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {
1296 VisitRRR(this, kLoong64Float64Mul, node);
1297}
1298
1299void InstructionSelectorT::VisitFloat32Div(OpIndex node) {
1300 VisitRRR(this, kLoong64Float32Div, node);
1301}
1302
1303void InstructionSelectorT::VisitFloat64Div(OpIndex node) {
1304 VisitRRR(this, kLoong64Float64Div, node);
1305}
1306
1307void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
1308 Loong64OperandGeneratorT g(this);
1309 Emit(kLoong64Float64Mod, g.DefineAsFixed(node, f0),
1310 g.UseFixed(this->input_at(node, 0), f0),
1311 g.UseFixed(this->input_at(node, 1), f1))
1312 ->MarkAsCall();
1313}
1314
1315void InstructionSelectorT::VisitFloat32Max(OpIndex node) {
1316 VisitRRR(this, kLoong64Float32Max, node);
1317}
1318
1319void InstructionSelectorT::VisitFloat64Max(OpIndex node) {
1320 VisitRRR(this, kLoong64Float64Max, node);
1321}
1322
1323void InstructionSelectorT::VisitFloat32Min(OpIndex node) {
1324 VisitRRR(this, kLoong64Float32Min, node);
1325}
1326
1327void InstructionSelectorT::VisitFloat64Min(OpIndex node) {
1328 VisitRRR(this, kLoong64Float64Min, node);
1329}
1330
1331void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
1332 VisitRR(this, kLoong64Float32Abs, node);
1333}
1334
1335void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
1336 VisitRR(this, kLoong64Float64Abs, node);
1337}
1338
1339void InstructionSelectorT::VisitFloat32Sqrt(OpIndex node) {
1340 VisitRR(this, kLoong64Float32Sqrt, node);
1341}
1342
1343void InstructionSelectorT::VisitFloat64Sqrt(OpIndex node) {
1344 VisitRR(this, kLoong64Float64Sqrt, node);
1345}
1346
1347void InstructionSelectorT::VisitFloat32RoundDown(OpIndex node) {
1348 VisitRR(this, kLoong64Float32RoundDown, node);
1349}
1350
1351void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
1352 VisitRR(this, kLoong64Float64RoundDown, node);
1353}
1354
1355void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
1356 VisitRR(this, kLoong64Float32RoundUp, node);
1357}
1358
1359void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
1360 VisitRR(this, kLoong64Float64RoundUp, node);
1361}
1362
1363void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
1364 VisitRR(this, kLoong64Float32RoundTruncate, node);
1365}
1366
1367void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
1368 VisitRR(this, kLoong64Float64RoundTruncate, node);
1369}
1370
1371void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {
1372 UNREACHABLE();
1373}
1374
1375void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
1376 VisitRR(this, kLoong64Float32RoundTiesEven, node);
1377}
1378
1379void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
1380 VisitRR(this, kLoong64Float64RoundTiesEven, node);
1381}
1382
1383void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
1384 VisitRR(this, kLoong64Float32Neg, node);
1385}
1386
1387void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
1388 VisitRR(this, kLoong64Float64Neg, node);
1389}
1390
1392 InstructionCode opcode) {
1393 Loong64OperandGeneratorT g(this);
1394 Emit(opcode, g.DefineAsFixed(node, f0),
1395 g.UseFixed(this->input_at(node, 0), f0),
1396 g.UseFixed(this->input_at(node, 1), f1))
1397 ->MarkAsCall();
1398}
1399
1401 InstructionCode opcode) {
1402 Loong64OperandGeneratorT g(this);
1403 Emit(opcode, g.DefineAsFixed(node, f0),
1404 g.UseFixed(this->input_at(node, 0), f0))
1405 ->MarkAsCall();
1406}
1407
1408void InstructionSelectorT::EmitMoveParamToFPR(OpIndex node, int32_t index) {
1409 OperandGenerator g(this);
1410 int count = linkage()->GetParameterLocation(index).GetLocation();
1411 InstructionOperand out_op = g.TempRegister(-count);
1412 Emit(kArchNop, out_op);
1413 Emit(kLoong64BitcastLD, g.DefineAsRegister(node), out_op);
1414}
1415
1416void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
1417 LinkageLocation location) {
1418 OperandGenerator g(this);
1419 int count = location.GetLocation();
1420 InstructionOperand new_op = g.TempRegister(-count);
1421 Emit(kLoong64BitcastDL, new_op, *op);
1422 *op = new_op;
1423}
1424
1426 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1427 OpIndex node) {
1428 Loong64OperandGeneratorT g(this);
1429
1430 // Prepare for C function call.
1431 if (call_descriptor->IsCFunctionCall()) {
1432 int gp_param_count = static_cast<int>(call_descriptor->GPParameterCount());
1433 int fp_param_count = static_cast<int>(call_descriptor->FPParameterCount());
1434 Emit(kArchPrepareCallCFunction | ParamField::encode(gp_param_count) |
1435 FPParamField::encode(fp_param_count),
1436 0, nullptr, 0, nullptr);
1437
1438 // Poke any stack arguments.
1439 int slot = 0;
1440 for (PushParameter input : (*arguments)) {
1441 Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
1442 g.TempImmediate(slot << kSystemPointerSizeLog2));
1443 ++slot;
1444 }
1445 } else {
1446 int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
1447 if (push_count > 0) {
1448 // Calculate needed space
1449 int stack_size = 0;
1450 for (PushParameter input : (*arguments)) {
1451 if (input.node.valid()) {
1452 stack_size += input.location.GetSizeInPointers();
1453 }
1454 }
1455 Emit(kLoong64StackClaim, g.NoOutput(),
1456 g.TempImmediate(stack_size << kSystemPointerSizeLog2));
1457 }
1458 for (size_t n = 0; n < arguments->size(); ++n) {
1459 PushParameter input = (*arguments)[n];
1460 if (input.node.valid()) {
1461 Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
1462 g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
1463 }
1464 }
1465 }
1466}
1467
1469 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1470 OpIndex node) {
1471 Loong64OperandGeneratorT g(this);
1472
1473 for (PushParameter output : *results) {
1474 if (!output.location.IsCallerFrameSlot()) continue;
1475 // Skip any alignment holes in nodes.
1476 if (output.node.valid()) {
1477 DCHECK(!call_descriptor->IsCFunctionCall());
1478 if (output.location.GetType() == MachineType::Float32()) {
1479 MarkAsFloat32(output.node);
1480 } else if (output.location.GetType() == MachineType::Float64()) {
1481 MarkAsFloat64(output.node);
1482 } else if (output.location.GetType() == MachineType::Simd128()) {
1483 abort();
1484 }
1485 int offset = call_descriptor->GetOffsetToReturns();
1486 int reverse_slot = -output.location.GetLocation() - offset;
1487 Emit(kLoong64Peek, g.DefineAsRegister(output.node),
1488 g.UseImmediate(reverse_slot));
1489 }
1490 }
1491}
1492
1494
1495void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) { UNREACHABLE(); }
1496
1497void InstructionSelectorT::VisitUnalignedStore(OpIndex node) { UNREACHABLE(); }
1498
1499namespace {
1500
1501// Shared routine for multiple compare operations.
1502static Instruction* VisitCompare(InstructionSelectorT* selector,
1503 InstructionCode opcode,
1504 InstructionOperand left,
1505 InstructionOperand right,
1506 FlagsContinuationT* cont) {
1507#ifdef V8_COMPRESS_POINTERS
1508 if (opcode == kLoong64Cmp32) {
1509 Loong64OperandGeneratorT g(selector);
1510 InstructionOperand inputs[] = {left, right};
1511 if (right.IsImmediate()) {
1512 InstructionOperand temps[1] = {g.TempRegister()};
1513 return selector->EmitWithContinuation(opcode, 0, nullptr,
1514 arraysize(inputs), inputs,
1515 arraysize(temps), temps, cont);
1516 } else {
1517 InstructionOperand temps[2] = {g.TempRegister(), g.TempRegister()};
1518 return selector->EmitWithContinuation(opcode, 0, nullptr,
1519 arraysize(inputs), inputs,
1520 arraysize(temps), temps, cont);
1521 }
1522 }
1523#endif
1524 return selector->EmitWithContinuation(opcode, left, right, cont);
1525}
1526
1527// Shared routine for multiple float32 compare operations.
1528void VisitFloat32Compare(InstructionSelectorT* selector, OpIndex node,
1529 FlagsContinuationT* cont) {
1530 Loong64OperandGeneratorT g(selector);
1531 const ComparisonOp& op = selector->Get(node).template Cast<ComparisonOp>();
1532 OpIndex left = op.left();
1533 OpIndex right = op.right();
1534 InstructionOperand lhs, rhs;
1535
1536 lhs = selector->MatchZero(left) ? g.UseImmediate(left) : g.UseRegister(left);
1537 rhs =
1538 selector->MatchZero(right) ? g.UseImmediate(right) : g.UseRegister(right);
1539 VisitCompare(selector, kLoong64Float32Cmp, lhs, rhs, cont);
1540}
1541
1542// Shared routine for multiple float64 compare operations.
1543void VisitFloat64Compare(InstructionSelectorT* selector, OpIndex node,
1544 FlagsContinuationT* cont) {
1545 Loong64OperandGeneratorT g(selector);
1546 const Operation& compare = selector->Get(node);
1547 DCHECK(compare.Is<ComparisonOp>());
1548 OpIndex lhs = compare.input(0);
1549 OpIndex rhs = compare.input(1);
1550 if (selector->MatchZero(rhs)) {
1551 VisitCompare(selector, kLoong64Float64Cmp, g.UseRegister(lhs),
1552 g.UseImmediate(rhs), cont);
1553 } else if (selector->MatchZero(lhs)) {
1554 VisitCompare(selector, kLoong64Float64Cmp, g.UseImmediate(lhs),
1555 g.UseRegister(rhs), cont);
1556 } else {
1557 VisitCompare(selector, kLoong64Float64Cmp, g.UseRegister(lhs),
1558 g.UseRegister(rhs), cont);
1559 }
1560}
1561
1562// Shared routine for multiple word compare operations.
1563void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
1564 InstructionCode opcode, FlagsContinuationT* cont,
1565 bool commutative) {
1566 Loong64OperandGeneratorT g(selector);
1567 DCHECK_EQ(selector->value_input_count(node), 2);
1568 auto left = selector->input_at(node, 0);
1569 auto right = selector->input_at(node, 1);
1570
1571 // Match immediates on left or right side of comparison.
1572 if (g.CanBeImmediate(right, opcode)) {
1573 if (opcode == kLoong64Tst) {
1574 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1575 cont);
1576 } else {
1577 switch (cont->condition()) {
1578 case kEqual:
1579 case kNotEqual:
1580 if (cont->IsSet()) {
1581 VisitCompare(selector, opcode, g.UseUniqueRegister(left),
1582 g.UseImmediate(right), cont);
1583 } else {
1584 VisitCompare(selector, opcode, g.UseUniqueRegister(left),
1585 g.UseImmediate(right), cont);
1586 }
1587 break;
1588 case kSignedLessThan:
1591 case kSignedGreaterThan:
1592 case kUnsignedLessThan:
1596 VisitCompare(selector, opcode, g.UseUniqueRegister(left),
1597 g.UseImmediate(right), cont);
1598 break;
1599 default:
1600 UNREACHABLE();
1601 }
1602 }
1603 } else if (g.CanBeImmediate(left, opcode)) {
1604 if (!commutative) cont->Commute();
1605 if (opcode == kLoong64Tst) {
1606 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1607 cont);
1608 } else {
1609 switch (cont->condition()) {
1610 case kEqual:
1611 case kNotEqual:
1612 if (cont->IsSet()) {
1613 VisitCompare(selector, opcode, g.UseUniqueRegister(right),
1614 g.UseImmediate(left), cont);
1615 } else {
1616 VisitCompare(selector, opcode, g.UseUniqueRegister(right),
1617 g.UseImmediate(left), cont);
1618 }
1619 break;
1620 case kSignedLessThan:
1623 case kSignedGreaterThan:
1624 case kUnsignedLessThan:
1628 VisitCompare(selector, opcode, g.UseUniqueRegister(right),
1629 g.UseImmediate(left), cont);
1630 break;
1631 default:
1632 UNREACHABLE();
1633 }
1634 }
1635 } else {
1636 VisitCompare(selector, opcode, g.UseUniqueRegister(left),
1637 g.UseUniqueRegister(right), cont);
1638 }
1639}
1640
1641// Shared routine for multiple word compare operations.
1642void VisitFullWord32Compare(InstructionSelectorT* selector, OpIndex node,
1643 InstructionCode opcode, FlagsContinuationT* cont) {
1644 Loong64OperandGeneratorT g(selector);
1645 InstructionOperand leftOp = g.TempRegister();
1646 InstructionOperand rightOp = g.TempRegister();
1647
1648 selector->Emit(kLoong64Sll_d, leftOp,
1649 g.UseRegister(selector->input_at(node, 0)),
1650 g.TempImmediate(32));
1651 selector->Emit(kLoong64Sll_d, rightOp,
1652 g.UseRegister(selector->input_at(node, 1)),
1653 g.TempImmediate(32));
1654
1655 Instruction* instr = VisitCompare(selector, opcode, leftOp, rightOp, cont);
1656 selector->UpdateSourcePosition(instr, node);
1657}
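// Why the shifts: both operands are moved into the upper 32 bits of a
// temporary (shift left by 32) and then compared with the 64-bit compare
// (kLoong64Cmp64), so the outcome depends only on the low 32 bits of the
// original inputs, no matter what the upper halves of the incoming registers
// contain. This port uses the full compare for every Word32 comparison (see
// VisitWord32Compare below).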
1658
1659void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
1660 FlagsContinuationT* cont) {
1661 VisitFullWord32Compare(selector, node, kLoong64Cmp64, cont);
1662}
1663
1664void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
1665 FlagsContinuationT* cont) {
1666 VisitWordCompare(selector, node, kLoong64Cmp64, cont, false);
1667}
1668
1669void VisitAtomicLoad(InstructionSelectorT* selector, OpIndex node,
1670 AtomicWidth width) {
1671 using OpIndex = OpIndex;
1672 Loong64OperandGeneratorT g(selector);
1673 auto load = selector->load_view(node);
1674 OpIndex base = load.base();
1675 OpIndex index = load.index();
1676
1677 // The memory order is ignored.
1678 LoadRepresentation load_rep = load.loaded_rep();
1680 switch (load_rep.representation()) {
1682 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
1683 code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
1684 break;
1686 DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
1687 code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
1688 break;
1690 code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
1691 : kLoong64Word64AtomicLoadUint32;
1692 break;
1694 code = kLoong64Word64AtomicLoadUint64;
1695 break;
1696#ifdef V8_COMPRESS_POINTERS
1698 code = kLoong64AtomicLoadDecompressTaggedSigned;
1699 break;
1702 code = kLoong64AtomicLoadDecompressTagged;
1703 break;
1704#else
1705 case MachineRepresentation::kTaggedSigned: // Fall through.
1706 case MachineRepresentation::kTaggedPointer: // Fall through.
1708 code = kLoong64Word64AtomicLoadUint64;
1709 break;
1710#endif
1711 case MachineRepresentation::kCompressedPointer: // Fall through.
1714 code = kLoong64Word64AtomicLoadUint32;
1715 break;
1716 default:
1717 UNREACHABLE();
1718 }
1719
1720 bool traps_on_null;
1721 if (load.is_protected(&traps_on_null)) {
1722 // Atomic loads and null dereference are mutually exclusive. This might
1723 // change with multi-threaded wasm-gc in which case the access mode should
1724 // probably be kMemoryAccessProtectedNullDereference.
1725 DCHECK(!traps_on_null);
1727 }
1728
1729 if (g.CanBeImmediate(index, code)) {
1730 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1732 g.DefineAsRegister(node), g.UseRegister(base),
1733 g.UseImmediate(index));
1734 } else {
1735 selector->Emit(code | AddressingModeField::encode(kMode_MRR) |
1737 g.DefineAsRegister(node), g.UseRegister(base),
1738 g.UseRegister(index));
1739 }
1740}
1741
1742AtomicStoreParameters AtomicStoreParametersOf(InstructionSelectorT* selector,
1743 OpIndex node) {
1744 auto store = selector->store_view(node);
1745 return AtomicStoreParameters(store.stored_rep().representation(),
1746 store.stored_rep().write_barrier_kind(),
1747 store.memory_order().value(),
1748 store.access_kind());
1749}
1750
1751void VisitAtomicStore(InstructionSelectorT* selector, OpIndex node,
1752 AtomicWidth width) {
1753 using OpIndex = OpIndex;
1754 Loong64OperandGeneratorT g(selector);
1755 auto store = selector->store_view(node);
1756 OpIndex base = store.base();
1757 OpIndex index = selector->value(store.index());
1758 OpIndex value = store.value();
1759 DCHECK_EQ(store.displacement(), 0);
1760
1761 // The memory order is ignored.
1762 AtomicStoreParameters store_params = AtomicStoreParametersOf(selector, node);
1763 WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
1764 MachineRepresentation rep = store_params.representation();
1765
1766 if (v8_flags.enable_unconditional_write_barriers &&
1768 write_barrier_kind = kFullWriteBarrier;
1769 }
1770
1772
1773 if (write_barrier_kind != kNoWriteBarrier &&
1774 !v8_flags.disable_write_barriers) {
1777
1778 RecordWriteMode record_write_mode =
1779 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
1780 code = kArchAtomicStoreWithWriteBarrier;
1781 code |= RecordWriteModeField::encode(record_write_mode);
1782 } else {
1783 switch (rep) {
1785 code = kAtomicStoreWord8;
1786 break;
1788 code = kAtomicStoreWord16;
1789 break;
1791 code = kAtomicStoreWord32;
1792 break;
1795 code = kLoong64Word64AtomicStoreWord64;
1796 break;
1797 case MachineRepresentation::kTaggedSigned: // Fall through.
1798 case MachineRepresentation::kTaggedPointer: // Fall through.
1801 code = kLoong64AtomicStoreCompressTagged;
1802 break;
1803 case MachineRepresentation::kCompressedPointer: // Fall through.
1807 code = kLoong64AtomicStoreCompressTagged;
1808 break;
1809 default:
1810 UNREACHABLE();
1811 }
1812 }
1813
1814 if (store_params.kind() == MemoryAccessKind::kProtectedByTrapHandler) {
1816 }
1817
1818 if (g.CanBeImmediate(index, code)) {
1819 selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
1820 AtomicWidthField::encode(width),
1821 g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
1822 g.UseRegisterOrImmediateZero(value));
1823 } else {
1824 selector->Emit(code | AddressingModeField::encode(kMode_MRR) |
1825 AtomicWidthField::encode(width),
1826 g.NoOutput(), g.UseRegister(base), g.UseRegister(index),
1827 g.UseRegisterOrImmediateZero(value));
1828 }
1829}
1830
1831void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
1832 ArchOpcode opcode, AtomicWidth width,
1833 MemoryAccessKind access_kind) {
1834 using OpIndex = OpIndex;
1835 Loong64OperandGeneratorT g(selector);
1836 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1837 OpIndex base = atomic_op.base();
1838 OpIndex index = atomic_op.index();
1839 OpIndex value = atomic_op.value();
1840
1841 AddressingMode addressing_mode = kMode_MRI;
1842 InstructionOperand inputs[3];
1843 size_t input_count = 0;
1844 inputs[input_count++] = g.UseUniqueRegister(base);
1845 inputs[input_count++] = g.UseUniqueRegister(index);
1846 inputs[input_count++] = g.UseUniqueRegister(value);
1847 InstructionOperand outputs[1];
1848 outputs[0] = g.UseUniqueRegister(node);
1849 InstructionOperand temp[3];
1850 temp[0] = g.TempRegister();
1851 temp[1] = g.TempRegister();
1852 temp[2] = g.TempRegister();
1853 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1854 AtomicWidthField::encode(width);
1855 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
1856 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
1857 }
1858 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
1859}
1860
1861void VisitAtomicCompareExchange(InstructionSelectorT* selector, OpIndex node,
1862 ArchOpcode opcode, AtomicWidth width,
1863 MemoryAccessKind access_kind) {
1864 using OpIndex = OpIndex;
1865 Loong64OperandGeneratorT g(selector);
1866 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1867 OpIndex base = atomic_op.base();
1868 OpIndex index = atomic_op.index();
1869 OpIndex old_value = atomic_op.expected().value();
1870 OpIndex new_value = atomic_op.value();
1871
1872 AddressingMode addressing_mode = kMode_MRI;
1873 InstructionOperand inputs[4];
1874 size_t input_count = 0;
1875 inputs[input_count++] = g.UseUniqueRegister(base);
1876 inputs[input_count++] = g.UseUniqueRegister(index);
1877 inputs[input_count++] = g.UseUniqueRegister(old_value);
1878 inputs[input_count++] = g.UseUniqueRegister(new_value);
1879 InstructionOperand outputs[1];
1880 outputs[0] = g.UseUniqueRegister(node);
1881 InstructionOperand temp[3];
1882 temp[0] = g.TempRegister();
1883 temp[1] = g.TempRegister();
1884 temp[2] = g.TempRegister();
1885 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1886 AtomicWidthField::encode(width);
1887 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
1888 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
1889 }
1890 selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
1891}
1892
1893void VisitAtomicBinop(InstructionSelectorT* selector, OpIndex node,
1894 ArchOpcode opcode, AtomicWidth width,
1895 MemoryAccessKind access_kind) {
1896 using OpIndex = OpIndex;
1897 Loong64OperandGeneratorT g(selector);
1898 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
1899 OpIndex base = atomic_op.base();
1900 OpIndex index = atomic_op.index();
1901 OpIndex value = atomic_op.value();
1902
1903 AddressingMode addressing_mode = kMode_MRI;
1904 InstructionOperand inputs[3];
1905 size_t input_count = 0;
1906 inputs[input_count++] = g.UseUniqueRegister(base);
1907 inputs[input_count++] = g.UseUniqueRegister(index);
1908 inputs[input_count++] = g.UseUniqueRegister(value);
1909 InstructionOperand outputs[1];
1910 outputs[0] = g.UseUniqueRegister(node);
1911 InstructionOperand temps[4];
1912 temps[0] = g.TempRegister();
1913 temps[1] = g.TempRegister();
1914 temps[2] = g.TempRegister();
1915 temps[3] = g.TempRegister();
1916 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
1917 AtomicWidthField::encode(width);
1918 if (access_kind == MemoryAccessKind::kProtectedByTrapHandler) {
1919 code |= AccessModeField::encode(kMemoryAccessProtectedMemOutOfBounds);
1920 }
1921 selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
1922}
1923
1924} // namespace
1925
1926void InstructionSelectorT::VisitStackPointerGreaterThan(
1927 OpIndex node, FlagsContinuationT* cont) {
1928 StackCheckKind kind;
1929 OpIndex value;
1930 const auto& op = this->turboshaft_graph()
1931 ->Get(node)
1932 .template Cast<StackPointerGreaterThanOp>();
1933 kind = op.kind;
1934 value = op.stack_limit();
1935 InstructionCode opcode =
1936 kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
1937
1938 Loong64OperandGeneratorT g(this);
1939
1940 // No outputs.
1941 InstructionOperand* const outputs = nullptr;
1942 const int output_count = 0;
1943
1944 // TempRegister(0) is used to store the comparison result.
1945 // Applying an offset to this stack check requires a temp register. Offsets
1946 // are only applied to the first stack check. If applying an offset, we must
1947 // ensure the input and temp registers do not alias, thus kUniqueRegister.
1948 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1949 const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
1950 const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
1951 ? OperandGenerator::kUniqueRegister
1952 : OperandGenerator::kRegister;
1953
1954 InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1955 static constexpr int input_count = arraysize(inputs);
1956
1957 EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
1958 temp_count, temps, cont);
1959}
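// As the comment above notes, only the function-entry stack check applies an
// offset to the stack limit, so StackCheckKind::kJSFunctionEntry requests the
// second temp register and forces the value input into kUniqueRegister mode to
// avoid aliasing:
//   kJSFunctionEntry -> temp_count = 2, register_mode = kUniqueRegister
//   all other kinds  -> temp_count = 1, register_mode = kRegister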
1960
1961// Shared routine for word comparisons against zero.
1962void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
1963 FlagsContinuation* cont) {
1964 {
1965 Loong64OperandGeneratorT g(this);
1966 // Try to combine with comparisons against 0 by simply inverting the branch.
1967 while (const ComparisonOp* equal =
1968 this->TryCast<Opmask::kWord32Equal>(value)) {
1969 if (!CanCover(user, value)) break;
1970 if (!MatchIntegralZero(equal->right())) break;
1971 user = value;
1972 value = equal->left();
1973 cont->Negate();
1974 }
1975
1976 const Operation& value_op = Get(value);
1977 if (CanCover(user, value)) {
1978 if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
1979 switch (comparison->rep.value()) {
1980 case RegisterRepresentation::Word32():
1981 cont->OverwriteAndNegateIfEqual(
1982 GetComparisonFlagCondition(*comparison));
1983 return VisitWord32Compare(this, value, cont);
1984
1985 case RegisterRepresentation::Word64():
1986 cont->OverwriteAndNegateIfEqual(
1987 GetComparisonFlagCondition(*comparison));
1988 return VisitWord64Compare(this, value, cont);
1989
1990 case RegisterRepresentation::Float32():
1991 switch (comparison->kind) {
1992 case ComparisonOp::Kind::kEqual:
1993 cont->OverwriteAndNegateIfEqual(kEqual);
1994 return VisitFloat32Compare(this, value, cont);
1995 case ComparisonOp::Kind::kSignedLessThan:
1996 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1997 return VisitFloat32Compare(this, value, cont);
1998 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1999 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
2000 return VisitFloat32Compare(this, value, cont);
2001 default:
2002 UNREACHABLE();
2003 }
2004
2005 case RegisterRepresentation::Float64():
2006 switch (comparison->kind) {
2007 case ComparisonOp::Kind::kEqual:
2008 cont->OverwriteAndNegateIfEqual(kEqual);
2009 return VisitFloat64Compare(this, value, cont);
2010 case ComparisonOp::Kind::kSignedLessThan:
2011 cont->OverwriteAndNegateIfEqual(kFloatLessThan);
2012 return VisitFloat64Compare(this, value, cont);
2013 case ComparisonOp::Kind::kSignedLessThanOrEqual:
2014 cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
2015 return VisitFloat64Compare(this, value, cont);
2016 default:
2017 UNREACHABLE();
2018 }
2019
2020 default:
2021 break;
2022 }
2023 } else if (const ProjectionOp* projection =
2024 value_op.TryCast<ProjectionOp>()) {
2025 // Check if this is the overflow output projection of an
2026 // <Operation>WithOverflow node.
2027 if (projection->index == 1u) {
2028 // We cannot combine the <Operation>WithOverflow with this branch
2029 // unless the 0th projection (the use of the actual value of the
2030 // <Operation> is either nullptr, which means there's no use of the
2031 // actual value, or was already defined, which means it is scheduled
2032 // *AFTER* this branch).
2033 OpIndex node = projection->input();
2034 OptionalOpIndex result = FindProjection(node, 0);
2035 if (!result.valid() || IsDefined(result.value())) {
2036 if (const OverflowCheckedBinopOp* binop =
2037 TryCast<OverflowCheckedBinopOp>(node)) {
2038 const bool is64 = binop->rep == WordRepresentation::Word64();
2039 switch (binop->kind) {
2040 case OverflowCheckedBinopOp::Kind::kSignedAdd:
2041 cont->OverwriteAndNegateIfEqual(kOverflow);
2042 return VisitBinop(this, node,
2043 is64 ? kLoong64AddOvf_d : kLoong64Add_d,
2044 cont);
2045 case OverflowCheckedBinopOp::Kind::kSignedSub:
2046 cont->OverwriteAndNegateIfEqual(kOverflow);
2047 return VisitBinop(this, node,
2048 is64 ? kLoong64SubOvf_d : kLoong64Sub_d,
2049 cont);
2050 case OverflowCheckedBinopOp::Kind::kSignedMul:
2051 cont->OverwriteAndNegateIfEqual(kOverflow);
2052 return VisitBinop(this, node,
2053 is64 ? kLoong64MulOvf_d : kLoong64MulOvf_w,
2054 cont);
2055 }
2056 }
2057 }
2058 }
2059 } else if (value_op.Is<Opmask::kWord32BitwiseAnd>() ||
2060 value_op.Is<Opmask::kWord64BitwiseAnd>()) {
2061 return VisitWordCompare(this, value, kLoong64Tst, cont, true);
2062 } else if (value_op.Is<StackPointerGreaterThanOp>()) {
2063 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
2064 return VisitStackPointerGreaterThan(value, cont);
2065 }
2066 }
2067
2068 // Continuation could not be combined with a compare, emit compare against
2069 // 0.
2070 VisitCompare(this, kLoong64Cmp32, g.UseRegister(value), g.TempImmediate(0),
2071 cont);
2072 }
2073}
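// Worked example of the folding loop above: a branch on Word32Equal(x, 0),
// where x is itself a comparison, is rewritten to a branch on x with the
// continuation negated, so the equality against zero never materializes a
// boolean; a chain of such equals simply toggles the negation once per level.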
2074
2075void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
2076 Loong64OperandGeneratorT g(this);
2077 InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));
2078
2079 // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
2080 if (enable_switch_jump_table_ ==
2081 InstructionSelector::kEnableSwitchJumpTable) {
2082 static const size_t kMaxTableSwitchValueRange = 2 << 16;
2083 size_t table_space_cost = 10 + 2 * sw.value_range();
2084 size_t table_time_cost = 3;
2085 size_t lookup_space_cost = 2 + 2 * sw.case_count();
2086 size_t lookup_time_cost = sw.case_count();
2087 if (sw.case_count() > 0 &&
2088 table_space_cost + 3 * table_time_cost <=
2089 lookup_space_cost + 3 * lookup_time_cost &&
2090 sw.min_value() > std::numeric_limits<int32_t>::min() &&
2091 sw.value_range() <= kMaxTableSwitchValueRange) {
2092 InstructionOperand index_operand = value_operand;
2093 if (sw.min_value()) {
2094 index_operand = g.TempRegister();
2095 Emit(kLoong64Sub_w, index_operand, value_operand,
2096 g.TempImmediate(sw.min_value()));
2097 }
2098 // Generate a table lookup.
2099 return EmitTableSwitch(sw, index_operand);
2100 }
2101 }
2102
2103 // Generate a tree of conditional jumps.
2104 return EmitBinarySearchSwitch(sw, value_operand);
2105}
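// The jump-table heuristic above in numbers, for illustration: a switch with
// 20 consecutive cases gives table_space_cost = 10 + 2 * 20 = 50 and
// lookup_space_cost = 2 + 2 * 20 = 42, so with table_time_cost = 3 and
// lookup_time_cost = 20 the comparison is 50 + 9 = 59 <= 42 + 60 = 102 and a
// table switch is emitted; a sparse switch whose value_range exceeds
// kMaxTableSwitchValueRange falls back to the binary-search tree instead.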
2106
2107void InstructionSelectorT::VisitWord32Equal(OpIndex node) {
2108 const Operation& equal = Get(node);
2109 DCHECK(equal.Is<ComparisonOp>());
2110 OpIndex left = equal.input(0);
2111 OpIndex right = equal.input(1);
2112 OpIndex user = node;
2113 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2114
2115 if (MatchZero(right)) {
2116 return VisitWordCompareZero(user, left, &cont);
2117 }
2118
2119 if (isolate() && (V8_STATIC_ROOTS_BOOL ||
2120 (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) {
2121 Loong64OperandGeneratorT g(this);
2122 const RootsTable& roots_table = isolate()->roots_table();
2123 RootIndex root_index;
2124 Handle<HeapObject> right;
2125 // HeapConstants and CompressedHeapConstants can be treated the same when
2126 // using them as an input to a 32-bit comparison. Check whether either is
2127 // present.
2128 if (MatchHeapConstant(node, &right) && !right.is_null() &&
2129 roots_table.IsRootHandle(right, &root_index)) {
2130 if (RootsTable::IsReadOnly(root_index)) {
2131 Tagged_t ptr =
2132 MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate());
2133 if (g.CanBeImmediate(ptr, kLoong64Cmp32)) {
2134 VisitCompare(this, kLoong64Cmp32, g.UseRegister(left),
2135 g.TempImmediate(int32_t(ptr)), &cont);
2136 return;
2137 }
2138 }
2139 }
2140 }
2141 VisitWord32Compare(this, node, &cont);
2142}
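// Note on the root-handle fast path above: with pointer compression, equality
// against a read-only root (undefined, null, the empty string, ...) can be
// emitted as a kLoong64Cmp32 against the root's compressed pointer, provided
// CanBeImmediate(ptr, kLoong64Cmp32) accepts the value, so the heap constant
// never has to be materialized in a register.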
2143
2144void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
2145 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2146 VisitWord32Compare(this, node, &cont);
2147}
2148
2149void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
2150 FlagsContinuation cont =
2151 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2152 VisitWord32Compare(this, node, &cont);
2153}
2154
2155void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
2156 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2157 VisitWord32Compare(this, node, &cont);
2158}
2159
2160void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
2161 FlagsContinuation cont =
2162 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2163 VisitWord32Compare(this, node, &cont);
2164}
2165
2166void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
2167 OptionalOpIndex ovf = FindProjection(node, 1);
2168 if (ovf.valid() && IsUsed(ovf.value())) {
2169 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2170 return VisitBinop(this, node, kLoong64Add_d, &cont);
2171 }
2172
2173 FlagsContinuation cont;
2174 VisitBinop(this, node, kLoong64Add_d, &cont);
2175}
2176
2177void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
2178 OptionalOpIndex ovf = FindProjection(node, 1);
2179 if (ovf.valid()) {
2180 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2181 return VisitBinop(this, node, kLoong64Sub_d, &cont);
2182 }
2183
2184 FlagsContinuation cont;
2185 VisitBinop(this, node, kLoong64Sub_d, &cont);
2186}
2187
2188void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
2189 OptionalOpIndex ovf = FindProjection(node, 1);
2190 if (ovf.valid()) {
2191 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2192 return VisitBinop(this, node, kLoong64MulOvf_w, &cont);
2193 }
2194
2195 FlagsContinuation cont;
2196 VisitBinop(this, node, kLoong64MulOvf_w, &cont);
2197}
2198
2199void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
2200 OptionalOpIndex ovf = FindProjection(node, 1);
2201 if (ovf.valid()) {
2202 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2203 return VisitBinop(this, node, kLoong64MulOvf_d, &cont);
2204 }
2205
2206 FlagsContinuation cont;
2207 VisitBinop(this, node, kLoong64MulOvf_d, &cont);
2208}
2209
2210void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
2211 OptionalOpIndex ovf = FindProjection(node, 1);
2212 if (ovf.valid()) {
2213 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2214 return VisitBinop(this, node, kLoong64AddOvf_d, &cont);
2215 }
2216
2217 FlagsContinuation cont;
2218 VisitBinop(this, node, kLoong64AddOvf_d, &cont);
2219}
2220
2221void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
2222 OptionalOpIndex ovf = FindProjection(node, 1);
2223 if (ovf.valid()) {
2224 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
2225 return VisitBinop(this, node, kLoong64SubOvf_d, &cont);
2226 }
2227
2228 FlagsContinuation cont;
2229 VisitBinop(this, node, kLoong64SubOvf_d, &cont);
2230}
2231
2232void InstructionSelectorT::VisitWord64Equal(OpIndex node) {
2233 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2234 VisitWord64Compare(this, node, &cont);
2235}
2236
2237void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
2238 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2239 VisitWord64Compare(this, node, &cont);
2240}
2241
2242void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
2243 FlagsContinuation cont =
2244 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2245 VisitWord64Compare(this, node, &cont);
2246}
2247
2248void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
2249 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2250 VisitWord64Compare(this, node, &cont);
2251}
2252
2253void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
2254 FlagsContinuation cont =
2255 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2256 VisitWord64Compare(this, node, &cont);
2257}
2258
2259void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
2260 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2261 VisitFloat32Compare(this, node, &cont);
2262}
2263
2264void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
2265 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2266 VisitFloat32Compare(this, node, &cont);
2267}
2268
2269void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
2270 FlagsContinuation cont =
2271 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2272 VisitFloat32Compare(this, node, &cont);
2273}
2274
2275void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
2276 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2277 VisitFloat64Compare(this, node, &cont);
2278}
2279
2280void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
2281 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2282 VisitFloat64Compare(this, node, &cont);
2283}
2284
2285void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
2286 FlagsContinuation cont =
2287 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2288 VisitFloat64Compare(this, node, &cont);
2289}
2290
2291void InstructionSelectorT::VisitFloat64ExtractLowWord32(OpIndex node) {
2292 VisitRR(this, kLoong64Float64ExtractLowWord32, node);
2293}
2294
2295void InstructionSelectorT::VisitFloat64ExtractHighWord32(OpIndex node) {
2296 VisitRR(this, kLoong64Float64ExtractHighWord32, node);
2297}
2298
2299void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
2300 Loong64OperandGeneratorT g(this);
2301 const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
2302 OpIndex hi = bitcast.high_word32();
2303 OpIndex lo = bitcast.low_word32();
2304
2305 InstructionOperand temps[] = {g.TempRegister()};
2306 Emit(kLoong64Float64FromWord32Pair, g.DefineAsRegister(node), g.Use(hi),
2307 g.Use(lo), arraysize(temps), temps);
2308}
2309
2310void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
2311 VisitRR(this, kLoong64Float64SilenceNaN, node);
2312}
2313
2314void InstructionSelectorT::VisitFloat64InsertLowWord32(OpIndex node) {
2315 UNIMPLEMENTED();
2316}
2317
2318void InstructionSelectorT::VisitFloat64InsertHighWord32(OpIndex node) {
2319 UNIMPLEMENTED();
2320}
2321
2322void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
2323 Loong64OperandGeneratorT g(this);
2324 Emit(kLoong64Dbar, g.NoOutput());
2325}
2326
2327void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
2328 VisitAtomicLoad(this, node, AtomicWidth::kWord32);
2329}
2330
2331void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
2332 VisitAtomicStore(this, node, AtomicWidth::kWord32);
2333}
2334
2335void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
2336 VisitAtomicLoad(this, node, AtomicWidth::kWord64);
2337}
2338
2339void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
2340 VisitAtomicStore(this, node, AtomicWidth::kWord64);
2341}
2342
2343void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
2344 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2345 ArchOpcode opcode;
2346 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2347 opcode = kAtomicExchangeInt8;
2348 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2349 opcode = kAtomicExchangeUint8;
2350 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2351 opcode = kAtomicExchangeInt16;
2352 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2353 opcode = kAtomicExchangeUint16;
2354 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2355 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2356 opcode = kAtomicExchangeWord32;
2357 } else {
2358 UNREACHABLE();
2359 }
2360 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32,
2361 atomic_op.memory_access_kind);
2362}
2363
2364void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
2365 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2366 ArchOpcode opcode;
2367 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2368 opcode = kAtomicExchangeUint8;
2369 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2370 opcode = kAtomicExchangeUint16;
2371 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2372 opcode = kAtomicExchangeWord32;
2373 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2374 opcode = kLoong64Word64AtomicExchangeUint64;
2375 } else {
2376 UNREACHABLE();
2377 }
2378 VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64,
2379 atomic_op.memory_access_kind);
2380}
2381
2382void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
2383 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2384 ArchOpcode opcode;
2385 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2386 opcode = kAtomicCompareExchangeInt8;
2387 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2388 opcode = kAtomicCompareExchangeUint8;
2389 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2390 opcode = kAtomicCompareExchangeInt16;
2391 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2392 opcode = kAtomicCompareExchangeUint16;
2393 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2394 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2395 opcode = kAtomicCompareExchangeWord32;
2396 } else {
2397 UNREACHABLE();
2398 }
2399 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32,
2400 atomic_op.memory_access_kind);
2401}
2402
2403void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
2404 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2405 ArchOpcode opcode;
2406 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2407 opcode = kAtomicCompareExchangeUint8;
2408 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2409 opcode = kAtomicCompareExchangeUint16;
2410 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2411 opcode = kAtomicCompareExchangeWord32;
2412 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2413 opcode = kLoong64Word64AtomicCompareExchangeUint64;
2414 } else {
2415 UNREACHABLE();
2416 }
2417 VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64,
2418 atomic_op.memory_access_kind);
2419}
2420
2421void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
2422 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2423 ArchOpcode uint16_op, ArchOpcode word32_op) {
2424 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2425 ArchOpcode opcode;
2426 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2427 opcode = int8_op;
2428 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2429 opcode = uint8_op;
2430 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2431 opcode = int16_op;
2432 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2433 opcode = uint16_op;
2434 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2435 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2436 opcode = word32_op;
2437 } else {
2438 UNREACHABLE();
2439 }
2440 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32,
2441 atomic_op.memory_access_kind);
2442}
2443
2444#define VISIT_ATOMIC_BINOP(op) \
2445 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
2446 VisitWord32AtomicBinaryOperation( \
2447 node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
2448 kAtomic##op##Uint16, kAtomic##op##Word32); \
2449 }
2450VISIT_ATOMIC_BINOP(Add)
2451VISIT_ATOMIC_BINOP(Sub)
2452VISIT_ATOMIC_BINOP(And)
2453VISIT_ATOMIC_BINOP(Or)
2454VISIT_ATOMIC_BINOP(Xor)
2455#undef VISIT_ATOMIC_BINOP
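// One expansion of the macro above, for reference: VISIT_ATOMIC_BINOP(Add)
// defines VisitWord32AtomicAdd(node), which forwards to
// VisitWord32AtomicBinaryOperation(node, kAtomicAddInt8, kAtomicAddUint8,
// kAtomicAddInt16, kAtomicAddUint16, kAtomicAddWord32); the memory
// representation of the AtomicRMWOp then selects the concrete opcode.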
2456
2457void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
2458 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
2459 ArchOpcode uint32_op, ArchOpcode uint64_op) {
2460 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2461 ArchOpcode opcode;
2462 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2463 opcode = uint8_op;
2464 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2465 opcode = uint16_op;
2466 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2467 opcode = uint32_op;
2468 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2469 opcode = uint64_op;
2470 } else {
2471 UNREACHABLE();
2472 }
2473 VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64,
2474 atomic_op.memory_access_kind);
2475}
2476
2477#define VISIT_ATOMIC_BINOP(op) \
2478 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
2479 VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
2480 kAtomic##op##Uint16, kAtomic##op##Word32, \
2481 kLoong64Word64Atomic##op##Uint64); \
2482 }
2483VISIT_ATOMIC_BINOP(Add)
2484VISIT_ATOMIC_BINOP(Sub)
2485VISIT_ATOMIC_BINOP(And)
2486VISIT_ATOMIC_BINOP(Or)
2487VISIT_ATOMIC_BINOP(Xor)
2488#undef VISIT_ATOMIC_BINOP
2489
2490void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
2491 UNREACHABLE();
2492}
2493
2494void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
2495 UNREACHABLE();
2496}
2497
2498#define SIMD_TYPE_LIST(V) \
2499 V(F64x2) \
2500 V(F32x4) \
2501 V(I64x2) \
2502 V(I32x4) \
2503 V(I16x8) \
2504 V(I8x16)
2505
2506#define SIMD_UNOP_LIST(V) \
2507 V(F64x2Abs, kLoong64F64x2Abs) \
2508 V(F64x2Neg, kLoong64F64x2Neg) \
2509 V(F64x2Sqrt, kLoong64F64x2Sqrt) \
2510 V(F64x2Ceil, kLoong64F64x2Ceil) \
2511 V(F64x2Floor, kLoong64F64x2Floor) \
2512 V(F64x2Trunc, kLoong64F64x2Trunc) \
2513 V(F64x2NearestInt, kLoong64F64x2NearestInt) \
2514 V(I64x2Neg, kLoong64I64x2Neg) \
2515 V(I64x2BitMask, kLoong64I64x2BitMask) \
2516 V(F64x2ConvertLowI32x4S, kLoong64F64x2ConvertLowI32x4S) \
2517 V(F64x2ConvertLowI32x4U, kLoong64F64x2ConvertLowI32x4U) \
2518 V(F64x2PromoteLowF32x4, kLoong64F64x2PromoteLowF32x4) \
2519 V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \
2520 V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \
2521 V(F32x4Abs, kLoong64F32x4Abs) \
2522 V(F32x4Neg, kLoong64F32x4Neg) \
2523 V(F32x4Sqrt, kLoong64F32x4Sqrt) \
2524 V(F32x4Ceil, kLoong64F32x4Ceil) \
2525 V(F32x4Floor, kLoong64F32x4Floor) \
2526 V(F32x4Trunc, kLoong64F32x4Trunc) \
2527 V(F32x4NearestInt, kLoong64F32x4NearestInt) \
2528 V(F32x4DemoteF64x2Zero, kLoong64F32x4DemoteF64x2Zero) \
2529 V(I64x2Abs, kLoong64I64x2Abs) \
2530 V(I64x2SConvertI32x4Low, kLoong64I64x2SConvertI32x4Low) \
2531 V(I64x2SConvertI32x4High, kLoong64I64x2SConvertI32x4High) \
2532 V(I64x2UConvertI32x4Low, kLoong64I64x2UConvertI32x4Low) \
2533 V(I64x2UConvertI32x4High, kLoong64I64x2UConvertI32x4High) \
2534 V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \
2535 V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \
2536 V(I32x4Neg, kLoong64I32x4Neg) \
2537 V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \
2538 V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \
2539 V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \
2540 V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \
2541 V(I32x4Abs, kLoong64I32x4Abs) \
2542 V(I32x4BitMask, kLoong64I32x4BitMask) \
2543 V(I32x4TruncSatF64x2SZero, kLoong64I32x4TruncSatF64x2SZero) \
2544 V(I32x4TruncSatF64x2UZero, kLoong64I32x4TruncSatF64x2UZero) \
2545 V(I32x4RelaxedTruncF32x4S, kLoong64I32x4RelaxedTruncF32x4S) \
2546 V(I32x4RelaxedTruncF32x4U, kLoong64I32x4RelaxedTruncF32x4U) \
2547 V(I32x4RelaxedTruncF64x2SZero, kLoong64I32x4RelaxedTruncF64x2SZero) \
2548 V(I32x4RelaxedTruncF64x2UZero, kLoong64I32x4RelaxedTruncF64x2UZero) \
2549 V(I16x8Neg, kLoong64I16x8Neg) \
2550 V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \
2551 V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \
2552 V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \
2553 V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \
2554 V(I16x8Abs, kLoong64I16x8Abs) \
2555 V(I16x8BitMask, kLoong64I16x8BitMask) \
2556 V(I8x16Neg, kLoong64I8x16Neg) \
2557 V(I8x16Abs, kLoong64I8x16Abs) \
2558 V(I8x16Popcnt, kLoong64I8x16Popcnt) \
2559 V(I8x16BitMask, kLoong64I8x16BitMask) \
2560 V(S128Not, kLoong64S128Not) \
2561 V(I64x2AllTrue, kLoong64I64x2AllTrue) \
2562 V(I32x4AllTrue, kLoong64I32x4AllTrue) \
2563 V(I16x8AllTrue, kLoong64I16x8AllTrue) \
2564 V(I8x16AllTrue, kLoong64I8x16AllTrue) \
2565 V(V128AnyTrue, kLoong64V128AnyTrue)
2566
2567#define SIMD_SHIFT_OP_LIST(V) \
2568 V(I64x2Shl) \
2569 V(I64x2ShrS) \
2570 V(I64x2ShrU) \
2571 V(I32x4Shl) \
2572 V(I32x4ShrS) \
2573 V(I32x4ShrU) \
2574 V(I16x8Shl) \
2575 V(I16x8ShrS) \
2576 V(I16x8ShrU) \
2577 V(I8x16Shl) \
2578 V(I8x16ShrS) \
2579 V(I8x16ShrU)
2580
2581#define SIMD_BINOP_LIST(V) \
2582 V(F64x2Add, kLoong64F64x2Add) \
2583 V(F64x2Sub, kLoong64F64x2Sub) \
2584 V(F64x2Mul, kLoong64F64x2Mul) \
2585 V(F64x2Div, kLoong64F64x2Div) \
2586 V(F64x2Min, kLoong64F64x2Min) \
2587 V(F64x2Max, kLoong64F64x2Max) \
2588 V(F64x2Eq, kLoong64F64x2Eq) \
2589 V(F64x2Ne, kLoong64F64x2Ne) \
2590 V(F64x2Lt, kLoong64F64x2Lt) \
2591 V(F64x2Le, kLoong64F64x2Le) \
2592 V(F64x2RelaxedMin, kLoong64F64x2RelaxedMin) \
2593 V(F64x2RelaxedMax, kLoong64F64x2RelaxedMax) \
2594 V(I64x2Eq, kLoong64I64x2Eq) \
2595 V(I64x2Ne, kLoong64I64x2Ne) \
2596 V(I64x2Add, kLoong64I64x2Add) \
2597 V(I64x2Sub, kLoong64I64x2Sub) \
2598 V(I64x2Mul, kLoong64I64x2Mul) \
2599 V(I64x2GtS, kLoong64I64x2GtS) \
2600 V(I64x2GeS, kLoong64I64x2GeS) \
2601 V(F32x4Add, kLoong64F32x4Add) \
2602 V(F32x4Sub, kLoong64F32x4Sub) \
2603 V(F32x4Mul, kLoong64F32x4Mul) \
2604 V(F32x4Div, kLoong64F32x4Div) \
2605 V(F32x4Max, kLoong64F32x4Max) \
2606 V(F32x4Min, kLoong64F32x4Min) \
2607 V(F32x4Eq, kLoong64F32x4Eq) \
2608 V(F32x4Ne, kLoong64F32x4Ne) \
2609 V(F32x4Lt, kLoong64F32x4Lt) \
2610 V(F32x4Le, kLoong64F32x4Le) \
2611 V(F32x4RelaxedMin, kLoong64F32x4RelaxedMin) \
2612 V(F32x4RelaxedMax, kLoong64F32x4RelaxedMax) \
2613 V(I32x4Add, kLoong64I32x4Add) \
2614 V(I32x4Sub, kLoong64I32x4Sub) \
2615 V(I32x4Mul, kLoong64I32x4Mul) \
2616 V(I32x4MaxS, kLoong64I32x4MaxS) \
2617 V(I32x4MinS, kLoong64I32x4MinS) \
2618 V(I32x4MaxU, kLoong64I32x4MaxU) \
2619 V(I32x4MinU, kLoong64I32x4MinU) \
2620 V(I32x4Eq, kLoong64I32x4Eq) \
2621 V(I32x4Ne, kLoong64I32x4Ne) \
2622 V(I32x4GtS, kLoong64I32x4GtS) \
2623 V(I32x4GeS, kLoong64I32x4GeS) \
2624 V(I32x4GtU, kLoong64I32x4GtU) \
2625 V(I32x4GeU, kLoong64I32x4GeU) \
2626 V(I32x4DotI16x8S, kLoong64I32x4DotI16x8S) \
2627 V(I16x8Add, kLoong64I16x8Add) \
2628 V(I16x8AddSatS, kLoong64I16x8AddSatS) \
2629 V(I16x8AddSatU, kLoong64I16x8AddSatU) \
2630 V(I16x8Sub, kLoong64I16x8Sub) \
2631 V(I16x8SubSatS, kLoong64I16x8SubSatS) \
2632 V(I16x8SubSatU, kLoong64I16x8SubSatU) \
2633 V(I16x8Mul, kLoong64I16x8Mul) \
2634 V(I16x8MaxS, kLoong64I16x8MaxS) \
2635 V(I16x8MinS, kLoong64I16x8MinS) \
2636 V(I16x8MaxU, kLoong64I16x8MaxU) \
2637 V(I16x8MinU, kLoong64I16x8MinU) \
2638 V(I16x8Eq, kLoong64I16x8Eq) \
2639 V(I16x8Ne, kLoong64I16x8Ne) \
2640 V(I16x8GtS, kLoong64I16x8GtS) \
2641 V(I16x8GeS, kLoong64I16x8GeS) \
2642 V(I16x8GtU, kLoong64I16x8GtU) \
2643 V(I16x8GeU, kLoong64I16x8GeU) \
2644 V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \
2645 V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \
2646 V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \
2647 V(I16x8Q15MulRSatS, kLoong64I16x8Q15MulRSatS) \
2648 V(I16x8RelaxedQ15MulRS, kLoong64I16x8RelaxedQ15MulRS) \
2649 V(I8x16Add, kLoong64I8x16Add) \
2650 V(I8x16AddSatS, kLoong64I8x16AddSatS) \
2651 V(I8x16AddSatU, kLoong64I8x16AddSatU) \
2652 V(I8x16Sub, kLoong64I8x16Sub) \
2653 V(I8x16SubSatS, kLoong64I8x16SubSatS) \
2654 V(I8x16SubSatU, kLoong64I8x16SubSatU) \
2655 V(I8x16MaxS, kLoong64I8x16MaxS) \
2656 V(I8x16MinS, kLoong64I8x16MinS) \
2657 V(I8x16MaxU, kLoong64I8x16MaxU) \
2658 V(I8x16MinU, kLoong64I8x16MinU) \
2659 V(I8x16Eq, kLoong64I8x16Eq) \
2660 V(I8x16Ne, kLoong64I8x16Ne) \
2661 V(I8x16GtS, kLoong64I8x16GtS) \
2662 V(I8x16GeS, kLoong64I8x16GeS) \
2663 V(I8x16GtU, kLoong64I8x16GtU) \
2664 V(I8x16GeU, kLoong64I8x16GeU) \
2665 V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \
2666 V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \
2667 V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \
2668 V(S128And, kLoong64S128And) \
2669 V(S128Or, kLoong64S128Or) \
2670 V(S128Xor, kLoong64S128Xor) \
2671 V(S128AndNot, kLoong64S128AndNot)
2672
2673void InstructionSelectorT::VisitS128Const(OpIndex node) { UNIMPLEMENTED(); }
2674
2675void InstructionSelectorT::VisitS128Zero(OpIndex node) { UNIMPLEMENTED(); }
2676
2677#define SIMD_VISIT_SPLAT(Type) \
2678 void InstructionSelectorT::Visit##Type##Splat(OpIndex node) { \
2679 VisitRR(this, kLoong64##Type##Splat, node); \
2680 }
2681SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
2682#undef SIMD_VISIT_SPLAT
2683
2684#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
2685 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
2686 VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \
2687 }
2688SIMD_VISIT_EXTRACT_LANE(F64x2, )
2689SIMD_VISIT_EXTRACT_LANE(F32x4, )
2690SIMD_VISIT_EXTRACT_LANE(I64x2, )
2691SIMD_VISIT_EXTRACT_LANE(I32x4, )
2692SIMD_VISIT_EXTRACT_LANE(I16x8, U)
2693SIMD_VISIT_EXTRACT_LANE(I16x8, S)
2694SIMD_VISIT_EXTRACT_LANE(I8x16, U)
2695SIMD_VISIT_EXTRACT_LANE(I8x16, S)
2696#undef SIMD_VISIT_EXTRACT_LANE
2697
2698#define SIMD_VISIT_REPLACE_LANE(Type) \
2699 void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
2700 VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \
2701 }
2702SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
2703#undef SIMD_VISIT_REPLACE_LANE
2704
2705#define SIMD_VISIT_UNOP(Name, instruction) \
2706 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2707 VisitRR(this, instruction, node); \
2708 }
2709SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
2710#undef SIMD_VISIT_UNOP
2711
2712#define SIMD_VISIT_SHIFT_OP(Name) \
2713 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2714 VisitSimdShift(this, kLoong64##Name, node); \
2715 }
2716SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
2717#undef SIMD_VISIT_SHIFT_OP
2718
2719#define SIMD_VISIT_BINOP(Name, instruction) \
2720 void InstructionSelectorT::Visit##Name(OpIndex node) { \
2721 VisitRRR(this, instruction, node); \
2722 }
2723SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
2724#undef SIMD_VISIT_BINOP
2725
2726void InstructionSelectorT::VisitS128Select(OpIndex node) {
2727 VisitRRRR(this, kLoong64S128Select, node);
2728}
2729
2730void InstructionSelectorT::VisitI8x16RelaxedLaneSelect(OpIndex node) {
2731 VisitS128Select(node);
2732}
2733
2734void InstructionSelectorT::VisitI16x8RelaxedLaneSelect(OpIndex node) {
2735 VisitS128Select(node);
2736}
2737
2738void InstructionSelectorT::VisitI32x4RelaxedLaneSelect(OpIndex node) {
2739 VisitS128Select(node);
2740}
2741
2742void InstructionSelectorT::VisitI64x2RelaxedLaneSelect(OpIndex node) {
2743 VisitS128Select(node);
2744}
2745
2746#define SIMD_UNIMP_OP_LIST(V) \
2747 V(F64x2Qfma) \
2748 V(F64x2Qfms) \
2749 V(F32x4Qfma) \
2750 V(F32x4Qfms) \
2751 V(I16x8DotI8x16I7x16S) \
2752 V(I32x4DotI8x16I7x16AddS)
2753
2754#define SIMD_VISIT_UNIMP_OP(Name) \
2755 void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }
2756SIMD_UNIMP_OP_LIST(SIMD_VISIT_UNIMP_OP)
2757
2758#undef SIMD_VISIT_UNIMP_OP
2759#undef SIMD_UNIMP_OP_LIST
2760
2761#define UNIMPLEMENTED_SIMD_FP16_OP_LIST(V) \
2762 V(F16x8Splat) \
2763 V(F16x8ExtractLane) \
2764 V(F16x8ReplaceLane) \
2765 V(F16x8Abs) \
2766 V(F16x8Neg) \
2767 V(F16x8Sqrt) \
2768 V(F16x8Floor) \
2769 V(F16x8Ceil) \
2770 V(F16x8Trunc) \
2771 V(F16x8NearestInt) \
2772 V(F16x8Add) \
2773 V(F16x8Sub) \
2774 V(F16x8Mul) \
2775 V(F16x8Div) \
2776 V(F16x8Min) \
2777 V(F16x8Max) \
2778 V(F16x8Pmin) \
2779 V(F16x8Pmax) \
2780 V(F16x8Eq) \
2781 V(F16x8Ne) \
2782 V(F16x8Lt) \
2783 V(F16x8Le) \
2784 V(F16x8SConvertI16x8) \
2785 V(F16x8UConvertI16x8) \
2786 V(I16x8SConvertF16x8) \
2787 V(I16x8UConvertF16x8) \
2788 V(F32x4PromoteLowF16x8) \
2789 V(F16x8DemoteF32x4Zero) \
2790 V(F16x8DemoteF64x2Zero) \
2791 V(F16x8Qfma) \
2792 V(F16x8Qfms)
2793
2794#define SIMD_VISIT_UNIMPL_FP16_OP(Name) \
2795 void InstructionSelectorT::Visit##Name(OpIndex node) { UNIMPLEMENTED(); }
2796
2797UNIMPLEMENTED_SIMD_FP16_OP_LIST(SIMD_VISIT_UNIMPL_FP16_OP)
2798#undef SIMD_VISIT_UNIMPL_FP16_OP
2799#undef UNIMPLEMENTED_SIMD_FP16_OP_LIST
2800
2801#if V8_ENABLE_WEBASSEMBLY
2802
2803void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNIMPLEMENTED(); }
2804
2805#else
2806void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNREACHABLE(); }
2807#endif // V8_ENABLE_WEBASSEMBLY
2808
2809void InstructionSelectorT::VisitI8x16Swizzle(OpIndex node) { UNIMPLEMENTED(); }
2810
2811void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
2812 OperandGenerator g(this);
2813 auto input = g.UseRegister(this->input_at(node, 0));
2814 Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
2815}
2816
2817void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
2818 VisitRR(this, kLoong64Ext_w_b, node);
2819}
2820
2821void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
2822 VisitRR(this, kLoong64Ext_w_h, node);
2823}
2824
2825void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
2826 VisitRR(this, kLoong64Ext_w_b, node);
2827}
2828
2829void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
2830 VisitRR(this, kLoong64Ext_w_h, node);
2831}
2832
2833void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
2834 UNIMPLEMENTED();
2835}
2836
2837void InstructionSelectorT::VisitF32x4Pmin(OpIndex node) {
2838 VisitUniqueRRR(this, kLoong64F32x4Pmin, node);
2839}
2840
2841void InstructionSelectorT::VisitF32x4Pmax(OpIndex node) {
2842 VisitUniqueRRR(this, kLoong64F32x4Pmax, node);
2843}
2844
2845void InstructionSelectorT::VisitF64x2Pmin(OpIndex node) {
2846 VisitUniqueRRR(this, kLoong64F64x2Pmin, node);
2847}
2848
2849void InstructionSelectorT::VisitF64x2Pmax(OpIndex node) {
2850 VisitUniqueRRR(this, kLoong64F64x2Pmax, node);
2851}
2852
2853#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
2854 void InstructionSelectorT::Visit##OPCODE1##ExtMulLow##OPCODE2( \
2855 OpIndex node) {} \
2856 void InstructionSelectorT::Visit##OPCODE1##ExtMulHigh##OPCODE2( \
2857 OpIndex node) {}
2858
2859VISIT_EXT_MUL(I64x2, I32x4S)
2860VISIT_EXT_MUL(I64x2, I32x4U)
2861VISIT_EXT_MUL(I32x4, I16x8S)
2862VISIT_EXT_MUL(I32x4, I16x8U)
2863VISIT_EXT_MUL(I16x8, I8x16S)
2864VISIT_EXT_MUL(I16x8, I8x16U)
2865#undef VISIT_EXT_MUL
2866
2867#define VISIT_EXTADD_PAIRWISE(OPCODE) \
2868 void InstructionSelectorT::Visit##OPCODE(OpIndex node) { UNIMPLEMENTED(); }
2869VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S)
2870VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U)
2871VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S)
2872VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U)
2873#undef VISIT_EXTADD_PAIRWISE
2874
2875void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
2876 int first_input_index,
2877 OpIndex node) {
2878 UNREACHABLE();
2879}
2880
2881// static
2882MachineOperatorBuilder::Flags
2883InstructionSelector::SupportedMachineOperatorFlags() {
2884 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2885 return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
2886 MachineOperatorBuilder::kInt32DivIsSafe |
2887 MachineOperatorBuilder::kUint32DivIsSafe |
2888 MachineOperatorBuilder::kFloat64RoundDown |
2889 MachineOperatorBuilder::kFloat32RoundDown |
2890 MachineOperatorBuilder::kFloat64RoundUp |
2891 MachineOperatorBuilder::kFloat32RoundUp |
2892 MachineOperatorBuilder::kFloat64RoundTruncate |
2893 MachineOperatorBuilder::kFloat32RoundTruncate |
2894 MachineOperatorBuilder::kFloat64RoundTiesEven |
2895 MachineOperatorBuilder::kFloat32RoundTiesEven;
2896}
2897
2898// static
2899MachineOperatorBuilder::AlignmentRequirements
2900InstructionSelector::AlignmentRequirements() {
2901 return MachineOperatorBuilder::AlignmentRequirements::
2902 FullUnalignedAccessSupport();
2903}
2904
2905#undef SIMD_BINOP_LIST
2906#undef SIMD_SHIFT_OP_LIST
2907#undef SIMD_UNOP_LIST
2908#undef SIMD_TYPE_LIST
2909#undef TRACE
2910
2911} // namespace compiler
2912} // namespace internal
2913} // namespace v8