V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
instruction-selector-ppc.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <optional>
6
7#include "src/base/iterator.h"
11#include "src/roots/roots-inl.h"
12
13namespace v8 {
14namespace internal {
15namespace compiler {
16
17using namespace turboshaft; // NOLINT(build/namespaces)
18
 29
 30// Adds PPC-specific methods for generating operands.
 31class PPCOperandGeneratorT final : public OperandGeneratorT {
 32 public:
 33 explicit PPCOperandGeneratorT(InstructionSelectorT* selector)
 34 : OperandGeneratorT(selector) {}
 35
 36 InstructionOperand UseOperand(OpIndex node, ImmediateMode mode) {
 37 if (CanBeImmediate(node, mode)) {
38 return UseImmediate(node);
39 }
40 return UseRegister(node);
41 }
 42
 43 bool CanBeImmediate(OpIndex node, ImmediateMode mode) {
 44 const ConstantOp* constant = selector()->Get(node).TryCast<ConstantOp>();
45 if (!constant) return false;
46 if (constant->kind == ConstantOp::Kind::kCompressedHeapObject) {
47 if (!COMPRESS_POINTERS_BOOL) return false;
48 // For builtin code we need static roots
49 if (selector()->isolate()->bootstrapper() && !V8_STATIC_ROOTS_BOOL) {
50 return false;
51 }
52 const RootsTable& roots_table = selector()->isolate()->roots_table();
53 RootIndex root_index;
54 Handle<HeapObject> value = constant->handle();
55 if (roots_table.IsRootHandle(value, &root_index)) {
 56 if (!RootsTable::IsReadOnly(root_index)) return false;
 57 return CanBeImmediate(MacroAssemblerBase::ReadOnlyRootPtr(
 58 root_index, selector()->isolate()),
 59 mode);
 60 }
61 return false;
62 }
63
64 int64_t value;
65 if (!selector()->MatchSignedIntegralConstant(node, &value)) return false;
66 return CanBeImmediate(value, mode);
67 }
68
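 // A short summary of the immediate modes checked below (inferred from the
 // checks themselves; the instruction-format mapping is an assumption):
 // kInt16Imm accepts any signed 16-bit value, kInt16Imm_Unsigned any
 // unsigned 16-bit value (logical immediates), kInt16Imm_Negate values whose
 // negation fits in 16 bits, kInt16Imm_4ByteAligned signed 16-bit values with
 // the low two bits clear (presumably DS-form loads/stores such as ld/std),
 // kShift32Imm/kShift64Imm in-range shift amounts, and kInt34Imm the 34-bit
 // displacements of Power10 prefixed instructions (hence the PPC_10_PLUS
 // checks elsewhere in this file).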
69 bool CanBeImmediate(int64_t value, ImmediateMode mode) {
70 switch (mode) {
 71 case kInt16Imm:
 72 return is_int16(value);
 73 case kInt16Imm_Unsigned:
 74 return is_uint16(value);
 75 case kInt16Imm_Negate:
 76 return is_int16(-value);
 77 case kInt16Imm_4ByteAligned:
 78 return is_int16(value) && !(value & 3);
79 case kShift32Imm:
80 return 0 <= value && value < 32;
81 case kInt34Imm:
82 return is_int34(value);
83 case kShift64Imm:
84 return 0 <= value && value < 64;
85 case kNoImmediate:
86 return false;
87 }
88 return false;
89 }
90};
91
92namespace {
93
94void VisitRR(InstructionSelectorT* selector, InstructionCode opcode,
95 OpIndex node) {
96 PPCOperandGeneratorT g(selector);
97 selector->Emit(opcode, g.DefineAsRegister(node),
98 g.UseRegister(selector->input_at(node, 0)));
99}
100
101void VisitRRR(InstructionSelectorT* selector, InstructionCode opcode,
102 OpIndex node) {
103 PPCOperandGeneratorT g(selector);
104 selector->Emit(opcode, g.DefineAsRegister(node),
105 g.UseRegister(selector->input_at(node, 0)),
106 g.UseRegister(selector->input_at(node, 1)));
107}
108
109void VisitRRO(InstructionSelectorT* selector, InstructionCode opcode,
110 OpIndex node, ImmediateMode operand_mode) {
111 PPCOperandGeneratorT g(selector);
112 selector->Emit(opcode, g.DefineAsRegister(node),
113 g.UseRegister(selector->input_at(node, 0)),
114 g.UseOperand(selector->input_at(node, 1), operand_mode));
115}
116
117void VisitTryTruncateDouble(InstructionSelectorT* selector,
118 InstructionCode opcode, OpIndex node) {
119 PPCOperandGeneratorT g(selector);
120 InstructionOperand inputs[] = {g.UseRegister(selector->input_at(node, 0))};
121 InstructionOperand outputs[2];
122 size_t output_count = 0;
123 outputs[output_count++] = g.DefineAsRegister(node);
124
125 OptionalOpIndex success_output = selector->FindProjection(node, 1);
126 if (success_output.valid()) {
127 outputs[output_count++] = g.DefineAsRegister(success_output.value());
128 }
129
130 selector->Emit(opcode, output_count, outputs, 1, inputs);
131}
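// The TryTruncate visitors built on the helper above produce the truncated
// value as their first output and, only when the graph actually uses
// projection 1, a second register that reports whether the conversion
// succeeded.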
132
133// Shared routine for multiple binary operations.
134void VisitBinop(InstructionSelectorT* selector, OpIndex node,
135 InstructionCode opcode, ImmediateMode operand_mode,
136 FlagsContinuationT* cont) {
137 PPCOperandGeneratorT g(selector);
138 InstructionOperand inputs[4];
139 size_t input_count = 0;
140 InstructionOperand outputs[2];
141 size_t output_count = 0;
142
143 inputs[input_count++] = g.UseRegister(selector->input_at(node, 0));
144 inputs[input_count++] =
145 g.UseOperand(selector->input_at(node, 1), operand_mode);
146
147 if (cont->IsDeoptimize()) {
148 // If we can deoptimize as a result of the binop, we need to make sure that
149 // the deopt inputs are not overwritten by the binop result. One way
150 // to achieve that is to declare the output register as same-as-first.
151 outputs[output_count++] = g.DefineSameAsFirst(node);
152 } else {
153 outputs[output_count++] = g.DefineAsRegister(node);
154 }
155
156 DCHECK_NE(0u, input_count);
157 DCHECK_NE(0u, output_count);
158 DCHECK_GE(arraysize(inputs), input_count);
159 DCHECK_GE(arraysize(outputs), output_count);
160
161 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
162 inputs, cont);
163}
164
165// Shared routine for multiple binary operations.
166void VisitBinop(InstructionSelectorT* selector, OpIndex node,
167 InstructionCode opcode, ImmediateMode operand_mode) {
168 FlagsContinuationT cont;
169 VisitBinop(selector, node, opcode, operand_mode, &cont);
170}
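// Example use of the shared routine: VisitInt32Add below is simply
// VisitBinop(this, node, kPPC_Add32, kInt16Imm), so the right operand is
// absorbed as a signed 16-bit immediate when it fits (presumably an addi)
// and is otherwise forced into a register.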
171
172} // namespace
173
174void InstructionSelectorT::VisitStackSlot(OpIndex node) {
175 const StackSlotOp& stack_slot = Cast<StackSlotOp>(node);
176 int slot = frame_->AllocateSpillSlot(stack_slot.size, stack_slot.alignment,
177 stack_slot.is_tagged);
178 OperandGenerator g(this);
179
180 Emit(kArchStackSlot, g.DefineAsRegister(node),
181 sequence()->AddImmediate(Constant(slot)), 0, nullptr);
182}
183
184void InstructionSelectorT::VisitAbortCSADcheck(OpIndex node) {
185 PPCOperandGeneratorT g(this);
186 Emit(kArchAbortCSADcheck, g.NoOutput(),
187 g.UseFixed(this->input_at(node, 0), r4));
188}
189
190static ArchOpcode SelectLoadOpcode(MemoryRepresentation loaded_rep,
191 RegisterRepresentation result_rep,
192 ImmediateMode* mode) {
193 // NOTE: The meaning of `loaded_rep` = `MemoryRepresentation::AnyTagged()` is
194 // we are loading a compressed tagged field, while `result_rep` =
195 // `RegisterRepresentation::Tagged()` refers to an uncompressed tagged value.
196 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
197 *mode = kInt34Imm;
198 } else {
199 *mode = kInt16Imm;
200 }
201 switch (loaded_rep) {
204 return kPPC_LoadWordS8;
207 return kPPC_LoadWordU8;
210 return kPPC_LoadWordS16;
213 return kPPC_LoadWordU16;
217 return kPPC_LoadWordU32;
221 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
222 return kPPC_LoadWord64;
227 return kPPC_LoadFloat32;
230 return kPPC_LoadDouble;
231#ifdef V8_COMPRESS_POINTERS
234 if (result_rep == RegisterRepresentation::Compressed()) {
235 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
236 return kPPC_LoadWordS32;
237 }
239 return kPPC_LoadDecompressTagged;
241 if (result_rep == RegisterRepresentation::Compressed()) {
242 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
243 return kPPC_LoadWordS32;
244 }
246 return kPPC_LoadDecompressTaggedSigned;
247#else
248 USE(result_rep);
253 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
254 return kPPC_LoadWord64;
255#endif
260 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
261 return kPPC_LoadWord64;
263 return kPPC_LoadDecodeSandboxedPointer;
266 // Vectors do not support MRI mode, only MRR is available.
267 *mode = kNoImmediate;
268 return kPPC_LoadSimd128;
272 UNREACHABLE();
273 }
274}
275
276static ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep,
277 ImmediateMode* mode) {
278 *mode = kInt34Imm;
279 } else {
280 *mode = kInt16Imm;
281 }
282 switch (load_rep.representation()) {
284 return kPPC_LoadFloat32;
286 return kPPC_LoadDouble;
287 case MachineRepresentation::kBit: // Fall through.
289 return load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
291 return load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
293 return kPPC_LoadWordU32;
294 case MachineRepresentation::kCompressedPointer: // Fall through.
296#ifdef V8_COMPRESS_POINTERS
297 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
298 return kPPC_LoadWordS32;
299#else
300 UNREACHABLE();
301#endif
303 UNREACHABLE();
305 return kPPC_LoadDecodeSandboxedPointer;
306#ifdef V8_COMPRESS_POINTERS
308 return kPPC_LoadDecompressTaggedSigned;
310 return kPPC_LoadDecompressTagged;
312 return kPPC_LoadDecompressTagged;
313#else
314 case MachineRepresentation::kTaggedSigned: // Fall through.
315 case MachineRepresentation::kTaggedPointer: // Fall through.
316 case MachineRepresentation::kTagged: // Fall through.
317#endif
319 if (*mode != kInt34Imm) *mode = kInt16Imm_4ByteAligned;
320 return kPPC_LoadWord64;
322 // Vectors do not support MRI mode, only MRR is available.
323 *mode = kNoImmediate;
324 return kPPC_LoadSimd128;
327 case MachineRepresentation::kProtectedPointer: // Fall through.
328 case MachineRepresentation::kSimd256: // Fall through.
329 case MachineRepresentation::kMapWord: // Fall through.
330 case MachineRepresentation::kFloat16RawBits: // Fall through.
332 UNREACHABLE();
333 }
334}
335
336static void VisitLoadCommon(InstructionSelectorT* selector, OpIndex node,
337 ImmediateMode mode, InstructionCode opcode) {
338 PPCOperandGeneratorT g(selector);
339 auto load_view = selector->load_view(node);
340 OpIndex base = load_view.base();
341 OpIndex offset = load_view.index();
342
343 bool is_atomic = load_view.is_atomic();
344
 345 if (selector->is_load_root_register(base)) {
 346 selector->Emit(opcode |= AddressingModeField::encode(kMode_Root),
 347 g.DefineAsRegister(node), g.UseRegister(offset),
 348 g.UseImmediate(is_atomic));
 349 } else if (g.CanBeImmediate(offset, mode)) {
 350 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
 351 g.DefineAsRegister(node), g.UseRegister(base),
 352 g.UseImmediate(offset), g.UseImmediate(is_atomic));
 353 } else if (g.CanBeImmediate(base, mode)) {
 354 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
 355 g.DefineAsRegister(node), g.UseRegister(offset),
 356 g.UseImmediate(base), g.UseImmediate(is_atomic));
 357 } else {
 358 selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
 359 g.DefineAsRegister(node), g.UseRegister(base),
 360 g.UseRegister(offset), g.UseImmediate(is_atomic));
 361 }
362}
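// Addressing modes used above: kMode_MRI is base register plus immediate
// displacement, kMode_MRR is base register plus index register, and
// kMode_Root addresses the slot relative to the root register. The trailing
// immediate operand tells the code generator whether the access is atomic.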
363
364void InstructionSelectorT::VisitLoad(OpIndex node) {
365 TurboshaftAdapter::LoadView load_view = this->load_view(node);
366 ImmediateMode mode;
367 InstructionCode opcode = SelectLoadOpcode(load_view.ts_loaded_rep(),
368 load_view.ts_result_rep(), &mode);
369 VisitLoadCommon(this, node, mode, opcode);
370}
371
372void InstructionSelectorT::VisitProtectedLoad(OpIndex node) {
373 // TODO(eholk)
375}
376
377void VisitStoreCommon(InstructionSelectorT* selector, OpIndex node,
378 StoreRepresentation store_rep,
379 std::optional<AtomicMemoryOrder> atomic_order) {
380 PPCOperandGeneratorT g(selector);
381 auto store_view = selector->store_view(node);
382 OpIndex base = store_view.base();
383 OpIndex offset = selector->value(store_view.index());
384 OpIndex value = store_view.value();
385 bool is_atomic = store_view.is_atomic();
386
387 MachineRepresentation rep = store_rep.representation();
388 WriteBarrierKind write_barrier_kind = kNoWriteBarrier;
389
390 if (!is_atomic) {
391 write_barrier_kind = store_rep.write_barrier_kind();
392 }
393
 394 if (v8_flags.enable_unconditional_write_barriers &&
 395 CanBeTaggedOrCompressedPointer(rep)) {
 396 write_barrier_kind = kFullWriteBarrier;
397 }
398
399 if (write_barrier_kind != kNoWriteBarrier &&
400 !v8_flags.disable_write_barriers) {
402 // Uncompressed stores should not happen if we need a write barrier.
403 CHECK((store_view.ts_stored_rep() !=
405 (store_view.ts_stored_rep() !=
407 (store_view.ts_stored_rep() !=
409 AddressingMode addressing_mode;
410 InstructionOperand inputs[4];
411 size_t input_count = 0;
412 inputs[input_count++] = g.UseUniqueRegister(base);
413 // OutOfLineRecordWrite uses the offset in an 'add' instruction as well as
 414 // for the store itself, so we must check compatibility with both.
 415 if (g.CanBeImmediate(offset, kInt16Imm) &&
 416 g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
 417 ) {
418 inputs[input_count++] = g.UseImmediate(offset);
419 addressing_mode = kMode_MRI;
420 } else {
421 inputs[input_count++] = g.UseUniqueRegister(offset);
422 addressing_mode = kMode_MRR;
423 }
424 inputs[input_count++] = g.UseUniqueRegister(value);
425 RecordWriteMode record_write_mode =
426 WriteBarrierKindToRecordWriteMode(write_barrier_kind);
427 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
 428 size_t const temp_count = arraysize(temps);
 429 InstructionCode code;
 430 if (rep == MachineRepresentation::kIndirectPointer) {
 431 DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier);
432 // In this case we need to add the IndirectPointerTag as additional input.
433 code = kArchStoreIndirectWithWriteBarrier;
434 IndirectPointerTag tag = store_view.indirect_pointer_tag();
435 inputs[input_count++] = g.UseImmediate(static_cast<int64_t>(tag));
436 } else {
437 code = kArchStoreWithWriteBarrier;
438 }
439 code |= AddressingModeField::encode(addressing_mode);
440 code |= RecordWriteModeField::encode(record_write_mode);
441 CHECK_EQ(is_atomic, false);
442 selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
443 } else {
 444 ArchOpcode opcode;
 445 ImmediateMode mode;
 446 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
447 mode = kInt34Imm;
448 } else {
449 mode = kInt16Imm;
450 }
451 switch (store_view.ts_stored_rep()) {
454 opcode = kPPC_StoreWord8;
455 break;
458 opcode = kPPC_StoreWord16;
459 break;
462 opcode = kPPC_StoreWord32;
463 const Operation& reverse_op = selector->Get(value);
464 if (reverse_op.Is<Opmask::kWord32ReverseBytes>()) {
465 opcode = kPPC_StoreByteRev32;
466 value = selector->input_at(value, 0);
467 mode = kNoImmediate;
468 }
469 break;
470 }
473 if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
474 opcode = kPPC_StoreWord64;
475 const Operation& reverse_op = selector->Get(value);
476 if (reverse_op.Is<Opmask::kWord64ReverseBytes>()) {
477 opcode = kPPC_StoreByteRev64;
478 value = selector->input_at(value, 0);
479 mode = kNoImmediate;
480 }
481 break;
482 }
486 opcode = kPPC_StoreFloat32;
487 break;
489 opcode = kPPC_StoreDouble;
490 break;
494 if (mode != kInt34Imm) mode = kInt16Imm;
495 opcode = kPPC_StoreCompressTagged;
496 break;
500 if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
501 opcode = kPPC_StoreWord64;
502 break;
504 // We never store directly to protected pointers from generated code.
505 UNREACHABLE();
507 if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
508 opcode = kPPC_StoreIndirectPointer;
509 break;
511 if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned;
512 opcode = kPPC_StoreEncodeSandboxedPointer;
513 break;
515 opcode = kPPC_StoreSimd128;
516 // Vectors do not support MRI mode, only MRR is available.
517 mode = kNoImmediate;
518 break;
520 UNREACHABLE();
521 }
522
 523 if (selector->is_load_root_register(base)) {
 524 selector->Emit(opcode | AddressingModeField::encode(kMode_Root),
 525 g.NoOutput(), g.UseRegister(offset), g.UseRegister(value),
 526 g.UseImmediate(is_atomic));
 527 } else if (g.CanBeImmediate(offset, mode)) {
 528 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
 529 g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
 530 g.UseRegister(value), g.UseImmediate(is_atomic));
 531 } else if (g.CanBeImmediate(base, mode)) {
 532 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
 533 g.NoOutput(), g.UseRegister(offset), g.UseImmediate(base),
 534 g.UseRegister(value), g.UseImmediate(is_atomic));
 535 } else {
 536 selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
 537 g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
 538 g.UseRegister(value), g.UseImmediate(is_atomic));
 539 }
540 }
541}
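// Note on the write-barrier path above: base, offset and value are all
// allocated as unique registers and two temporaries are reserved so that the
// out-of-line record-write code can run without clobbering them; the
// indirect-pointer variant additionally passes the IndirectPointerTag as an
// immediate.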
542
543void InstructionSelectorT::VisitStorePair(OpIndex node) { UNREACHABLE(); }
544
545void InstructionSelectorT::VisitStore(OpIndex node) {
546 VisitStoreCommon(this, node, this->store_view(node).stored_rep(),
547 std::nullopt);
548}
549
550void InstructionSelectorT::VisitProtectedStore(OpIndex node) {
551 // TODO(eholk)
553}
554
555// Architecture supports unaligned access, therefore VisitLoad is used instead
556void InstructionSelectorT::VisitUnalignedLoad(OpIndex node) { UNREACHABLE(); }
557
558// Architecture supports unaligned access, therefore VisitStore is used instead
559void InstructionSelectorT::VisitUnalignedStore(OpIndex node) { UNREACHABLE(); }
560
561static void VisitLogical(InstructionSelectorT* selector, OpIndex node,
562 ArchOpcode opcode, bool left_can_cover,
563 bool right_can_cover, ImmediateMode imm_mode) {
564 PPCOperandGeneratorT g(selector);
565 const WordBinopOp& logical_op = selector->Get(node).Cast<WordBinopOp>();
566 const Operation& lhs = selector->Get(logical_op.left());
567 const Operation& rhs = selector->Get(logical_op.right());
568
569 // Map instruction to equivalent operation with inverted right input.
570 ArchOpcode inv_opcode = opcode;
571 switch (opcode) {
572 case kPPC_And:
573 inv_opcode = kPPC_AndComplement;
574 break;
575 case kPPC_Or:
576 inv_opcode = kPPC_OrComplement;
577 break;
578 default:
579 UNREACHABLE();
580 }
581
582 // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
583 if (lhs.Is<Opmask::kBitwiseXor>() && left_can_cover) {
584 const WordBinopOp& xor_op = lhs.Cast<WordBinopOp>();
585 int64_t xor_rhs_val;
586 if (selector->MatchSignedIntegralConstant(xor_op.right(), &xor_rhs_val) &&
587 xor_rhs_val == -1) {
588 // TODO(all): support shifted operand on right.
589 selector->Emit(inv_opcode, g.DefineAsRegister(node),
590 g.UseRegister(logical_op.right()),
591 g.UseRegister(xor_op.left()));
592 return;
593 }
594 }
595
596 // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
597 if (rhs.Is<Opmask::kBitwiseXor>() && right_can_cover) {
598 const WordBinopOp& xor_op = rhs.Cast<WordBinopOp>();
599 int64_t xor_rhs_val;
600 if (selector->MatchSignedIntegralConstant(xor_op.right(), &xor_rhs_val) &&
601 xor_rhs_val == -1) {
602 // TODO(all): support shifted operand on right.
603 selector->Emit(inv_opcode, g.DefineAsRegister(node),
604 g.UseRegister(logical_op.left()),
605 g.UseRegister(xor_op.left()));
606 return;
607 }
608 }
609
610 VisitBinop(selector, node, opcode, imm_mode);
611}
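// VisitLogical also recognizes Logical(x, Xor(y, -1)) in either operand
// position (when the xor is covered) and emits the complement form of the
// operation, kPPC_AndComplement / kPPC_OrComplement, folding the bitwise NOT
// into the logical instruction (presumably PowerPC's andc/orc).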
612
613static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
614 int mask_width = base::bits::CountPopulation(value);
615 int mask_msb = base::bits::CountLeadingZeros32(value);
616 int mask_lsb = base::bits::CountTrailingZeros32(value);
617 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
618 return false;
619 *mb = mask_lsb + mask_width - 1;
620 *me = mask_lsb;
621 return true;
622}
623
624static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
625 int mask_width = base::bits::CountPopulation(value);
626 int mask_msb = base::bits::CountLeadingZeros64(value);
627 int mask_lsb = base::bits::CountTrailingZeros64(value);
628 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
629 return false;
630 *mb = mask_lsb + mask_width - 1;
631 *me = mask_lsb;
632 return true;
633}
634
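// The two helpers above accept only masks that are a single contiguous run of
// set bits and return the run's boundaries: *mb is the most significant and
// *me the least significant set bit, counted from bit 0 at the LSB. For
// example, 0x00FF0000 yields *mb = 23 and *me = 16, while 0x00FF00FF is
// rejected. The And/Shl/Shr visitors below use this to fold an AND with such
// a mask, optionally together with a covered shift of the same input, into a
// single rotate-and-mask instruction (kPPC_RotLeftAndMask32 and the
// kPPC_RotLeftAndClear*64 opcodes, presumably the rlwinm/rldic families).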
635// TODO(mbrandy): Absorb rotate-right into rlwinm?
636void InstructionSelectorT::VisitWord32And(OpIndex node) {
637 PPCOperandGeneratorT g(this);
638
639 const WordBinopOp& bitwise_and = Get(node).Cast<WordBinopOp>();
640 int mb = 0;
641 int me = 0;
642 int64_t value;
643 if (MatchSignedIntegralConstant(bitwise_and.right(), &value) &&
644 IsContiguousMask32(value, &mb, &me)) {
645 int sh = 0;
646 OpIndex left = bitwise_and.left();
 647 const Operation& lhs = Get(left);
 648 if ((lhs.Is<Opmask::kWord32ShiftRightLogical>() ||
 649 lhs.Is<Opmask::kWord32ShiftLeft>()) &&
 650 CanCover(node, left)) {
651 // Try to absorb left/right shift into rlwinm
652 int32_t shift_by;
653 const ShiftOp& shift_op = lhs.Cast<ShiftOp>();
654 if (MatchIntegralWord32Constant(shift_op.right(), &shift_by) &&
655 base::IsInRange(shift_by, 0, 31)) {
656 left = shift_op.left();
 657 sh = shift_by;
 658 if (lhs.Is<Opmask::kWord32ShiftRightLogical>()) {
 659 // Adjust the mask such that it doesn't include any rotated bits.
660 if (mb > 31 - sh) mb = 31 - sh;
661 sh = (32 - sh) & 0x1F;
662 } else {
663 // Adjust the mask such that it doesn't include any rotated bits.
664 if (me < sh) me = sh;
665 }
666 }
667 }
668 if (mb >= me) {
669 Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
670 g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
671 return;
672 }
673 }
674 VisitLogical(this, node, kPPC_And, CanCover(node, bitwise_and.left()),
675 CanCover(node, bitwise_and.right()), kInt16Imm_Unsigned);
676}
677
678// TODO(mbrandy): Absorb rotate-right into rldic?
679void InstructionSelectorT::VisitWord64And(OpIndex node) {
680 PPCOperandGeneratorT g(this);
681
682 const WordBinopOp& bitwise_and = Get(node).Cast<WordBinopOp>();
683 int mb = 0;
684 int me = 0;
685 int64_t value;
686 if (MatchSignedIntegralConstant(bitwise_and.right(), &value) &&
687 IsContiguousMask64(value, &mb, &me)) {
688 int sh = 0;
689 OpIndex left = bitwise_and.left();
690 const Operation& lhs = Get(left);
691 if ((lhs.Is<Opmask::kWord64ShiftRightLogical>() ||
692 lhs.Is<Opmask::kWord64ShiftLeft>()) &&
693 CanCover(node, left)) {
694 // Try to absorb left/right shift into rldic
695 int64_t shift_by;
696 const ShiftOp& shift_op = lhs.Cast<ShiftOp>();
697 if (MatchIntegralWord64Constant(shift_op.right(), &shift_by) &&
698 base::IsInRange(shift_by, 0, 63)) {
699 left = shift_op.left();
700 sh = shift_by;
701 if (lhs.Is<Opmask::kWord64ShiftRightLogical>()) {
702 // Adjust the mask such that it doesn't include any rotated bits.
703 if (mb > 63 - sh) mb = 63 - sh;
704 sh = (64 - sh) & 0x3F;
705 } else {
706 // Adjust the mask such that it doesn't include any rotated bits.
707 if (me < sh) me = sh;
708 }
709 }
710 }
711 if (mb >= me) {
 712 bool match = false;
 713 ArchOpcode opcode;
 714 int mask;
715 if (me == 0) {
716 match = true;
717 opcode = kPPC_RotLeftAndClearLeft64;
718 mask = mb;
719 } else if (mb == 63) {
720 match = true;
721 opcode = kPPC_RotLeftAndClearRight64;
722 mask = me;
723 } else if (sh && me <= sh && lhs.Is<Opmask::kWord64ShiftLeft>()) {
724 match = true;
725 opcode = kPPC_RotLeftAndClear64;
726 mask = mb;
727 }
728 if (match) {
729 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
730 g.TempImmediate(sh), g.TempImmediate(mask));
731 return;
732 }
733 }
734 }
735 VisitLogical(this, node, kPPC_And, CanCover(node, bitwise_and.left()),
736 CanCover(node, bitwise_and.right()), kInt16Imm_Unsigned);
737}
738
739void InstructionSelectorT::VisitWord32Or(OpIndex node) {
740 const WordBinopOp& op = this->Get(node).template Cast<WordBinopOp>();
741 VisitLogical(this, node, kPPC_Or, CanCover(node, op.left()),
742 CanCover(node, op.right()), kInt16Imm_Unsigned);
743}
744
745void InstructionSelectorT::VisitWord64Or(OpIndex node) {
746 const WordBinopOp& op = this->Get(node).template Cast<WordBinopOp>();
747 VisitLogical(this, node, kPPC_Or, CanCover(node, op.left()),
748 CanCover(node, op.right()), kInt16Imm_Unsigned);
749}
750
751void InstructionSelectorT::VisitWord32Xor(OpIndex node) {
752 PPCOperandGeneratorT g(this);
 753 const WordBinopOp& bitwise_xor = this->Get(node).template Cast<WordBinopOp>();
 754 int32_t mask;
 755 if (this->MatchIntegralWord32Constant(bitwise_xor.right(), &mask) &&
756 mask == -1) {
757 Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(bitwise_xor.left()));
758 } else {
759 VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned);
760 }
761}
762
763void InstructionSelectorT::VisitStackPointerGreaterThan(
764 OpIndex node, FlagsContinuation* cont) {
765 StackCheckKind kind;
766 OpIndex value;
767 const auto& op = this->turboshaft_graph()
768 ->Get(node)
769 .Cast<StackPointerGreaterThanOp>();
770 kind = op.kind;
771 value = op.stack_limit();
772 InstructionCode opcode =
773 kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
774
775 PPCOperandGeneratorT g(this);
776
777 // No outputs.
778 InstructionOperand* const outputs = nullptr;
779 const int output_count = 0;
780
781 // Applying an offset to this stack check requires a temp register. Offsets
782 // are only applied to the first stack check. If applying an offset, we must
783 // ensure the input and temp registers do not alias, thus kUniqueRegister.
784 InstructionOperand temps[] = {g.TempRegister()};
785 const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
 786 const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
 787 ? OperandGenerator::kUniqueRegister
 788 : OperandGenerator::kRegister;
 789
790 InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
791 static constexpr int input_count = arraysize(inputs);
792
793 EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
794 temp_count, temps, cont);
795}
796
797void InstructionSelectorT::VisitWord64Xor(OpIndex node) {
798 PPCOperandGeneratorT g(this);
799 const WordBinopOp& bitwise_xor = this->Get(node).template Cast<WordBinopOp>();
800 int64_t mask;
801 if (this->MatchIntegralWord64Constant(bitwise_xor.right(), &mask) &&
802 mask == -1) {
803 Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(bitwise_xor.left()));
804 } else {
805 VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned);
806 }
807}
808
809void InstructionSelectorT::VisitWord32Shl(OpIndex node) {
810 PPCOperandGeneratorT g(this);
811 const ShiftOp& shl = this->Get(node).template Cast<ShiftOp>();
812 const Operation& lhs = this->Get(shl.left());
813 int64_t value;
814 if (lhs.Is<Opmask::kWord32BitwiseAnd>() &&
815 this->MatchSignedIntegralConstant(shl.right(), &value) &&
816 base::IsInRange(value, 0, 31)) {
817 int sh = value;
818 int mb;
819 int me;
820 const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
821 int64_t right_value;
822 if (MatchSignedIntegralConstant(bitwise_and.right(), &right_value) &&
823 IsContiguousMask32(right_value << sh, &mb, &me)) {
824 // Adjust the mask such that it doesn't include any rotated bits.
825 if (me < sh) me = sh;
826 if (mb >= me) {
827 Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
828 g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
829 g.TempImmediate(mb), g.TempImmediate(me));
830 return;
831 }
832 }
833 }
834 VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
835}
836
837void InstructionSelectorT::VisitWord64Shl(OpIndex node) {
838 PPCOperandGeneratorT g(this);
839 const ShiftOp& shl = this->Get(node).template Cast<ShiftOp>();
840 const Operation& lhs = this->Get(shl.left());
841 int64_t value;
842 if (lhs.Is<Opmask::kWord64BitwiseAnd>() &&
843 this->MatchSignedIntegralConstant(shl.right(), &value) &&
844 base::IsInRange(value, 0, 63)) {
845 int sh = value;
846 int mb;
847 int me;
848 const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
849 int64_t right_value;
850 if (MatchSignedIntegralConstant(bitwise_and.right(), &right_value) &&
851 IsContiguousMask64(right_value << sh, &mb, &me)) {
852 // Adjust the mask such that it doesn't include any rotated bits.
853 if (me < sh) me = sh;
854 if (mb >= me) {
 855 bool match = false;
 856 ArchOpcode opcode;
 857 int mask;
858 if (me == 0) {
859 match = true;
860 opcode = kPPC_RotLeftAndClearLeft64;
861 mask = mb;
862 } else if (mb == 63) {
863 match = true;
864 opcode = kPPC_RotLeftAndClearRight64;
865 mask = me;
866 } else if (sh && me <= sh) {
867 match = true;
868 opcode = kPPC_RotLeftAndClear64;
869 mask = mb;
870 }
871 if (match) {
872 Emit(opcode, g.DefineAsRegister(node),
873 g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
874 g.TempImmediate(mask));
875 return;
876 }
877 }
878 }
879 }
880 VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
881}
882
883void InstructionSelectorT::VisitWord32Shr(OpIndex node) {
884 PPCOperandGeneratorT g(this);
885 const ShiftOp& shr = this->Get(node).template Cast<ShiftOp>();
886 const Operation& lhs = this->Get(shr.left());
887 int64_t value;
888 if (lhs.Is<Opmask::kWord32BitwiseAnd>() &&
889 MatchSignedIntegralConstant(shr.right(), &value) &&
890 base::IsInRange(value, 0, 31)) {
891 int sh = value;
892 int mb;
893 int me;
894 const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
895 uint64_t right_value;
896 if (MatchUnsignedIntegralConstant(bitwise_and.right(), &right_value) &&
897 IsContiguousMask32(static_cast<uint32_t>(right_value >> sh), &mb,
898 &me)) {
899 // Adjust the mask such that it doesn't include any rotated bits.
900 if (mb > 31 - sh) mb = 31 - sh;
901 sh = (32 - sh) & 0x1F;
902 if (mb >= me) {
903 Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
904 g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
905 g.TempImmediate(mb), g.TempImmediate(me));
906 return;
907 }
908 }
909 }
910 VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
911}
912
913void InstructionSelectorT::VisitWord64Shr(OpIndex node) {
914 PPCOperandGeneratorT g(this);
915 const ShiftOp& shr = this->Get(node).template Cast<ShiftOp>();
916 const Operation& lhs = this->Get(shr.left());
917 int64_t value;
918 if (lhs.Is<Opmask::kWord64BitwiseAnd>() &&
919 MatchSignedIntegralConstant(shr.right(), &value) &&
920 base::IsInRange(value, 0, 63)) {
921 int sh = value;
922 int mb;
923 int me;
924 const WordBinopOp& bitwise_and = lhs.Cast<WordBinopOp>();
925 uint64_t right_value;
926 if (MatchUnsignedIntegralConstant(bitwise_and.right(), &right_value) &&
927 IsContiguousMask64(static_cast<uint64_t>(right_value >> sh), &mb,
928 &me)) {
929 // Adjust the mask such that it doesn't include any rotated bits.
930 if (mb > 63 - sh) mb = 63 - sh;
931 sh = (64 - sh) & 0x3F;
932 if (mb >= me) {
 933 bool match = false;
 934 ArchOpcode opcode;
 935 int mask;
936 if (me == 0) {
937 match = true;
938 opcode = kPPC_RotLeftAndClearLeft64;
939 mask = mb;
940 } else if (mb == 63) {
941 match = true;
942 opcode = kPPC_RotLeftAndClearRight64;
943 mask = me;
944 }
945 if (match) {
946 Emit(opcode, g.DefineAsRegister(node),
947 g.UseRegister(bitwise_and.left()), g.TempImmediate(sh),
948 g.TempImmediate(mask));
949 return;
950 }
951 }
952 }
953 }
954 VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
955}
956
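// VisitWord32Sar below pattern-matches (x << 16) >> 16 and (x << 24) >> 24
// (arithmetic shift) and selects the dedicated sign-extension opcodes
// kPPC_ExtendSignWord16 / kPPC_ExtendSignWord8 instead of emitting the two
// shifts.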
957void InstructionSelectorT::VisitWord32Sar(OpIndex node) {
958 PPCOperandGeneratorT g(this);
959 const ShiftOp& sar = this->Get(node).template Cast<ShiftOp>();
960 const Operation& lhs = this->Get(sar.left());
961 if (CanCover(node, sar.left()) && lhs.Is<Opmask::kWord32ShiftLeft>()) {
962 const ShiftOp& shl = lhs.Cast<ShiftOp>();
963 uint64_t sar_value;
964 uint64_t shl_value;
965 if (MatchUnsignedIntegralConstant(sar.right(), &sar_value) &&
966 MatchUnsignedIntegralConstant(shl.right(), &shl_value)) {
967 uint32_t sar_by = sar_value;
968 uint32_t shl_by = shl_value;
969 if ((sar_by == shl_by) && (sar_by == 16)) {
970 Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
971 g.UseRegister(shl.left()));
972 return;
973 } else if ((sar_by == shl_by) && (sar_by == 24)) {
974 Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
975 g.UseRegister(shl.left()));
976 return;
977 }
978 }
979 }
980 VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
981}
982
983void InstructionSelectorT::VisitWord64Sar(OpIndex node) {
984 PPCOperandGeneratorT g(this);
985 DCHECK(this->Get(node).template Cast<ShiftOp>().IsRightShift());
986 const ShiftOp& shift = this->Get(node).template Cast<ShiftOp>();
987 const Operation& lhs = this->Get(shift.left());
988 int64_t constant_rhs;
989
990 if (lhs.Is<LoadOp>() &&
991 this->MatchIntegralWord64Constant(shift.right(), &constant_rhs) &&
992 constant_rhs == 32 && this->CanCover(node, shift.left())) {
993 // Just load and sign-extend the interesting 4 bytes instead. This
994 // happens, for example, when we're loading and untagging SMIs.
995 const LoadOp& load = lhs.Cast<LoadOp>();
996 int64_t offset = 0;
997 if (load.index().has_value()) {
998 int64_t index_constant;
999 if (this->MatchIntegralWord64Constant(load.index().value(),
1000 &index_constant)) {
1001 DCHECK_EQ(load.element_size_log2, 0);
1002 offset = index_constant;
1003 }
1004 } else {
1005 offset = load.offset;
1006 }
1008 if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
1009 Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
1010 g.DefineAsRegister(node), g.UseRegister(load.base()),
1011 g.TempImmediate(offset), g.UseImmediate(0));
1012 return;
1013 }
1014 }
1015
1016 VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
1017}
1018
1019void InstructionSelectorT::VisitWord32Rol(OpIndex node) { UNREACHABLE(); }
1020
1021void InstructionSelectorT::VisitWord64Rol(OpIndex node) { UNREACHABLE(); }
1022
1023// TODO(mbrandy): Absorb logical-and into rlwinm?
1024void InstructionSelectorT::VisitWord32Ror(OpIndex node) {
1025 VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
1026}
1027
1028// TODO(mbrandy): Absorb logical-and into rldic?
1029void InstructionSelectorT::VisitWord64Ror(OpIndex node) {
1030 VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
1031}
1032
1033void InstructionSelectorT::VisitWord32Clz(OpIndex node) {
1034 PPCOperandGeneratorT g(this);
1035 Emit(kPPC_Cntlz32, g.DefineAsRegister(node),
1036 g.UseRegister(this->input_at(node, 0)));
1037}
1038
1039void InstructionSelectorT::VisitWord64Clz(OpIndex node) {
1040 PPCOperandGeneratorT g(this);
1041 Emit(kPPC_Cntlz64, g.DefineAsRegister(node),
1042 g.UseRegister(this->input_at(node, 0)));
1043}
1044
1045void InstructionSelectorT::VisitWord32Popcnt(OpIndex node) {
1046 PPCOperandGeneratorT g(this);
1047 Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
1048 g.UseRegister(this->input_at(node, 0)));
1049}
1050
1051void InstructionSelectorT::VisitWord64Popcnt(OpIndex node) {
1052 PPCOperandGeneratorT g(this);
1053 Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
1054 g.UseRegister(this->input_at(node, 0)));
1055}
1056
1057void InstructionSelectorT::VisitWord32Ctz(OpIndex node) { UNREACHABLE(); }
1058
1059void InstructionSelectorT::VisitWord64Ctz(OpIndex node) { UNREACHABLE(); }
1060
1061void InstructionSelectorT::VisitWord32ReverseBits(OpIndex node) {
1062 UNREACHABLE();
1063}
1064
1065void InstructionSelectorT::VisitWord64ReverseBits(OpIndex node) {
1066 UNREACHABLE();
1067}
1068
1069void InstructionSelectorT::VisitWord64ReverseBytes(OpIndex node) {
1070 PPCOperandGeneratorT g(this);
1071 InstructionOperand temp[] = {g.TempRegister()};
1072 OpIndex input = this->Get(node).input(0);
1073 const Operation& input_op = this->Get(input);
1074 if (CanCover(node, input) && input_op.Is<LoadOp>()) {
1075 auto load = this->load_view(input);
1076 LoadRepresentation load_rep = load.loaded_rep();
1077 if (load_rep.representation() == MachineRepresentation::kWord64) {
1078 OpIndex base = load.base();
1079 OpIndex offset = load.index();
1080 bool is_atomic = load.is_atomic();
1081 Emit(kPPC_LoadByteRev64 | AddressingModeField::encode(kMode_MRR),
1082 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
1083 g.UseImmediate(is_atomic));
1084 return;
1085 }
1086 }
1087 Emit(kPPC_ByteRev64, g.DefineAsRegister(node),
1088 g.UseUniqueRegister(this->input_at(node, 0)), 1, temp);
1089}
1090
1091void InstructionSelectorT::VisitWord32ReverseBytes(OpIndex node) {
1092 PPCOperandGeneratorT g(this);
1093 OpIndex input = this->Get(node).input(0);
1094 const Operation& input_op = this->Get(input);
1095 if (CanCover(node, input) && input_op.Is<LoadOp>()) {
1096 auto load = this->load_view(input);
1097 LoadRepresentation load_rep = load.loaded_rep();
1098 if (load_rep.representation() == MachineRepresentation::kWord32) {
1099 OpIndex base = load.base();
1100 OpIndex offset = load.index();
1101 bool is_atomic = load.is_atomic();
1102 Emit(kPPC_LoadByteRev32 | AddressingModeField::encode(kMode_MRR),
1103 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
1104 g.UseImmediate(is_atomic));
1105 return;
1106 }
1107 }
1108 Emit(kPPC_ByteRev32, g.DefineAsRegister(node),
1109 g.UseUniqueRegister(this->input_at(node, 0)));
1110}
1111
1112void InstructionSelectorT::VisitSimd128ReverseBytes(OpIndex node) {
1113 PPCOperandGeneratorT g(this);
1114 Emit(kPPC_LoadReverseSimd128RR, g.DefineAsRegister(node),
1115 g.UseRegister(this->input_at(node, 0)));
1116}
1117
1118void InstructionSelectorT::VisitInt32Add(OpIndex node) {
1119 VisitBinop(this, node, kPPC_Add32, kInt16Imm);
1120}
1121
1122void InstructionSelectorT::VisitInt64Add(OpIndex node) {
1123 VisitBinop(this, node, kPPC_Add64, kInt16Imm);
1124}
1125
1126void InstructionSelectorT::VisitInt32Sub(OpIndex node) {
1127 PPCOperandGeneratorT g(this);
1128 const WordBinopOp& sub = this->Get(node).template Cast<WordBinopOp>();
1129 if (this->MatchIntegralZero(sub.left())) {
1130 Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(sub.right()));
1131 } else {
1132 VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate);
1133 }
1134}
1135
1136void InstructionSelectorT::VisitInt64Sub(OpIndex node) {
1137 PPCOperandGeneratorT g(this);
1138 const WordBinopOp& sub = this->Get(node).template Cast<WordBinopOp>();
1139 if (this->MatchIntegralZero(sub.left())) {
1140 Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(sub.right()));
1141 } else {
1142 VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate);
1143 }
1144}
1145
1146namespace {
1147
1148void VisitCompare(InstructionSelectorT* selector, InstructionCode opcode,
1149 InstructionOperand left, InstructionOperand right,
1150 FlagsContinuationT* cont);
1151void EmitInt32MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
1152 FlagsContinuationT* cont) {
1153 PPCOperandGeneratorT g(selector);
1154 OpIndex lhs = selector->input_at(node, 0);
1155 OpIndex rhs = selector->input_at(node, 1);
1156 InstructionOperand result_operand = g.DefineAsRegister(node);
1157 InstructionOperand high32_operand = g.TempRegister();
1158 InstructionOperand temp_operand = g.TempRegister();
1159 {
1160 InstructionOperand outputs[] = {result_operand, high32_operand};
1161 InstructionOperand inputs[] = {g.UseRegister(lhs), g.UseRegister(rhs)};
1162 selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs);
1163 }
1164 {
1165 InstructionOperand shift_31 = g.UseImmediate(31);
1166 InstructionOperand outputs[] = {temp_operand};
1167 InstructionOperand inputs[] = {result_operand, shift_31};
1168 selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs);
1169 }
1170
1171 VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont);
1172}
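// How the 32-bit overflow check above works: kPPC_Mul32WithHigh32 yields both
// halves of the 64-bit product. The low half is then arithmetically shifted
// right by 31, producing the value the high half would have if the result were
// a correctly sign-extended 32-bit product; the continuation compares the two,
// and any difference signals overflow.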
1173
1174void EmitInt64MulWithOverflow(InstructionSelectorT* selector, OpIndex node,
1175 FlagsContinuationT* cont) {
1176 PPCOperandGeneratorT g(selector);
1177 OpIndex lhs = selector->input_at(node, 0);
1178 OpIndex rhs = selector->input_at(node, 1);
1179 InstructionOperand result = g.DefineAsRegister(node);
1180 InstructionOperand left = g.UseRegister(lhs);
1181 InstructionOperand high = g.TempRegister();
1182 InstructionOperand result_sign = g.TempRegister();
1183 InstructionOperand right = g.UseRegister(rhs);
1184 selector->Emit(kPPC_Mul64, result, left, right);
1185 selector->Emit(kPPC_MulHighS64, high, left, right);
1186 selector->Emit(kPPC_ShiftRightAlg64, result_sign, result,
1187 g.TempImmediate(63));
1188 // Test whether {high} is a sign-extension of {result}.
1189 selector->EmitWithContinuation(kPPC_Cmp64, high, result_sign, cont);
1190}
1191
1192} // namespace
1193
1194void InstructionSelectorT::VisitInt32Mul(OpIndex node) {
1195 VisitRRR(this, kPPC_Mul32, node);
1196}
1197
1198void InstructionSelectorT::VisitInt64Mul(OpIndex node) {
1199 VisitRRR(this, kPPC_Mul64, node);
1200}
1201
1202void InstructionSelectorT::VisitInt32MulHigh(OpIndex node) {
1203 PPCOperandGeneratorT g(this);
1204 Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
1205 g.UseRegister(this->input_at(node, 0)),
1206 g.UseRegister(this->input_at(node, 1)));
1207}
1208
1209void InstructionSelectorT::VisitUint32MulHigh(OpIndex node) {
1210 PPCOperandGeneratorT g(this);
1211 Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
1212 g.UseRegister(this->input_at(node, 0)),
1213 g.UseRegister(this->input_at(node, 1)));
1214}
1215
1216void InstructionSelectorT::VisitInt64MulHigh(OpIndex node) {
1217 PPCOperandGeneratorT g(this);
1218 Emit(kPPC_MulHighS64, g.DefineAsRegister(node),
1219 g.UseRegister(this->input_at(node, 0)),
1220 g.UseRegister(this->input_at(node, 1)));
1221}
1222
1223void InstructionSelectorT::VisitUint64MulHigh(OpIndex node) {
1224 PPCOperandGeneratorT g(this);
1225 Emit(kPPC_MulHighU64, g.DefineAsRegister(node),
1226 g.UseRegister(this->input_at(node, 0)),
1227 g.UseRegister(this->input_at(node, 1)));
1228}
1229
1230void InstructionSelectorT::VisitInt32Div(OpIndex node) {
1231 VisitRRR(this, kPPC_Div32, node);
1232}
1233
1234void InstructionSelectorT::VisitInt64Div(OpIndex node) {
1235 VisitRRR(this, kPPC_Div64, node);
1236}
1237
1238void InstructionSelectorT::VisitUint32Div(OpIndex node) {
1239 VisitRRR(this, kPPC_DivU32, node);
1240}
1241
1242void InstructionSelectorT::VisitUint64Div(OpIndex node) {
1243 VisitRRR(this, kPPC_DivU64, node);
1244}
1245
1246void InstructionSelectorT::VisitInt32Mod(OpIndex node) {
1247 VisitRRR(this, kPPC_Mod32, node);
1248}
1249
1250void InstructionSelectorT::VisitInt64Mod(OpIndex node) {
1251 VisitRRR(this, kPPC_Mod64, node);
1252}
1253
1254void InstructionSelectorT::VisitUint32Mod(OpIndex node) {
1255 VisitRRR(this, kPPC_ModU32, node);
1256}
1257
1258void InstructionSelectorT::VisitUint64Mod(OpIndex node) {
1259 VisitRRR(this, kPPC_ModU64, node);
1260}
1261
1262void InstructionSelectorT::VisitChangeFloat32ToFloat64(OpIndex node) {
1263 VisitRR(this, kPPC_Float32ToDouble, node);
1264}
1265
1266void InstructionSelectorT::VisitRoundInt32ToFloat32(OpIndex node) {
1267 VisitRR(this, kPPC_Int32ToFloat32, node);
1268}
1269
1270void InstructionSelectorT::VisitRoundUint32ToFloat32(OpIndex node) {
1271 VisitRR(this, kPPC_Uint32ToFloat32, node);
1272}
1273
1274void InstructionSelectorT::VisitChangeInt32ToFloat64(OpIndex node) {
1275 VisitRR(this, kPPC_Int32ToDouble, node);
1276}
1277
1278void InstructionSelectorT::VisitChangeUint32ToFloat64(OpIndex node) {
1279 VisitRR(this, kPPC_Uint32ToDouble, node);
1280}
1281
1282void InstructionSelectorT::VisitChangeFloat64ToInt32(OpIndex node) {
1283 VisitRR(this, kPPC_DoubleToInt32, node);
1284}
1285
1286void InstructionSelectorT::VisitChangeFloat64ToUint32(OpIndex node) {
1287 VisitRR(this, kPPC_DoubleToUint32, node);
1288}
1289
1290void InstructionSelectorT::VisitTruncateFloat64ToUint32(OpIndex node) {
1291 VisitRR(this, kPPC_DoubleToUint32, node);
1292}
1293
1294void InstructionSelectorT::VisitSignExtendWord8ToInt32(OpIndex node) {
1295 // TODO(mbrandy): inspect input to see if nop is appropriate.
1296 VisitRR(this, kPPC_ExtendSignWord8, node);
1297}
1298
1299void InstructionSelectorT::VisitSignExtendWord16ToInt32(OpIndex node) {
1300 // TODO(mbrandy): inspect input to see if nop is appropriate.
1301 VisitRR(this, kPPC_ExtendSignWord16, node);
1302}
1303
1304void InstructionSelectorT::VisitTryTruncateFloat32ToInt64(OpIndex node) {
1305 VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
1306}
1307
1308void InstructionSelectorT::VisitTryTruncateFloat64ToInt64(OpIndex node) {
1309 VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
1310}
1311
1312void InstructionSelectorT::VisitTruncateFloat64ToInt64(OpIndex node) {
1313 VisitRR(this, kPPC_DoubleToInt64, node);
1314}
1315
1316void InstructionSelectorT::VisitTryTruncateFloat32ToUint64(OpIndex node) {
1317 VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
1318}
1319
1320void InstructionSelectorT::VisitTryTruncateFloat64ToUint64(OpIndex node) {
1321 VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
1322}
1323
1324void InstructionSelectorT::VisitTryTruncateFloat64ToInt32(OpIndex node) {
1325 VisitTryTruncateDouble(this, kPPC_DoubleToInt32, node);
1326}
1327
1328void InstructionSelectorT::VisitTryTruncateFloat64ToUint32(OpIndex node) {
1329 VisitTryTruncateDouble(this, kPPC_DoubleToUint32, node);
1330}
1331
1332void InstructionSelectorT::VisitBitcastWord32ToWord64(OpIndex node) {
1333 DCHECK(SmiValuesAre31Bits());
1334 DCHECK(COMPRESS_POINTERS_BOOL);
1335 EmitIdentity(node);
1336}
1337
1338void InstructionSelectorT::VisitChangeInt32ToInt64(OpIndex node) {
1339 // TODO(mbrandy): inspect input to see if nop is appropriate.
1340 VisitRR(this, kPPC_ExtendSignWord32, node);
1341}
1342
1343void InstructionSelectorT::VisitSignExtendWord8ToInt64(OpIndex node) {
1344 // TODO(mbrandy): inspect input to see if nop is appropriate.
1345 VisitRR(this, kPPC_ExtendSignWord8, node);
1346}
1347
1348void InstructionSelectorT::VisitSignExtendWord16ToInt64(OpIndex node) {
1349 // TODO(mbrandy): inspect input to see if nop is appropriate.
1350 VisitRR(this, kPPC_ExtendSignWord16, node);
1351}
1352
1353void InstructionSelectorT::VisitSignExtendWord32ToInt64(OpIndex node) {
1354 // TODO(mbrandy): inspect input to see if nop is appropriate.
1355 VisitRR(this, kPPC_ExtendSignWord32, node);
1356}
1357
1358bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis(OpIndex node) {
1359 UNIMPLEMENTED();
1360}
1361
1362void InstructionSelectorT::VisitChangeUint32ToUint64(OpIndex node) {
1363 // TODO(mbrandy): inspect input to see if nop is appropriate.
1364 VisitRR(this, kPPC_Uint32ToUint64, node);
1365}
1366
1367void InstructionSelectorT::VisitTruncateFloat64ToFloat16RawBits(OpIndex node) {
1368 UNIMPLEMENTED();
1369}
1370
1371void InstructionSelectorT::VisitChangeFloat16RawBitsToFloat64(OpIndex node) {
1372 UNIMPLEMENTED();
1373}
1374
1375void InstructionSelectorT::VisitChangeFloat64ToUint64(OpIndex node) {
1376 VisitRR(this, kPPC_DoubleToUint64, node);
1377}
1378
1379void InstructionSelectorT::VisitChangeFloat64ToInt64(OpIndex node) {
1380 VisitRR(this, kPPC_DoubleToInt64, node);
1381}
1382
1383void InstructionSelectorT::VisitTruncateFloat64ToFloat32(OpIndex node) {
1384 VisitRR(this, kPPC_DoubleToFloat32, node);
1385}
1386
1387void InstructionSelectorT::VisitTruncateFloat64ToWord32(OpIndex node) {
1388 VisitRR(this, kArchTruncateDoubleToI, node);
1389}
1390
1391void InstructionSelectorT::VisitRoundFloat64ToInt32(OpIndex node) {
1392 VisitRR(this, kPPC_DoubleToInt32, node);
1393}
1394
1395void InstructionSelectorT::VisitTruncateFloat32ToInt32(OpIndex node) {
1396 PPCOperandGeneratorT g(this);
1397 const Operation& op = this->Get(node);
1398 InstructionCode opcode = kPPC_Float32ToInt32;
1399 if (op.Is<Opmask::kTruncateFloat32ToInt32OverflowToMin>()) {
1400 opcode |= MiscField::encode(true);
1401 }
1402 Emit(opcode, g.DefineAsRegister(node),
1403 g.UseRegister(this->input_at(node, 0)));
1404}
1405
1406void InstructionSelectorT::VisitTruncateFloat32ToUint32(OpIndex node) {
1407 PPCOperandGeneratorT g(this);
1408 const Operation& op = this->Get(node);
1409 InstructionCode opcode = kPPC_Float32ToUint32;
1410 if (op.Is<Opmask::kTruncateFloat32ToUint32OverflowToMin>()) {
1411 opcode |= MiscField::encode(true);
1412 }
1413
1414 Emit(opcode, g.DefineAsRegister(node),
1415 g.UseRegister(this->input_at(node, 0)));
1416}
1417
1418void InstructionSelectorT::VisitTruncateInt64ToInt32(OpIndex node) {
1419 // TODO(mbrandy): inspect input to see if nop is appropriate.
1420 VisitRR(this, kPPC_Int64ToInt32, node);
1421}
1422
1423void InstructionSelectorT::VisitRoundInt64ToFloat32(OpIndex node) {
1424 VisitRR(this, kPPC_Int64ToFloat32, node);
1425}
1426
1427void InstructionSelectorT::VisitRoundInt64ToFloat64(OpIndex node) {
1428 VisitRR(this, kPPC_Int64ToDouble, node);
1429}
1430
1431void InstructionSelectorT::VisitChangeInt64ToFloat64(OpIndex node) {
1432 VisitRR(this, kPPC_Int64ToDouble, node);
1433}
1434
1435void InstructionSelectorT::VisitRoundUint64ToFloat32(OpIndex node) {
1436 VisitRR(this, kPPC_Uint64ToFloat32, node);
1437}
1438
1439void InstructionSelectorT::VisitRoundUint64ToFloat64(OpIndex node) {
1440 VisitRR(this, kPPC_Uint64ToDouble, node);
1441}
1442
1443void InstructionSelectorT::VisitBitcastFloat32ToInt32(OpIndex node) {
1444 VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
1445}
1446
1447void InstructionSelectorT::VisitBitcastFloat64ToInt64(OpIndex node) {
1448 VisitRR(this, kPPC_BitcastDoubleToInt64, node);
1449}
1450
1451void InstructionSelectorT::VisitBitcastInt32ToFloat32(OpIndex node) {
1452 VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
1453}
1454
1455void InstructionSelectorT::VisitBitcastInt64ToFloat64(OpIndex node) {
1456 VisitRR(this, kPPC_BitcastInt64ToDouble, node);
1457}
1458
1459void InstructionSelectorT::VisitFloat32Add(OpIndex node) {
1460 VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
1461}
1462
1463void InstructionSelectorT::VisitFloat64Add(OpIndex node) {
1464 // TODO(mbrandy): detect multiply-add
1465 VisitRRR(this, kPPC_AddDouble, node);
1466}
1467
1468void InstructionSelectorT::VisitFloat32Sub(OpIndex node) {
1469 VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
1470}
1471
1472void InstructionSelectorT::VisitFloat64Sub(OpIndex node) {
1473 // TODO(mbrandy): detect multiply-subtract
1474 VisitRRR(this, kPPC_SubDouble, node);
1475}
1476
1477void InstructionSelectorT::VisitFloat32Mul(OpIndex node) {
1478 VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
1479}
1480
1481void InstructionSelectorT::VisitFloat64Mul(OpIndex node) {
1482 // TODO(mbrandy): detect negate
1483 VisitRRR(this, kPPC_MulDouble, node);
1484}
1485
1486void InstructionSelectorT::VisitFloat32Div(OpIndex node) {
1487 VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
1488}
1489
1490void InstructionSelectorT::VisitFloat64Div(OpIndex node) {
1491 VisitRRR(this, kPPC_DivDouble, node);
1492}
1493
1494void InstructionSelectorT::VisitFloat64Mod(OpIndex node) {
1495 PPCOperandGeneratorT g(this);
1496 Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
1497 g.UseFixed(this->input_at(node, 0), d1),
1498 g.UseFixed(this->input_at(node, 1), d2))
1499 ->MarkAsCall();
1500}
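// Float64Mod has no single instruction here, so it is emitted as a call
// (MarkAsCall) with the inputs pinned to d1/d2, presumably reaching the
// runtime's double-precision modulo helper.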
1501
1502void InstructionSelectorT::VisitFloat32Max(OpIndex node) {
1503 VisitRRR(this, kPPC_MaxDouble | MiscField::encode(1), node);
1504}
1505
1506void InstructionSelectorT::VisitFloat64Max(OpIndex node) {
1507 VisitRRR(this, kPPC_MaxDouble, node);
1508}
1509
1510void InstructionSelectorT::VisitFloat64SilenceNaN(OpIndex node) {
1511 VisitRR(this, kPPC_Float64SilenceNaN, node);
1512}
1513
1514void InstructionSelectorT::VisitFloat32Min(OpIndex node) {
1515 VisitRRR(this, kPPC_MinDouble | MiscField::encode(1), node);
1516}
1517
1518void InstructionSelectorT::VisitFloat64Min(OpIndex node) {
1519 VisitRRR(this, kPPC_MinDouble, node);
1520}
1521
1522void InstructionSelectorT::VisitFloat32Abs(OpIndex node) {
1523 VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
1524}
1525
1526void InstructionSelectorT::VisitFloat64Abs(OpIndex node) {
1527 VisitRR(this, kPPC_AbsDouble, node);
1528}
1529
1530void InstructionSelectorT::VisitFloat32Sqrt(OpIndex node) {
1531 VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
1532}
1533
1534void InstructionSelectorT::VisitFloat64Ieee754Unop(OpIndex node,
1535 InstructionCode opcode) {
1536 PPCOperandGeneratorT g(this);
1537 Emit(opcode, g.DefineAsFixed(node, d1),
1538 g.UseFixed(this->input_at(node, 0), d1))
1539 ->MarkAsCall();
1540}
1541
1542void InstructionSelectorT::VisitFloat64Ieee754Binop(OpIndex node,
1543 InstructionCode opcode) {
1544 PPCOperandGeneratorT g(this);
1545 Emit(opcode, g.DefineAsFixed(node, d1),
1546 g.UseFixed(this->input_at(node, 0), d1),
1547 g.UseFixed(this->input_at(node, 1), d2))
1548 ->MarkAsCall();
1549}
1550
1551void InstructionSelectorT::VisitFloat64Sqrt(OpIndex node) {
1552 VisitRR(this, kPPC_SqrtDouble, node);
1553}
1554
1555void InstructionSelectorT::VisitFloat32RoundDown(OpIndex node) {
1556 VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
1557}
1558
1559void InstructionSelectorT::VisitFloat64RoundDown(OpIndex node) {
1560 VisitRR(this, kPPC_FloorDouble, node);
1561}
1562
1563void InstructionSelectorT::VisitFloat32RoundUp(OpIndex node) {
1564 VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
1565}
1566
1567void InstructionSelectorT::VisitFloat64RoundUp(OpIndex node) {
1568 VisitRR(this, kPPC_CeilDouble, node);
1569}
1570
1571void InstructionSelectorT::VisitFloat32RoundTruncate(OpIndex node) {
1572 VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
1573}
1574
1575void InstructionSelectorT::VisitFloat64RoundTruncate(OpIndex node) {
1576 VisitRR(this, kPPC_TruncateDouble, node);
1577}
1578
1579void InstructionSelectorT::VisitFloat64RoundTiesAway(OpIndex node) {
1580 VisitRR(this, kPPC_RoundDouble, node);
1581}
1582
1583void InstructionSelectorT::VisitFloat32Neg(OpIndex node) {
1584 VisitRR(this, kPPC_NegDouble, node);
1585}
1586
1587void InstructionSelectorT::VisitFloat64Neg(OpIndex node) {
1588 VisitRR(this, kPPC_NegDouble, node);
1589}
1590
1591void InstructionSelectorT::VisitInt32AddWithOverflow(OpIndex node) {
1592 OptionalOpIndex ovf = FindProjection(node, 1);
1593 if (ovf.valid()) {
1594 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1595 return VisitBinop(this, node, kPPC_AddWithOverflow32, kInt16Imm, &cont);
1596 }
1597 FlagsContinuation cont;
1598 VisitBinop(this, node, kPPC_AddWithOverflow32, kInt16Imm, &cont);
1599}
1600
1601void InstructionSelectorT::VisitInt32SubWithOverflow(OpIndex node) {
1602 OptionalOpIndex ovf = FindProjection(node, 1);
1603 if (ovf.valid()) {
1604 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1605 return VisitBinop(this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate,
1606 &cont);
1607 }
1608 FlagsContinuation cont;
1609 VisitBinop(this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, &cont);
1610}
1611
1612void InstructionSelectorT::VisitInt64AddWithOverflow(OpIndex node) {
1613 OptionalOpIndex ovf = FindProjection(node, 1);
1614 if (ovf.valid()) {
1615 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1616 return VisitBinop(this, node, kPPC_Add64, kInt16Imm, &cont);
1617 }
1618 FlagsContinuation cont;
1619 VisitBinop(this, node, kPPC_Add64, kInt16Imm, &cont);
1620}
1621
1622void InstructionSelectorT::VisitInt64SubWithOverflow(OpIndex node) {
1623 OptionalOpIndex ovf = FindProjection(node, 1);
1624 if (ovf.valid()) {
1625 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf.value());
1626 return VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
1627 }
1628 FlagsContinuation cont;
1629 VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
1630}
1631
1632void InstructionSelectorT::VisitInt64MulWithOverflow(OpIndex node) {
1633 OptionalOpIndex ovf = FindProjection(node, 1);
1634 if (ovf.valid()) {
1635 FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf.value());
1636 return EmitInt64MulWithOverflow(this, node, &cont);
1637 }
1638 FlagsContinuation cont;
1639 EmitInt64MulWithOverflow(this, node, &cont);
1640}
1641
1642static bool CompareLogical(FlagsContinuationT* cont) {
1643 switch (cont->condition()) {
1644 case kUnsignedLessThan:
1645 case kUnsignedGreaterThanOrEqual:
1646 case kUnsignedLessThanOrEqual:
1647 case kUnsignedGreaterThan:
1648 return true;
1649 default:
1650 return false;
1651 }
1652 UNREACHABLE();
1653}
1654
1655namespace {
1656
1657// Shared routine for multiple compare operations.
1658void VisitCompare(InstructionSelectorT* selector, InstructionCode opcode,
1659 InstructionOperand left, InstructionOperand right,
1660 FlagsContinuationT* cont) {
1661 selector->EmitWithContinuation(opcode, left, right, cont);
1662}
1663
1664// Shared routine for multiple word compare operations.
1665void VisitWordCompare(InstructionSelectorT* selector, OpIndex node,
1666 InstructionCode opcode, FlagsContinuationT* cont,
1667 bool commutative, ImmediateMode immediate_mode) {
1668 PPCOperandGeneratorT g(selector);
1669 OpIndex lhs = selector->input_at(node, 0);
1670 OpIndex rhs = selector->input_at(node, 1);
1671
1672 // Match immediates on left or right side of comparison.
1673 if (g.CanBeImmediate(rhs, immediate_mode)) {
1674 VisitCompare(selector, opcode, g.UseRegister(lhs), g.UseImmediate(rhs),
1675 cont);
1676 } else if (g.CanBeImmediate(lhs, immediate_mode)) {
1677 if (!commutative) cont->Commute();
1678 VisitCompare(selector, opcode, g.UseRegister(rhs), g.UseImmediate(lhs),
1679 cont);
1680 } else {
1681 VisitCompare(selector, opcode, g.UseRegister(lhs), g.UseRegister(rhs),
1682 cont);
1683 }
1684}
1685
1686void VisitWord32Compare(InstructionSelectorT* selector, OpIndex node,
1687 FlagsContinuationT* cont) {
1688 ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
1689 VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
1690}
1691
1692void VisitWord64Compare(InstructionSelectorT* selector, OpIndex node,
1693 FlagsContinuationT* cont) {
1694 ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
1695 VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
1696}
1697
1698// Shared routine for multiple float32 compare operations.
1699void VisitFloat32Compare(InstructionSelectorT* selector, OpIndex node,
1700 FlagsContinuationT* cont) {
1701 PPCOperandGeneratorT g(selector);
1702 OpIndex lhs = selector->input_at(node, 0);
1703 OpIndex rhs = selector->input_at(node, 1);
1704 VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(lhs), g.UseRegister(rhs),
1705 cont);
1706}
1707
1708// Shared routine for multiple float64 compare operations.
1709void VisitFloat64Compare(InstructionSelectorT* selector, OpIndex node,
1710 FlagsContinuationT* cont) {
1711 PPCOperandGeneratorT g(selector);
1712 OpIndex lhs = selector->input_at(node, 0);
1713 OpIndex rhs = selector->input_at(node, 1);
1714 VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(lhs), g.UseRegister(rhs),
1715 cont);
1716}
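// Both float compare helpers emit kPPC_CmpDouble: once the operands are in
// floating-point registers the same compare instruction serves float32 and
// float64.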
1717
1718} // namespace
1719
1720void InstructionSelectorT::VisitWordCompareZero(OpIndex user, OpIndex value,
1721 FlagsContinuation* cont) {
1722 // Try to combine with comparisons against 0 by simply inverting the branch.
1723 ConsumeEqualZero(&user, &value, cont);
1724
1725 if (CanCover(user, value)) {
1726 const Operation& value_op = Get(value);
1727 if (const ComparisonOp* comparison = value_op.TryCast<ComparisonOp>()) {
1728 switch (comparison->rep.MapTaggedToWord().value()) {
1729 case RegisterRepresentation::Word32():
1730 cont->OverwriteAndNegateIfEqual(
1731 GetComparisonFlagCondition(*comparison));
1732 return VisitWord32Compare(this, value, cont);
1733 case RegisterRepresentation::Word64():
1734 cont->OverwriteAndNegateIfEqual(
1735 GetComparisonFlagCondition(*comparison));
1736 return VisitWord64Compare(this, value, cont);
1737 case RegisterRepresentation::Float32():
1738 switch (comparison->kind) {
1739 case ComparisonOp::Kind::kEqual:
1740 cont->OverwriteAndNegateIfEqual(kEqual);
1741 return VisitFloat32Compare(this, value, cont);
1742 case ComparisonOp::Kind::kSignedLessThan:
1743 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1744 return VisitFloat32Compare(this, value, cont);
1745 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1746 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1747 return VisitFloat32Compare(this, value, cont);
1748 default:
1749 UNREACHABLE();
1750 }
1751 case RegisterRepresentation::Float64():
1752 switch (comparison->kind) {
1753 case ComparisonOp::Kind::kEqual:
1754 cont->OverwriteAndNegateIfEqual(kEqual);
1755 return VisitFloat64Compare(this, value, cont);
1756 case ComparisonOp::Kind::kSignedLessThan:
1757 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1758 return VisitFloat64Compare(this, value, cont);
1759 case ComparisonOp::Kind::kSignedLessThanOrEqual:
1760 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1761 return VisitFloat64Compare(this, value, cont);
1762 default:
1763 UNREACHABLE();
1764 }
1765 default:
1766 break;
1767 }
1768 } else if (const ProjectionOp* projection =
1769 value_op.TryCast<ProjectionOp>()) {
1770 // Check if this is the overflow output projection of an
1771 // <Operation>WithOverflow node.
1772 if (projection->index == 1u) {
1773 // We cannot combine the <Operation>WithOverflow with this branch
1774 // unless the 0th projection (the use of the actual value of the
1775 // <Operation>) is either nullptr, which means there's no use of the
1776 // actual value, or was already defined, which means it is scheduled
1777 // *AFTER* this branch.
1778 OpIndex node = projection->input();
1779 if (const OverflowCheckedBinopOp* binop =
1780 TryCast<OverflowCheckedBinopOp>(node);
1781 binop && CanDoBranchIfOverflowFusion(node)) {
1782 const bool is64 = binop->rep == WordRepresentation::Word64();
1783 switch (binop->kind) {
1784 case OverflowCheckedBinopOp::Kind::kSignedAdd:
1785 cont->OverwriteAndNegateIfEqual(kOverflow);
1786 return VisitBinop(this, node,
1787 is64 ? kPPC_Add64 : kPPC_AddWithOverflow32,
1788 kInt16Imm, cont);
1789 case OverflowCheckedBinopOp::Kind::kSignedSub:
1790 cont->OverwriteAndNegateIfEqual(kOverflow);
1791 return VisitBinop(this, node,
1792 is64 ? kPPC_Sub : kPPC_SubWithOverflow32,
1793 kInt16Imm_Negate, cont);
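 // Note: unlike add/sub, multiplication overflow is detected by an
 // explicit comparison emitted in EmitInt{32,64}MulWithOverflow, so the
 // continuation below tests kNotEqual rather than kOverflow.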
1794 case OverflowCheckedBinopOp::Kind::kSignedMul:
1795 if (is64) {
1796 cont->OverwriteAndNegateIfEqual(kNotEqual);
1797 return EmitInt64MulWithOverflow(this, node, cont);
1798 } else {
1799 cont->OverwriteAndNegateIfEqual(kNotEqual);
1800 return EmitInt32MulWithOverflow(this, node, cont);
1801 }
1802 }
1803 }
1804 }
1805 } else if (value_op.Is<Opmask::kWord32Sub>()) {
1806 return VisitWord32Compare(this, value, cont);
1807 } else if (value_op.Is<Opmask::kWord32BitwiseAnd>()) {
1808 return VisitWordCompare(this, value, kPPC_Tst32, cont, true,
1809 kInt16Imm_Unsigned);
1810 } else if (value_op.Is<Opmask::kWord64Sub>()) {
1811 return VisitWord64Compare(this, value, cont);
1812 } else if (value_op.Is<Opmask::kWord64BitwiseAnd>()) {
1813 return VisitWordCompare(this, value, kPPC_Tst64, cont, true,
1814 kInt16Imm_Unsigned);
1815 } else if (value_op.Is<StackPointerGreaterThanOp>()) {
1816 cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
1817 return VisitStackPointerGreaterThan(value, cont);
1818 }
1819 }
1820
1821 // Branch could not be combined with a compare, emit compare against 0.
1822 PPCOperandGeneratorT g(this);
1823 VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0),
1824 cont);
1825}
1826
1827void InstructionSelectorT::VisitSwitch(OpIndex node, const SwitchInfo& sw) {
1828 PPCOperandGeneratorT g(this);
1829 InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0));
1830
1831 // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
1832 if (enable_switch_jump_table_ ==
1833 InstructionSelector::kEnableSwitchJumpTable) {
1834 static const size_t kMaxTableSwitchValueRange = 2 << 16;
1835 size_t table_space_cost = 4 + sw.value_range();
1836 size_t table_time_cost = 3;
1837 size_t lookup_space_cost = 3 + 2 * sw.case_count();
1838 size_t lookup_time_cost = sw.case_count();
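 // Heuristic: prefer a jump table when its weighted cost (space + 3x time)
 // is no worse than a binary-search switch. For example, with 8 dense
 // cases the lookup side costs 3 + 2*8 = 19 (space) and 8 (time), while
 // the table side costs 4 + value_range (space) and 3 (time).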
1839 if (sw.case_count() > 0 &&
1840 table_space_cost + 3 * table_time_cost <=
1841 lookup_space_cost + 3 * lookup_time_cost &&
1842 sw.min_value() > std::numeric_limits<int32_t>::min() &&
1843 sw.value_range() <= kMaxTableSwitchValueRange) {
1844 InstructionOperand index_operand = value_operand;
1845 if (sw.min_value()) {
1846 index_operand = g.TempRegister();
1847 Emit(kPPC_Sub, index_operand, value_operand,
1848 g.TempImmediate(sw.min_value()));
1849 }
1850 // Zero-extend, because the value is used as a 64-bit index into the
1850 // jump table.
1851 InstructionOperand index_operand_zero_ext = g.TempRegister();
1852 Emit(kPPC_Uint32ToUint64, index_operand_zero_ext, index_operand);
1853 index_operand = index_operand_zero_ext;
1854 // Generate a table lookup.
1855 return EmitTableSwitch(sw, index_operand);
1856 }
1857 }
1858
1859 // Generate a tree of conditional jumps.
1860 return EmitBinarySearchSwitch(sw, value_operand);
1861}
1862
1863void InstructionSelectorT::VisitWord32Equal(OpIndex const node) {
1864 const Operation& equal = Get(node);
1865 DCHECK(equal.Is<ComparisonOp>());
1866 OpIndex left = equal.input(0);
1867 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
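 // Fast path: with static roots or pointer compression (outside
 // bootstrapping), a comparison against a read-only root can use the
 // root's compressed pointer value directly as an immediate (see below).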
1868 if (isolate() && (V8_STATIC_ROOTS_BOOL ||
1869 (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) {
1870 PPCOperandGeneratorT g(this);
1871 const RootsTable& roots_table = isolate()->roots_table();
1872 RootIndex root_index;
1873 Handle<HeapObject> right;
1874 // HeapConstants and CompressedHeapConstants can be treated the same when
1875 // using them as an input to a 32-bit comparison. Check whether either is
1876 // present.
1877 if (MatchHeapConstant(node, &right) && !right.is_null() &&
1878 roots_table.IsRootHandle(right, &root_index)) {
1879 if (RootsTable::IsReadOnly(root_index)) {
1880 Tagged_t ptr =
1881 MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate());
1882 if (g.CanBeImmediate(ptr, kInt16Imm)) {
1883 return VisitCompare(this, kPPC_Cmp32, g.UseRegister(left),
1884 g.TempImmediate(ptr), &cont);
1885 }
1886 }
1887 }
1888 }
1889 VisitWord32Compare(this, node, &cont);
1890}
1891
1892void InstructionSelectorT::VisitInt32LessThan(OpIndex node) {
1893 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1894 VisitWord32Compare(this, node, &cont);
1895}
1896
1897void InstructionSelectorT::VisitInt32LessThanOrEqual(OpIndex node) {
1898 FlagsContinuation cont =
1899 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1900 VisitWord32Compare(this, node, &cont);
1901}
1902
1903void InstructionSelectorT::VisitUint32LessThan(OpIndex node) {
1904 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1905 VisitWord32Compare(this, node, &cont);
1906}
1907
1908void InstructionSelectorT::VisitUint32LessThanOrEqual(OpIndex node) {
1909 FlagsContinuation cont =
1910 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1911 VisitWord32Compare(this, node, &cont);
1912}
1913
1914void InstructionSelectorT::VisitWord64Equal(OpIndex const node) {
1915 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1916 VisitWord64Compare(this, node, &cont);
1917}
1918
1919void InstructionSelectorT::VisitInt64LessThan(OpIndex node) {
1920 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1921 VisitWord64Compare(this, node, &cont);
1922}
1923
1924void InstructionSelectorT::VisitInt64LessThanOrEqual(OpIndex node) {
1925 FlagsContinuation cont =
1926 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1927 VisitWord64Compare(this, node, &cont);
1928}
1929
1930void InstructionSelectorT::VisitUint64LessThan(OpIndex node) {
1931 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1932 VisitWord64Compare(this, node, &cont);
1933}
1934
1935void InstructionSelectorT::VisitUint64LessThanOrEqual(OpIndex node) {
1936 FlagsContinuation cont =
1937 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1938 VisitWord64Compare(this, node, &cont);
1939}
1940
1941void InstructionSelectorT::VisitInt32MulWithOverflow(OpIndex node) {
1942 OptionalOpIndex ovf = FindProjection(node, 1);
1943 if (ovf.valid()) {
1944 FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf.value());
1945 return EmitInt32MulWithOverflow(this, node, &cont);
1946 }
1947 FlagsContinuation cont;
1948 EmitInt32MulWithOverflow(this, node, &cont);
1949}
1950
1951void InstructionSelectorT::VisitFloat32Equal(OpIndex node) {
1952 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1953 VisitFloat32Compare(this, node, &cont);
1954}
1955
1956void InstructionSelectorT::VisitFloat32LessThan(OpIndex node) {
1957 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1958 VisitFloat32Compare(this, node, &cont);
1959}
1960
1961void InstructionSelectorT::VisitFloat32LessThanOrEqual(OpIndex node) {
1962 FlagsContinuation cont =
1963 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1964 VisitFloat32Compare(this, node, &cont);
1965}
1966
1967void InstructionSelectorT::VisitFloat64Equal(OpIndex node) {
1968 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1969 VisitFloat64Compare(this, node, &cont);
1970}
1971
1972void InstructionSelectorT::VisitFloat64LessThan(OpIndex node) {
1973 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1974 VisitFloat64Compare(this, node, &cont);
1975}
1976
1977void InstructionSelectorT::VisitFloat64LessThanOrEqual(OpIndex node) {
1978 FlagsContinuation cont =
1979 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1980 VisitFloat64Compare(this, node, &cont);
1981}
1982
1983void InstructionSelectorT::EmitMoveParamToFPR(OpIndex node, int index) {}
1984
1985void InstructionSelectorT::EmitMoveFPRToParam(InstructionOperand* op,
1986 LinkageLocation location) {}
1987
1988void InstructionSelectorT::EmitPrepareArguments(
1989 ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1990 OpIndex node) {
1991 PPCOperandGeneratorT g(this);
1992
1993 // Prepare for C function call.
1994 if (call_descriptor->IsCFunctionCall()) {
1995 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1996 call_descriptor->ParameterCount())),
1997 0, nullptr, 0, nullptr);
1998
1999 // Poke any stack arguments.
2000 int slot = kStackFrameExtraParamSlot;
2001 for (PushParameter input : (*arguments)) {
2002 if (!input.node.valid()) continue;
2003 Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
2004 g.TempImmediate(slot));
2005 ++slot;
2006 }
2007 } else {
2008 // Push any stack arguments.
2009 int stack_decrement = 0;
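 // stack_decrement accumulates the space of any skipped (invalid) slots
 // so that the next kPPC_Push adjusts the stack pointer for all of them
 // at once.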
2010 for (PushParameter input : base::Reversed(*arguments)) {
2011 stack_decrement += kSystemPointerSize;
2012 // Skip any alignment holes in pushed nodes.
2013 if (!input.node.valid()) continue;
2014 InstructionOperand decrement = g.UseImmediate(stack_decrement);
2015 stack_decrement = 0;
2016 Emit(kPPC_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
2017 }
2018 }
2019}
2020
2021bool InstructionSelectorT::IsTailCallAddressImmediate() { return false; }
2022
2023void InstructionSelectorT::VisitFloat64ExtractLowWord32(OpIndex node) {
2024 PPCOperandGeneratorT g(this);
2025 Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
2026 g.UseRegister(this->input_at(node, 0)));
2027}
2028
2029void InstructionSelectorT::VisitFloat64ExtractHighWord32(OpIndex node) {
2030 PPCOperandGeneratorT g(this);
2031 Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
2032 g.UseRegister(this->input_at(node, 0)));
2033}
2034
2035void InstructionSelectorT::VisitBitcastWord32PairToFloat64(OpIndex node) {
2036 PPCOperandGeneratorT g(this);
2037 const auto& bitcast = this->Cast<BitcastWord32PairToFloat64Op>(node);
2038 OpIndex hi = bitcast.high_word32();
2039 OpIndex lo = bitcast.low_word32();
2040
2041 InstructionOperand temps[] = {g.TempRegister()};
2042 Emit(kPPC_DoubleFromWord32Pair, g.DefineAsRegister(node), g.UseRegister(hi),
2043 g.UseRegister(lo), arraysize(temps), temps);
2044}
2045
2046void InstructionSelectorT::VisitFloat64InsertLowWord32(OpIndex node) {
2047 UNIMPLEMENTED();
2048}
2049
2050void InstructionSelectorT::VisitFloat64InsertHighWord32(OpIndex node) {
2051 UNIMPLEMENTED();
2052}
2053
2054void InstructionSelectorT::VisitMemoryBarrier(OpIndex node) {
2055 PPCOperandGeneratorT g(this);
2056 Emit(kPPC_Sync, g.NoOutput());
2057}
2058
2059void InstructionSelectorT::VisitWord32AtomicLoad(OpIndex node) {
2060 auto load_view = this->load_view(node);
2061 ImmediateMode mode;
2062 InstructionCode opcode = SelectLoadOpcode(load_view.loaded_rep(), &mode);
2063 VisitLoadCommon(this, node, mode, opcode);
2064}
2065
2066void InstructionSelectorT::VisitWord64AtomicLoad(OpIndex node) {
2067 auto load_view = this->load_view(node);
2068 ImmediateMode mode;
2069 InstructionCode opcode = SelectLoadOpcode(load_view.loaded_rep(), &mode);
2070 VisitLoadCommon(this, node, mode, opcode);
2071}
2072
2073void InstructionSelectorT::VisitWord32AtomicStore(OpIndex node) {
2074 auto store = this->store_view(node);
2075 AtomicStoreParameters store_params(store.stored_rep().representation(),
2076 store.stored_rep().write_barrier_kind(),
2077 store.memory_order().value(),
2078 store.access_kind());
2079 VisitStoreCommon(this, node, store_params.store_representation(),
2080 store_params.order());
2081}
2082
2083void InstructionSelectorT::VisitWord64AtomicStore(OpIndex node) {
2084 auto store = this->store_view(node);
2085 AtomicStoreParameters store_params(store.stored_rep().representation(),
2086 store.stored_rep().write_barrier_kind(),
2087 store.memory_order().value(),
2088 store.access_kind());
2089 VisitStoreCommon(this, node, store_params.store_representation(),
2090 store_params.order());
2091}
2092
2093void VisitAtomicExchange(InstructionSelectorT* selector, OpIndex node,
2094 ArchOpcode opcode) {
2095 PPCOperandGeneratorT g(selector);
2096 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
2097 OpIndex base = atomic_op.base();
2098 OpIndex index = atomic_op.index();
2099 OpIndex value = atomic_op.value();
2100
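 // All operands are placed in unique registers; the exchange is expanded
 // into a load-reserve/store-conditional retry loop, so the inputs must
 // stay valid across iterations and must not alias the result.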
2101 AddressingMode addressing_mode = kMode_MRR;
2102 InstructionOperand inputs[3];
2103 size_t input_count = 0;
2104 inputs[input_count++] = g.UseUniqueRegister(base);
2105 inputs[input_count++] = g.UseUniqueRegister(index);
2106 inputs[input_count++] = g.UseUniqueRegister(value);
2107 InstructionOperand outputs[1];
2108 outputs[0] = g.UseUniqueRegister(node);
2109 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2110 selector->Emit(code, 1, outputs, input_count, inputs);
2111}
2112
2113void InstructionSelectorT::VisitWord32AtomicExchange(OpIndex node) {
2114 ArchOpcode opcode;
2115 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2116 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2117 opcode = kAtomicExchangeInt8;
2118 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2119 opcode = kPPC_AtomicExchangeUint8;
2120 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2121 opcode = kAtomicExchangeInt16;
2122 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2123 opcode = kPPC_AtomicExchangeUint16;
2124 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2125 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2126 opcode = kPPC_AtomicExchangeWord32;
2127 } else {
2128 UNREACHABLE();
2129 }
2130 VisitAtomicExchange(this, node, opcode);
2131}
2132
2133void InstructionSelectorT::VisitWord64AtomicExchange(OpIndex node) {
2134 ArchOpcode opcode;
2135 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2136 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2137 opcode = kPPC_AtomicExchangeUint8;
2138 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2139 opcode = kPPC_AtomicExchangeUint16;
2140 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2141 opcode = kPPC_AtomicExchangeWord32;
2142 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2143 opcode = kPPC_AtomicExchangeWord64;
2144 } else {
2145 UNREACHABLE();
2146 }
2147 VisitAtomicExchange(this, node, opcode);
2148}
2149
2150void VisitAtomicCompareExchange(InstructionSelectorT* selector, OpIndex node,
2151 ArchOpcode opcode) {
2152 PPCOperandGeneratorT g(selector);
2153 const AtomicRMWOp& atomic_op = selector->Cast<AtomicRMWOp>(node);
2154 OpIndex base = atomic_op.base();
2155 OpIndex index = atomic_op.index();
2156 OpIndex old_value = atomic_op.expected().value();
2157 OpIndex new_value = atomic_op.value();
2158
2159 AddressingMode addressing_mode = kMode_MRR;
2160 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2161
2162 InstructionOperand inputs[4];
2163 size_t input_count = 0;
2164 inputs[input_count++] = g.UseUniqueRegister(base);
2165 inputs[input_count++] = g.UseUniqueRegister(index);
2166 inputs[input_count++] = g.UseUniqueRegister(old_value);
2167 inputs[input_count++] = g.UseUniqueRegister(new_value);
2168
2169 InstructionOperand outputs[1];
2170 size_t output_count = 0;
2171 outputs[output_count++] = g.DefineAsRegister(node);
2172
2173 selector->Emit(code, output_count, outputs, input_count, inputs);
2174}
2175
2176void InstructionSelectorT::VisitWord32AtomicCompareExchange(OpIndex node) {
2177 ArchOpcode opcode;
2178 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2179 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2180 opcode = kAtomicCompareExchangeInt8;
2181 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2182 opcode = kPPC_AtomicCompareExchangeUint8;
2183 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2184 opcode = kAtomicCompareExchangeInt16;
2185 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2186 opcode = kPPC_AtomicCompareExchangeUint16;
2187 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32() ||
2188 atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2189 opcode = kPPC_AtomicCompareExchangeWord32;
2190 } else {
2191 UNREACHABLE();
2192 }
2193 VisitAtomicCompareExchange(this, node, opcode);
2194}
2195
2196void InstructionSelectorT::VisitWord64AtomicCompareExchange(OpIndex node) {
2197 ArchOpcode opcode;
2198 const AtomicRMWOp& atomic_op = this->Get(node).template Cast<AtomicRMWOp>();
2199 if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2200 opcode = kPPC_AtomicCompareExchangeUint8;
2201 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2202 opcode = kPPC_AtomicCompareExchangeUint16;
2203 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2204 opcode = kPPC_AtomicCompareExchangeWord32;
2205 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2206 opcode = kPPC_AtomicCompareExchangeWord64;
2207 } else {
2208 UNREACHABLE();
2209 }
2210 VisitAtomicCompareExchange(this, node, opcode);
2211}
2212
2213void VisitAtomicBinaryOperation(InstructionSelectorT* selector, OpIndex node,
2214 ArchOpcode int8_op, ArchOpcode uint8_op,
2215 ArchOpcode int16_op, ArchOpcode uint16_op,
2216 ArchOpcode int32_op, ArchOpcode uint32_op,
2217 ArchOpcode int64_op, ArchOpcode uint64_op) {
2218 PPCOperandGeneratorT g(selector);
2219 ArchOpcode opcode;
2220 const AtomicRMWOp& atomic_op =
2221 selector->Get(node).template Cast<AtomicRMWOp>();
2222 OpIndex base = atomic_op.base();
2223 OpIndex index = atomic_op.index();
2224 OpIndex value = atomic_op.value();
2225 if (atomic_op.memory_rep == MemoryRepresentation::Int8()) {
2226 opcode = int8_op;
2227 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint8()) {
2228 opcode = uint8_op;
2229 } else if (atomic_op.memory_rep == MemoryRepresentation::Int16()) {
2230 opcode = int16_op;
2231 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint16()) {
2232 opcode = uint16_op;
2233 } else if (atomic_op.memory_rep == MemoryRepresentation::Int32()) {
2234 opcode = int32_op;
2235 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint32()) {
2236 opcode = uint32_op;
2237 } else if (atomic_op.memory_rep == MemoryRepresentation::Int64()) {
2238 opcode = int64_op;
2239 } else if (atomic_op.memory_rep == MemoryRepresentation::Uint64()) {
2240 opcode = uint64_op;
2241 } else {
2242 UNREACHABLE();
2243 }
2244
2245 AddressingMode addressing_mode = kMode_MRR;
2246 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2247 InstructionOperand inputs[3];
2248
2249 size_t input_count = 0;
2250 inputs[input_count++] = g.UseUniqueRegister(base);
2251 inputs[input_count++] = g.UseUniqueRegister(index);
2252 inputs[input_count++] = g.UseUniqueRegister(value);
2253
2254 InstructionOperand outputs[1];
2255 size_t output_count = 0;
2256 outputs[output_count++] = g.DefineAsRegister(node);
2257
2258 selector->Emit(code, output_count, outputs, input_count, inputs);
2259}
2260
2261void InstructionSelectorT::VisitWord32AtomicBinaryOperation(
2262 OpIndex node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2263 ArchOpcode uint16_op, ArchOpcode word32_op) {
2264 // Unused
2265 UNREACHABLE();
2266}
2267
2268void InstructionSelectorT::VisitWord64AtomicBinaryOperation(
2269 OpIndex node, ArchOpcode uint8_op, ArchOpcode uint16_op,
2270 ArchOpcode uint32_op, ArchOpcode uint64_op) {
2271 // Unused
2272 UNREACHABLE();
2273}
2274
2275#define VISIT_ATOMIC_BINOP(op) \
2276 void InstructionSelectorT::VisitWord32Atomic##op(OpIndex node) { \
2277 VisitAtomicBinaryOperation( \
2278 this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
2279 kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
2280 kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
2281 kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
2282 } \
2283 void InstructionSelectorT::VisitWord64Atomic##op(OpIndex node) { \
2284 VisitAtomicBinaryOperation( \
2285 this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \
2286 kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \
2287 kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \
2288 kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \
2289 }
2290VISIT_ATOMIC_BINOP(Add)
2291VISIT_ATOMIC_BINOP(Sub)
2292VISIT_ATOMIC_BINOP(And)
2293VISIT_ATOMIC_BINOP(Or)
2294VISIT_ATOMIC_BINOP(Xor)
2295#undef VISIT_ATOMIC_BINOP
2296
2297void InstructionSelectorT::VisitInt32AbsWithOverflow(OpIndex node) {
2298 UNREACHABLE();
2299}
2300
2301void InstructionSelectorT::VisitInt64AbsWithOverflow(OpIndex node) {
2302 UNREACHABLE();
2303}
2304
2305#define SIMD_TYPES(V) \
2306 V(F64x2) \
2307 V(F32x4) \
2308 V(I64x2) \
2309 V(I32x4) \
2310 V(I16x8) \
2311 V(I8x16)
2312
2313#define SIMD_BINOP_LIST(V) \
2314 V(F64x2Add) \
2315 V(F64x2Sub) \
2316 V(F64x2Mul) \
2317 V(F64x2Eq) \
2318 V(F64x2Ne) \
2319 V(F64x2Le) \
2320 V(F64x2Lt) \
2321 V(F64x2Div) \
2322 V(F64x2Min) \
2323 V(F64x2Max) \
2324 V(F64x2Pmin) \
2325 V(F64x2Pmax) \
2326 V(F32x4Add) \
2327 V(F32x4Sub) \
2328 V(F32x4Mul) \
2329 V(F32x4Eq) \
2330 V(F32x4Ne) \
2331 V(F32x4Lt) \
2332 V(F32x4Le) \
2333 V(F32x4Div) \
2334 V(F32x4Min) \
2335 V(F32x4Max) \
2336 V(F32x4Pmin) \
2337 V(F32x4Pmax) \
2338 V(I64x2Add) \
2339 V(I64x2Sub) \
2340 V(I64x2Mul) \
2341 V(I64x2Eq) \
2342 V(I64x2Ne) \
2343 V(I64x2ExtMulLowI32x4S) \
2344 V(I64x2ExtMulHighI32x4S) \
2345 V(I64x2ExtMulLowI32x4U) \
2346 V(I64x2ExtMulHighI32x4U) \
2347 V(I64x2GtS) \
2348 V(I64x2GeS) \
2349 V(I64x2Shl) \
2350 V(I64x2ShrS) \
2351 V(I64x2ShrU) \
2352 V(I32x4Add) \
2353 V(I32x4Sub) \
2354 V(I32x4Mul) \
2355 V(I32x4MinS) \
2356 V(I32x4MinU) \
2357 V(I32x4MaxS) \
2358 V(I32x4MaxU) \
2359 V(I32x4Eq) \
2360 V(I32x4Ne) \
2361 V(I32x4GtS) \
2362 V(I32x4GeS) \
2363 V(I32x4GtU) \
2364 V(I32x4GeU) \
2365 V(I32x4DotI16x8S) \
2366 V(I32x4ExtMulLowI16x8S) \
2367 V(I32x4ExtMulHighI16x8S) \
2368 V(I32x4ExtMulLowI16x8U) \
2369 V(I32x4ExtMulHighI16x8U) \
2370 V(I32x4Shl) \
2371 V(I32x4ShrS) \
2372 V(I32x4ShrU) \
2373 V(I16x8Add) \
2374 V(I16x8Sub) \
2375 V(I16x8Mul) \
2376 V(I16x8MinS) \
2377 V(I16x8MinU) \
2378 V(I16x8MaxS) \
2379 V(I16x8MaxU) \
2380 V(I16x8Eq) \
2381 V(I16x8Ne) \
2382 V(I16x8GtS) \
2383 V(I16x8GeS) \
2384 V(I16x8GtU) \
2385 V(I16x8GeU) \
2386 V(I16x8SConvertI32x4) \
2387 V(I16x8UConvertI32x4) \
2388 V(I16x8AddSatS) \
2389 V(I16x8SubSatS) \
2390 V(I16x8AddSatU) \
2391 V(I16x8SubSatU) \
2392 V(I16x8RoundingAverageU) \
2393 V(I16x8Q15MulRSatS) \
2394 V(I16x8ExtMulLowI8x16S) \
2395 V(I16x8ExtMulHighI8x16S) \
2396 V(I16x8ExtMulLowI8x16U) \
2397 V(I16x8ExtMulHighI8x16U) \
2398 V(I16x8Shl) \
2399 V(I16x8ShrS) \
2400 V(I16x8ShrU) \
2401 V(I8x16Add) \
2402 V(I8x16Sub) \
2403 V(I8x16MinS) \
2404 V(I8x16MinU) \
2405 V(I8x16MaxS) \
2406 V(I8x16MaxU) \
2407 V(I8x16Eq) \
2408 V(I8x16Ne) \
2409 V(I8x16GtS) \
2410 V(I8x16GeS) \
2411 V(I8x16GtU) \
2412 V(I8x16GeU) \
2413 V(I8x16SConvertI16x8) \
2414 V(I8x16UConvertI16x8) \
2415 V(I8x16AddSatS) \
2416 V(I8x16SubSatS) \
2417 V(I8x16AddSatU) \
2418 V(I8x16SubSatU) \
2419 V(I8x16RoundingAverageU) \
2420 V(I8x16Swizzle) \
2421 V(I8x16Shl) \
2422 V(I8x16ShrS) \
2423 V(I8x16ShrU) \
2424 V(S128And) \
2425 V(S128Or) \
2426 V(S128Xor) \
2427 V(S128AndNot)
2428
2429#define SIMD_UNOP_LIST(V) \
2430 V(F64x2Abs) \
2431 V(F64x2Neg) \
2432 V(F64x2Sqrt) \
2433 V(F64x2Ceil) \
2434 V(F64x2Floor) \
2435 V(F64x2Trunc) \
2436 V(F64x2ConvertLowI32x4S) \
2437 V(F64x2ConvertLowI32x4U) \
2438 V(F64x2PromoteLowF32x4) \
2439 V(F32x4Abs) \
2440 V(F32x4Neg) \
2441 V(F32x4Sqrt) \
2442 V(F32x4SConvertI32x4) \
2443 V(F32x4UConvertI32x4) \
2444 V(F32x4Ceil) \
2445 V(F32x4Floor) \
2446 V(F32x4Trunc) \
2447 V(F32x4DemoteF64x2Zero) \
2448 V(I64x2Abs) \
2449 V(I64x2Neg) \
2450 V(I64x2SConvertI32x4Low) \
2451 V(I64x2SConvertI32x4High) \
2452 V(I64x2UConvertI32x4Low) \
2453 V(I64x2UConvertI32x4High) \
2454 V(I64x2AllTrue) \
2455 V(I64x2BitMask) \
2456 V(I32x4Neg) \
2457 V(I32x4Abs) \
2458 V(I32x4SConvertF32x4) \
2459 V(I32x4UConvertF32x4) \
2460 V(I32x4SConvertI16x8Low) \
2461 V(I32x4SConvertI16x8High) \
2462 V(I32x4UConvertI16x8Low) \
2463 V(I32x4UConvertI16x8High) \
2464 V(I32x4ExtAddPairwiseI16x8S) \
2465 V(I32x4ExtAddPairwiseI16x8U) \
2466 V(I32x4TruncSatF64x2SZero) \
2467 V(I32x4TruncSatF64x2UZero) \
2468 V(I32x4AllTrue) \
2469 V(I32x4BitMask) \
2470 V(I16x8Neg) \
2471 V(I16x8Abs) \
2472 V(I16x8AllTrue) \
2473 V(I16x8BitMask) \
2474 V(I8x16Neg) \
2475 V(I8x16Abs) \
2476 V(I8x16Popcnt) \
2477 V(I8x16AllTrue) \
2478 V(I8x16BitMask) \
2479 V(I16x8SConvertI8x16Low) \
2480 V(I16x8SConvertI8x16High) \
2481 V(I16x8UConvertI8x16Low) \
2482 V(I16x8UConvertI8x16High) \
2483 V(I16x8ExtAddPairwiseI8x16S) \
2484 V(I16x8ExtAddPairwiseI8x16U) \
2485 V(S128Not) \
2486 V(V128AnyTrue)
2487
2488#define SIMD_VISIT_SPLAT(Type, T, LaneSize) \
2489 void InstructionSelectorT::Visit##Type##Splat(OpIndex node) { \
2490 PPCOperandGeneratorT g(this); \
2491 Emit(kPPC_##T##Splat | LaneSizeField::encode(LaneSize), \
2492 g.DefineAsRegister(node), g.UseRegister(this->input_at(node, 0))); \
2493 }
2494SIMD_VISIT_SPLAT(F64x2, F, 64)
2495SIMD_VISIT_SPLAT(F32x4, F, 32)
2496SIMD_VISIT_SPLAT(I64x2, I, 64)
2497SIMD_VISIT_SPLAT(I32x4, I, 32)
2498SIMD_VISIT_SPLAT(I16x8, I, 16)
2499SIMD_VISIT_SPLAT(I8x16, I, 8)
2500#undef SIMD_VISIT_SPLAT
2501
2502#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
2503 void InstructionSelectorT::Visit##Type##ExtractLane##Sign(OpIndex node) { \
2504 PPCOperandGeneratorT g(this); \
2505 int32_t lane; \
2506 const Operation& op = this->Get(node); \
2507 lane = op.template Cast<Simd128ExtractLaneOp>().lane; \
2508 Emit(kPPC_##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \
2509 g.DefineAsRegister(node), g.UseRegister(this->input_at(node, 0)), \
2510 g.UseImmediate(lane)); \
2511 }
2512SIMD_VISIT_EXTRACT_LANE(F64x2, F, , 64)
2513SIMD_VISIT_EXTRACT_LANE(F32x4, F, , 32)
2514SIMD_VISIT_EXTRACT_LANE(I64x2, I, , 64)
2515SIMD_VISIT_EXTRACT_LANE(I32x4, I, , 32)
2516SIMD_VISIT_EXTRACT_LANE(I16x8, I, U, 16)
2517SIMD_VISIT_EXTRACT_LANE(I16x8, I, S, 16)
2518SIMD_VISIT_EXTRACT_LANE(I8x16, I, U, 8)
2519SIMD_VISIT_EXTRACT_LANE(I8x16, I, S, 8)
2520#undef SIMD_VISIT_EXTRACT_LANE
2521
2522#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \
2523 void InstructionSelectorT::Visit##Type##ReplaceLane(OpIndex node) { \
2524 PPCOperandGeneratorT g(this); \
2525 int32_t lane; \
2526 const Operation& op = this->Get(node); \
2527 lane = op.template Cast<Simd128ReplaceLaneOp>().lane; \
2528 Emit(kPPC_##T##ReplaceLane | LaneSizeField::encode(LaneSize), \
2529 g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)), \
2530 g.UseImmediate(lane), g.UseRegister(this->input_at(node, 1))); \
2531 }
2532SIMD_VISIT_REPLACE_LANE(F64x2, F, 64)
2533SIMD_VISIT_REPLACE_LANE(F32x4, F, 32)
2534SIMD_VISIT_REPLACE_LANE(I64x2, I, 64)
2535SIMD_VISIT_REPLACE_LANE(I32x4, I, 32)
2536SIMD_VISIT_REPLACE_LANE(I16x8, I, 16)
2537SIMD_VISIT_REPLACE_LANE(I8x16, I, 8)
2538#undef SIMD_VISIT_REPLACE_LANE
2539
2540#define SIMD_VISIT_BINOP(Opcode) \
2541 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2542 PPCOperandGeneratorT g(this); \
2543 InstructionOperand temps[] = {g.TempRegister()}; \
2544 Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
2545 g.UseRegister(this->input_at(node, 0)), \
2546 g.UseRegister(this->input_at(node, 1)), arraysize(temps), temps); \
2547 }
2548SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
2549#undef SIMD_VISIT_BINOP
2550#undef SIMD_BINOP_LIST
2551
2552#define SIMD_VISIT_UNOP(Opcode) \
2553 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2554 PPCOperandGeneratorT g(this); \
2555 Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
2556 g.UseRegister(this->input_at(node, 0))); \
2557 }
2558SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
2559#undef SIMD_VISIT_UNOP
2560#undef SIMD_UNOP_LIST
2561
2562#define SIMD_VISIT_QFMOP(Opcode) \
2563 void InstructionSelectorT::Visit##Opcode(OpIndex node) { \
2564 PPCOperandGeneratorT g(this); \
2565 Emit(kPPC_##Opcode, g.DefineSameAsFirst(node), \
2566 g.UseRegister(this->input_at(node, 0)), \
2567 g.UseRegister(this->input_at(node, 1)), \
2568 g.UseRegister(this->input_at(node, 2))); \
2569 }
2570SIMD_VISIT_QFMOP(F64x2Qfma)
2571SIMD_VISIT_QFMOP(F64x2Qfms)
2572SIMD_VISIT_QFMOP(F32x4Qfma)
2573SIMD_VISIT_QFMOP(F32x4Qfms)
2574#undef SIMD_VISIT_QFMOP
2575
2576#define SIMD_RELAXED_OP_LIST(V) \
2577 V(F64x2RelaxedMin, F64x2Pmin) \
2578 V(F64x2RelaxedMax, F64x2Pmax) \
2579 V(F32x4RelaxedMin, F32x4Pmin) \
2580 V(F32x4RelaxedMax, F32x4Pmax) \
2581 V(I32x4RelaxedTruncF32x4S, I32x4SConvertF32x4) \
2582 V(I32x4RelaxedTruncF32x4U, I32x4UConvertF32x4) \
2583 V(I32x4RelaxedTruncF64x2SZero, I32x4TruncSatF64x2SZero) \
2584 V(I32x4RelaxedTruncF64x2UZero, I32x4TruncSatF64x2UZero) \
2585 V(I16x8RelaxedQ15MulRS, I16x8Q15MulRSatS) \
2586 V(I8x16RelaxedLaneSelect, S128Select) \
2587 V(I16x8RelaxedLaneSelect, S128Select) \
2588 V(I32x4RelaxedLaneSelect, S128Select) \
2589 V(I64x2RelaxedLaneSelect, S128Select)
2590
2591#define SIMD_VISIT_RELAXED_OP(name, op) \
2592 void InstructionSelectorT::Visit##name(OpIndex node) { Visit##op(node); }
2593SIMD_RELAXED_OP_LIST(SIMD_VISIT_RELAXED_OP)
2594#undef SIMD_VISIT_RELAXED_OP
2595#undef SIMD_RELAXED_OP_LIST
2596
2597#define F16_OP_LIST(V) \
2598 V(F16x8Splat) \
2599 V(F16x8ExtractLane) \
2600 V(F16x8ReplaceLane) \
2601 V(F16x8Abs) \
2602 V(F16x8Neg) \
2603 V(F16x8Sqrt) \
2604 V(F16x8Floor) \
2605 V(F16x8Ceil) \
2606 V(F16x8Trunc) \
2607 V(F16x8NearestInt) \
2608 V(F16x8Add) \
2609 V(F16x8Sub) \
2610 V(F16x8Mul) \
2611 V(F16x8Div) \
2612 V(F16x8Min) \
2613 V(F16x8Max) \
2614 V(F16x8Pmin) \
2615 V(F16x8Pmax) \
2616 V(F16x8Eq) \
2617 V(F16x8Ne) \
2618 V(F16x8Lt) \
2619 V(F16x8Le) \
2620 V(F16x8SConvertI16x8) \
2621 V(F16x8UConvertI16x8) \
2622 V(I16x8SConvertF16x8) \
2623 V(I16x8UConvertF16x8) \
2624 V(F32x4PromoteLowF16x8) \
2625 V(F16x8DemoteF32x4Zero) \
2626 V(F16x8DemoteF64x2Zero) \
2627 V(F16x8Qfma) \
2628 V(F16x8Qfms)
2629
2630#define VISIT_F16_OP(name) \
2631 void InstructionSelectorT::Visit##name(OpIndex node) { UNIMPLEMENTED(); }
2632F16_OP_LIST(VISIT_F16_OP)
2633#undef VISIT_F16_OP
2634#undef F16_OP_LIST
2635#undef SIMD_TYPES
2636
2637#if V8_ENABLE_WEBASSEMBLY
2638void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) {
2639 uint8_t shuffle[kSimd128Size];
2640 bool is_swizzle;
2641 // TODO(nicohartmann@): Properly use view here once Turboshaft support is
2642 // implemented.
2643 auto view = this->simd_shuffle_view(node);
2644 CanonicalizeShuffle(view, shuffle, &is_swizzle);
2645 PPCOperandGeneratorT g(this);
2646 OpIndex input0 = view.input(0);
2647 OpIndex input1 = view.input(1);
2648 // Remap the shuffle indices to match IBM lane numbering.
2649 int max_index = 15;
2650 int total_lane_count = 2 * kSimd128Size;
2651 uint8_t shuffle_remapped[kSimd128Size];
2652 for (int i = 0; i < kSimd128Size; i++) {
2653 uint8_t current_index = shuffle[i];
2654 shuffle_remapped[i] = (current_index <= max_index
2655 ? max_index - current_index
2656 : total_lane_count - current_index + max_index);
2657 }
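 // For example, index 0 (lane 0 of input0) becomes 15 and index 31
 // (lane 15 of input1) becomes 16: lane order is reversed within each
 // 16-byte input to match the IBM (big-endian) lane numbering above.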
2658 Emit(kPPC_I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
2659 g.UseRegister(input1),
2660 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
2661 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
2662 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)),
2663 g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12)));
2664}
2665
2666void InstructionSelectorT::VisitSetStackPointer(OpIndex node) {
2667 OperandGenerator g(this);
2668 // TODO(miladfarca): Optimize by using UseAny.
2669 auto input = g.UseRegister(this->input_at(node, 0));
2670 Emit(kArchSetStackPointer, 0, nullptr, 1, &input);
2671}
2672
2673#else
2674void InstructionSelectorT::VisitI8x16Shuffle(OpIndex node) { UNREACHABLE(); }
2675#endif // V8_ENABLE_WEBASSEMBLY
2676
2677void InstructionSelectorT::VisitS128Zero(OpIndex node) {
2678 PPCOperandGeneratorT g(this);
2679 Emit(kPPC_S128Zero, g.DefineAsRegister(node));
2680}
2681
2682void InstructionSelectorT::VisitS128Select(OpIndex node) {
2683 PPCOperandGeneratorT g(this);
2684 Emit(kPPC_S128Select, g.DefineAsRegister(node),
2685 g.UseRegister(this->input_at(node, 0)),
2686 g.UseRegister(this->input_at(node, 1)),
2687 g.UseRegister(this->input_at(node, 2)));
2688}
2689
2690// This is a replica of SimdShuffle::Pack4Lanes. That function is not
2691// available in builds with WebAssembly disabled, so it is declared locally
2692// here because it is also used by other visitors, such as S128Const.
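// For example, the bytes {1, 2, 3, 4} pack to 0x04030201, i.e. shuffle[0]
// ends up in the least significant byte of the result.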
2693static int32_t Pack4Lanes(const uint8_t* shuffle) {
2694 int32_t result = 0;
2695 for (int i = 3; i >= 0; --i) {
2696 result <<= 8;
2697 result |= shuffle[i];
2698 }
2699 return result;
2700}
2701
2702void InstructionSelectorT::VisitS128Const(OpIndex node) {
2703 PPCOperandGeneratorT g(this);
2704 uint32_t val[kSimd128Size / sizeof(uint32_t)];
2705 const Simd128ConstantOp& constant =
2706 this->Get(node).template Cast<Simd128ConstantOp>();
2707 memcpy(val, constant.value, kSimd128Size);
2708 // If all bytes are zeros, avoid emitting code for generic constants.
2709 bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
2710 bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
2711 val[2] == UINT32_MAX && val[3] == UINT32_MAX;
2712 InstructionOperand dst = g.DefineAsRegister(node);
2713 if (all_zeros) {
2714 Emit(kPPC_S128Zero, dst);
2715 } else if (all_ones) {
2716 Emit(kPPC_S128AllOnes, dst);
2717 } else {
2718 // We have to use Pack4Lanes to reverse the bytes (lanes) on BE;
2719 // on LE this reordering has no effect.
2720 Emit(kPPC_S128Const, g.DefineAsRegister(node),
2721 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]))),
2722 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 4)),
2723 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 8)),
2724 g.UseImmediate(Pack4Lanes(reinterpret_cast<uint8_t*>(&val[0]) + 12)));
2725 }
2726}
2727
2728void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(OpIndex node) {
2729 PPCOperandGeneratorT g(this);
2730 Emit(kPPC_I16x8DotI8x16S, g.DefineAsRegister(node),
2731 g.UseUniqueRegister(this->input_at(node, 0)),
2732 g.UseUniqueRegister(this->input_at(node, 1)));
2733}
2734
2735void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(OpIndex node) {
2736 PPCOperandGeneratorT g(this);
2737 Emit(kPPC_I32x4DotI8x16AddS, g.DefineAsRegister(node),
2738 g.UseUniqueRegister(this->input_at(node, 0)),
2739 g.UseUniqueRegister(this->input_at(node, 1)),
2740 g.UseUniqueRegister(this->input_at(node, 2)));
2741}
2742
2743void InstructionSelectorT::EmitPrepareResults(
2744 ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
2745 OpIndex node) {
2746 PPCOperandGeneratorT g(this);
2747
2748 for (PushParameter output : *results) {
2749 if (!output.location.IsCallerFrameSlot()) continue;
2750 // Skip any alignment holes in nodes.
2751 if (output.node.valid()) {
2752 DCHECK(!call_descriptor->IsCFunctionCall());
2753 if (output.location.GetType() == MachineType::Float32()) {
2754 MarkAsFloat32(output.node);
2755 } else if (output.location.GetType() == MachineType::Float64()) {
2756 MarkAsFloat64(output.node);
2757 } else if (output.location.GetType() == MachineType::Simd128()) {
2758 MarkAsSimd128(output.node);
2759 }
2760 int offset = call_descriptor->GetOffsetToReturns();
2761 int reverse_slot = -output.location.GetLocation() - offset;
2762 Emit(kPPC_Peek, g.DefineAsRegister(output.node),
2763 g.UseImmediate(reverse_slot));
2764 }
2765 }
2766}
2767
2768void InstructionSelectorT::VisitLoadLane(OpIndex node) {
2769 PPCOperandGeneratorT g(this);
2770 InstructionCode opcode = kArchNop;
2771 const Simd128LaneMemoryOp& load =
2772 this->Get(node).template Cast<Simd128LaneMemoryOp>();
2773 switch (load.lane_kind) {
2774 case Simd128LaneMemoryOp::LaneKind::k8:
2775 opcode = kPPC_S128Load8Lane;
2776 break;
2777 case Simd128LaneMemoryOp::LaneKind::k16:
2778 opcode = kPPC_S128Load16Lane;
2779 break;
2780 case Simd128LaneMemoryOp::LaneKind::k32:
2781 opcode = kPPC_S128Load32Lane;
2782 break;
2783 case Simd128LaneMemoryOp::LaneKind::k64:
2784 opcode = kPPC_S128Load64Lane;
2785 break;
2786 }
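 // Only a single lane is replaced, so the result must be defined in the
 // same register as the incoming vector (the first operand below).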
2787 Emit(opcode | AddressingModeField::encode(kMode_MRR),
2788 g.DefineSameAsFirst(node), g.UseRegister(load.value()),
2789 g.UseRegister(load.base()), g.UseRegister(load.index()),
2790 g.UseImmediate(load.lane));
2791}
2792
2793void InstructionSelectorT::VisitLoadTransform(OpIndex node) {
2794 PPCOperandGeneratorT g(this);
2795 ArchOpcode opcode;
2796 const Simd128LoadTransformOp& op =
2797 this->Get(node).template Cast<Simd128LoadTransformOp>();
2798 OpIndex base = op.base();
2799 OpIndex index = op.index();
2800
2801 switch (op.transform_kind) {
2802 case Simd128LoadTransformOp::TransformKind::k8Splat:
2803 opcode = kPPC_S128Load8Splat;
2804 break;
2805 case Simd128LoadTransformOp::TransformKind::k16Splat:
2806 opcode = kPPC_S128Load16Splat;
2807 break;
2808 case Simd128LoadTransformOp::TransformKind::k32Splat:
2809 opcode = kPPC_S128Load32Splat;
2810 break;
2811 case Simd128LoadTransformOp::TransformKind::k64Splat:
2812 opcode = kPPC_S128Load64Splat;
2813 break;
2814 case Simd128LoadTransformOp::TransformKind::k8x8S:
2815 opcode = kPPC_S128Load8x8S;
2816 break;
2817 case Simd128LoadTransformOp::TransformKind::k8x8U:
2818 opcode = kPPC_S128Load8x8U;
2819 break;
2820 case Simd128LoadTransformOp::TransformKind::k16x4S:
2821 opcode = kPPC_S128Load16x4S;
2822 break;
2823 case Simd128LoadTransformOp::TransformKind::k16x4U:
2824 opcode = kPPC_S128Load16x4U;
2825 break;
2826 case Simd128LoadTransformOp::TransformKind::k32x2S:
2827 opcode = kPPC_S128Load32x2S;
2828 break;
2829 case Simd128LoadTransformOp::TransformKind::k32x2U:
2830 opcode = kPPC_S128Load32x2U;
2831 break;
2832 case Simd128LoadTransformOp::TransformKind::k32Zero:
2833 opcode = kPPC_S128Load32Zero;
2834 break;
2835 case Simd128LoadTransformOp::TransformKind::k64Zero:
2836 opcode = kPPC_S128Load64Zero;
2837 break;
2838 default:
2839 UNIMPLEMENTED();
2840 }
2841 Emit(opcode | AddressingModeField::encode(kMode_MRR),
2842 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
2843}
2844
2845void InstructionSelectorT::VisitStoreLane(OpIndex node) {
2846 PPCOperandGeneratorT g(this);
2847 InstructionCode opcode = kArchNop;
2848 InstructionOperand inputs[4];
2849 const Simd128LaneMemoryOp& store =
2850 this->Get(node).template Cast<Simd128LaneMemoryOp>();
2851 switch (store.lane_kind) {
2852 case Simd128LaneMemoryOp::LaneKind::k8:
2853 opcode = kPPC_S128Store8Lane;
2854 break;
2855 case Simd128LaneMemoryOp::LaneKind::k16:
2856 opcode = kPPC_S128Store16Lane;
2857 break;
2858 case Simd128LaneMemoryOp::LaneKind::k32:
2859 opcode = kPPC_S128Store32Lane;
2860 break;
2861 case Simd128LaneMemoryOp::LaneKind::k64:
2862 opcode = kPPC_S128Store64Lane;
2863 break;
2864 }
2865 inputs[0] = g.UseRegister(store.value());
2866 inputs[1] = g.UseRegister(store.base());
2867 inputs[2] = g.UseRegister(store.index());
2868 inputs[3] = g.UseImmediate(store.lane);
2869 Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs);
2870}
2871
2872void InstructionSelectorT::AddOutputToSelectContinuation(OperandGenerator* g,
2873 int first_input_index,
2874 OpIndex node) {
2875 UNREACHABLE();
2876}
2877
2878void InstructionSelectorT::VisitFloat32RoundTiesEven(OpIndex node) {
2879 UNREACHABLE();
2880}
2881
2882void InstructionSelectorT::VisitFloat64RoundTiesEven(OpIndex node) {
2883 UNREACHABLE();
2884}
2885
2886void InstructionSelectorT::VisitF64x2NearestInt(OpIndex node) { UNREACHABLE(); }
2887
2888void InstructionSelectorT::VisitF32x4NearestInt(OpIndex node) { UNREACHABLE(); }
2889
2890MachineOperatorBuilder::Flags
2891InstructionSelector::SupportedMachineOperatorFlags() {
2892 return MachineOperatorBuilder::kFloat32RoundDown |
2893 MachineOperatorBuilder::kFloat64RoundDown |
2894 MachineOperatorBuilder::kFloat32RoundUp |
2895 MachineOperatorBuilder::kFloat64RoundUp |
2896 MachineOperatorBuilder::kFloat32RoundTruncate |
2897 MachineOperatorBuilder::kFloat64RoundTruncate |
2898 MachineOperatorBuilder::kFloat64RoundTiesAway |
2899 MachineOperatorBuilder::kWord32Popcnt |
2900 MachineOperatorBuilder::kWord64Popcnt;
2901 // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F.
2902}
2903
2904MachineOperatorBuilder::AlignmentRequirements
2905InstructionSelector::AlignmentRequirements() {
2906 return MachineOperatorBuilder::AlignmentRequirements::
2907 FullUnalignedAccessSupport();
2908}
2909
2910} // namespace compiler
2911} // namespace internal
2912} // namespace v8