code-generator-mips64.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/osr.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define TRACE(...) PrintF(__VA_ARGS__)

// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // Single (Float) and Double register namespace is same on MIPS,
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK_EQ(0, InputInt32(index));
      return zero_reg;
    }
    return InputRegister(index);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kExternalReference:
      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        // maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
    }
    UNREACHABLE();
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_Root:
        *first_index += 1;
        return MemOperand(kRootRegister, InputInt32(index));
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};

static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}

namespace {

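// Out-of-line slow path of the record-write barrier: re-checks the stored
// value's page flags, recomputes the slot address into scratch1_, and calls
// the record-write stub, saving and restoring ra manually when the frame
// was elided.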
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {
    DCHECK(!AreAliased(object, index, scratch0, scratch1));
    DCHECK(!AreAliased(value, index, scratch0, scratch1));
  }

  void Generate() final {
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    __ Daddu(scratch1_, object_, index_);
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
#endif  // V8_ENABLE_WEBASSEMBLY
    } else {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_, save_fp_mode);
    }
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
#if V8_ENABLE_WEBASSEMBLY
  StubCallMode const stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
  bool must_save_lr_;
  Zone* zone_;
};

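// Generates a tiny OutOfLineCode subclass whose Generate() forwards to the
// corresponding macro-assembler helper (e.g. Float32MaxOutOfLine). The
// inline Float32/Float64 Max/Min sequences branch here for inputs the fast
// path does not handle, such as NaNs.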
#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
                                                                     \
    void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); }  \
                                                                     \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  }

CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);

#undef CREATE_OOL_CLASS

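// Translates a generic FlagsCondition into the MIPS condition code used by
// integer compare-and-branch sequences.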
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
}

Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
}

Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
}

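// Translates a FlagsCondition into an FPU compare condition plus a
// *predicate flag: the branch is taken when the FPU condition bit equals
// the predicate, which lets conditions without a direct encoding be
// expressed as the negation of the complementary compare.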
FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      *predicate = true;
      return EQ;
    case kNotEqual:
      *predicate = false;
      return EQ;
    case kUnsignedLessThan:
    case kFloatLessThan:
      *predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      *predicate = false;
      return OLT;
    case kUnsignedLessThanOrEqual:
    case kFloatLessThanOrEqual:
      *predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      *predicate = false;
      return OLE;
    case kFloatGreaterThan:
      *predicate = false;
      return ULE;
    case kFloatGreaterThanOrEqual:
      *predicate = false;
      return ULT;
    case kFloatLessThanOrUnordered:
      *predicate = true;
      return ULT;
    case kFloatGreaterThanOrUnordered:
      *predicate = false;
      return OLE;
    case kFloatGreaterThanOrEqualOrUnordered:
      *predicate = false;
      return OLT;
    case kFloatLessThanOrEqualOrUnordered:
      *predicate = true;
      return ULE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      *predicate = true;
      break;
    default:
      *predicate = true;
      break;
  }
  UNREACHABLE();
}

}  // namespace

#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)

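// LL/SC retry loop for atomic read-modify-write: the conditional store
// writes 0 to its register when another core intervened between the linked
// load and the store, in which case the loop retries. The surrounding
// sync() instructions provide the memory ordering.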
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr)       \
  do {                                                                         \
    Label binop;                                                               \
    __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)

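// Sub-word variant: LL/SC only works on naturally aligned words and
// doublewords, so the address is aligned down, the byte offset becomes a
// bit shift, and ExtractBits/InsertBits splice the narrow value in and out
// of the full-width loaded word.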
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
                                  size, bin_instr, representation)             \
  do {                                                                         \
    Label binop;                                                               \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(3)));                                      \
    __ sll(i.TempRegister(3), i.TempRegister(3), 3);                           \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3),  \
                   size, sign_extend);                                         \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),     \
                  size);                                                       \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional)       \
  do {                                                                         \
    Label exchange;                                                            \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ mov(i.TempRegister(1), i.InputRegister(2));                             \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(                                  \
    load_linked, store_conditional, sign_extend, size, representation)         \
  do {                                                                         \
    Label exchange;                                                            \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(1)));                                      \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),    \
                  size);                                                       \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)

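// Compare-and-swap loop: exits as soon as the loaded value differs from the
// expected value (input 2), otherwise conditionally stores the new value
// (input 3) and retries if the store-conditional failed.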
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked,                  \
                                                 store_conditional)            \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ BranchShort(&exit, ne, i.InputRegister(2),                              \
                   Operand(i.OutputRegister(0)));                              \
    __ mov(i.TempRegister(2), i.InputRegister(3));                             \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(                          \
    load_linked, store_conditional, sign_extend, size, representation)         \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(1)));                                      \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size,      \
                   sign_extend);                                               \
    __ BranchShort(&exit, ne, i.TempRegister(2),                               \
                   Operand(i.OutputRegister(0)));                              \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),    \
                  size);                                                       \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)

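// The ieee754_* math operations are not inlined; they are dispatched to C
// library routines. PrepareCallCFunction/MovToFloatParameter(s) set up the
// C calling convention for the FP arguments and the result is read back
// with MovFromFloatResult.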
#define ASSEMBLE_IEEE754_BINOP(name)                                        \
  do {                                                                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                         \
  do {                                                                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 1, kScratchReg);                             \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                       \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op)                     \
  do {                                                          \
    __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
          i.InputSimd128Register(1));                           \
  } while (0)

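// Frame teardown: sp is restored from fp, then the return address and the
// caller's frame pointer saved by the prologue are popped.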
void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

namespace {

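// Moves sp so that 'new_slot_above_sp' stack slots sit between sp and the
// frame's fixed slots before a tail call; growth is always applied, while
// shrinking is optional (allow_shrinkage).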
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_slot_offset, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_slot_offset);
}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}

#ifdef V8_ENABLE_LEAPTIERING
// Check that {kJavaScriptCallDispatchHandleRegister} is correct.
void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
  DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());

  // We currently don't check this for JS builtins as those are sometimes
  // called directly (e.g. from other builtins) and not through the dispatch
  // table. This is fine as builtin functions don't use the dispatch handle,
  // but we could enable this check in the future if we make sure to pass the
  // kInvalidDispatchHandle whenever we do a direct call to a JS builtin.
  if (Builtins::IsBuiltinId(info()->builtin())) {
    return;
  }

  UseScratchRegisterScope temps(masm());
  Register actual_parameter_count = temps.Acquire();
  // For now, we only ensure that the register references a valid dispatch
  // entry with the correct parameter count. In the future, we may also be able
  // to check that the entry points back to this code.
  {
    UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ LoadParameterCountFromJSDispatchTable(
        actual_parameter_count, kJavaScriptCallDispatchHandleRegister, scratch);
  }
  __ Assert(eq, AbortReason::kWrongFunctionDispatchHandle,
            actual_parameter_count, Operand(parameter_count_));
}
#endif  // V8_ENABLE_LEAPTIERING

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        CodeEntrypointTag tag =
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ CallCodeObject(reg, tag);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallBuiltinPointer: {
      DCHECK(!instr->InputAt(0)->IsImmediate());
      Register builtin_index = i.InputRegister(0);
      Register target =
          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister)
              ? kJavaScriptCallCodeStartRegister
              : builtin_index;
      __ CallBuiltinByIndex(builtin_index, target);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction:
    case kArchCallWasmFunctionIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchCallWasmFunction);
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Call(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchCallWasmFunctionIndirect) {
        __ CallWasmCodePointer(i.InputRegister(0));
      } else {
        __ Call(i.InputRegister(0));
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallWasm:
    case kArchTailCallWasmIndirect: {
      if (instr->InputAt(0)->IsImmediate()) {
        DCHECK_EQ(arch_opcode, kArchTailCallWasm);
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Jump(wasm_code, constant.rmode());
      } else if (arch_opcode == kArchTailCallWasmIndirect) {
        __ CallWasmCodePointer(i.InputRegister(0), CallJumpMode::kTailCall);
      } else {
        __ Jump(i.InputRegister(0));
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
#endif  // V8_ENABLE_WEBASSEMBLY
    case kArchTailCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        CodeEntrypointTag tag =
            i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex());
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ JumpCodeObject(reg, tag);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      Register reg = i.InputRegister(0);
      DCHECK_IMPLIES(
          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
      __ Jump(reg);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      Register func = i.InputRegister(0);
      if (v8_flags.debug_code) {
        // Check the function's context matches the context argument.
        __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                  Operand(kScratchReg));
      }
      uint32_t num_arguments =
          i.InputUint32(instr->JSCallArgumentCountInputIndex());
      __ CallJSFunction(func, num_arguments);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchSaveCallerRegisters: {
      fp_mode_ =
          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
      DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
             fp_mode_ == SaveFPRegsMode::kSave);
      // kReturnRegister0 should have been saved before entering the stub.
      int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
      DCHECK(IsAligned(bytes, kSystemPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
      DCHECK(!caller_registers_saved_);
      caller_registers_saved_ = true;
      break;
    }
    case kArchRestoreCallerRegisters: {
      DCHECK(fp_mode_ ==
             static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
      DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
             fp_mode_ == SaveFPRegsMode::kSave);
      // Don't overwrite the returned value.
      int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
      frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      DCHECK(caller_registers_saved_);
      caller_registers_saved_ = false;
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunctionWithFrameState:
    case kArchCallCFunction: {
      int const num_gp_parameters = ParamField::decode(instr->opcode());
      int const num_fp_parameters = FPParamField::decode(instr->opcode());
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes;
      Label return_location;
#if V8_ENABLE_WEBASSEMBLY
      bool isWasmCapiFunction =
          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
      if (isWasmCapiFunction) {
        // Put the return address in a stack slot.
        __ LoadAddressPCRelative(kScratchReg, &return_location);
        __ sd(kScratchReg,
              MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
        set_isolate_data_slots = SetIsolateDataSlots::kNo;
      }
#endif  // V8_ENABLE_WEBASSEMBLY
      int pc_offset;
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      } else {
        Register func = i.InputRegister(0);
        pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
                                     set_isolate_data_slots, &return_location);
      }
      RecordSafepoint(instr->reference_map(), pc_offset);

      bool const needs_frame_state =
          (arch_opcode == kArchCallCFunctionWithFrameState);
      if (needs_frame_state) {
        RecordDeoptInfo(instr, pc_offset);
      }

      frame_access_state()->SetFrameAccessToDefault();
      // Ideally, we should decrement SP delta to match the change of stack
      // pointer in CallCFunction. However, for certain architectures (e.g.
      // ARM), there may be more strict alignment requirement, causing old SP
      // to be saved on the stack. In those cases, we can not calculate the SP
      // delta statically.
      frame_access_state()->ClearSPDelta();
      if (caller_registers_saved_) {
        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
        // Here, we assume the sequence to be:
        //   kArchSaveCallerRegisters;
        //   kArchCallCFunction;
        //   kArchRestoreCallerRegisters;
        int bytes =
            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
        frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == a0);
      {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
        __ CallBuiltin(Builtin::kAbortCSADcheck);
      }
      __ stop();
      break;
    case kArchDebugBreak:
      __ DebugBreak();
      break;
    case kArchComment:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)),
                       SourceLocation());
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
          BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
      __ Branch(exit->label());
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
#if V8_ENABLE_WEBASSEMBLY
    case kArchStackPointer:
      // The register allocator expects an allocatable register for the output,
      // we cannot use sp directly.
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchSetStackPointer: {
      DCHECK(instr->InputAt(0)->IsRegister());
      __ mov(sp, i.InputRegister(0));
      break;
    }
#endif  // V8_ENABLE_WEBASSEMBLY
    case kArchStackPointerGreaterThan: {
      Register lhs_register = sp;
      uint32_t offset;
      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
        lhs_register = i.TempRegister(1);
        __ Dsubu(lhs_register, sp, offset);
      }
      __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
      break;
    }
    case kArchStackCheckOffset:
      __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ Ld(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      break;
    case kArchStoreWithWriteBarrier:  // Fall through.
    case kArchAtomicStoreWithWriteBarrier: {
      RecordWriteMode mode = RecordWriteModeField::decode(instr->opcode());
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
                                                   scratch0, scratch1, mode,
                                                   DetermineStubCallMode());
      __ Daddu(kScratchReg, object, index);
      if (arch_opcode == kArchStoreWithWriteBarrier) {
        __ Sd(value, MemOperand(kScratchReg));
      } else {
        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
        __ sync();
        __ Sd(value, MemOperand(kScratchReg));
        __ sync();
      }
      if (mode > RecordWriteMode::kValueIsPointer) {
        __ JumpIfSmi(value, ool->exit());
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStoreIndirectWithWriteBarrier:
      UNREACHABLE();
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base_reg = offset.from_stack_pointer() ? sp : fp;
      __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
      if (v8_flags.debug_code) {
        // Verify that the output_register is properly aligned
        __ And(kScratchReg, i.OutputRegister(),
               Operand(kSystemPointerSize - 1));
        __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
                  Operand(zero_reg));
      }
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow:
      ASSEMBLE_IEEE754_BINOP(pow);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      __ DaddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      __ DsubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulOvf:
      __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMips64DMulOvf:
      __ DMulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHighU:
      __ Dmulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dlsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputInt8(2));
      break;
    case kMips64Lsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And32:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or32:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Nor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor32:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Ctz: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Ctz(dst, src);
    } break;
    case kMips64Dctz: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Dctz(dst, src);
    } break;
    case kMips64Popcnt: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Popcnt(dst, src);
    } break;
    case kMips64Dpopcnt: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Dpopcnt(dst, src);
    } break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMips64Dext: {
      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    }
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

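    // For FP compares, an immediate zero operand is mapped to kDoubleRegZero;
    // the register is materialized lazily the first time it is needed and
    // then reused for later comparisons against 0.0.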
    case kMips64CmpS: {
      FPURegister left = i.InputOrZeroSingleRegister(0);
      FPURegister right = i.InputOrZeroSingleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());

      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }

      __ CompareF32(cc, left, right);
    } break;
    case kMips64AddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64AbsS:
      if (kArchVariant == kMips64r6) {
        __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      } else {
        __ mfc1(kScratchReg, i.InputSingleRegister(0));
        __ Dins(kScratchReg, zero_reg, 31, 1);
        __ mtc1(kScratchReg, i.OutputSingleRegister());
      }
      break;
    case kMips64NegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64SqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64CmpD: {
      FPURegister left = i.InputOrZeroDoubleRegister(0);
      FPURegister right = i.InputOrZeroDoubleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF64(cc, left, right);
    } break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64AbsD:
      if (kArchVariant == kMips64r6) {
        __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ dmfc1(kScratchReg, i.InputDoubleRegister(0));
        __ Dins(kScratchReg, zero_reg, 63, 1);
        __ dmtc1(kScratchReg, i.OutputDoubleRegister());
      }
      break;
    case kMips64NegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64Float64RoundDown: {
      __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundDown: {
      __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundTruncate: {
      __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundTruncate: {
      __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundUp: {
      __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundUp: {
      __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundTiesEven: {
      __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundTiesEven: {
      __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float32Max: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
      __ Float32Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Max: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
      __ Float64Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float32Min: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
      __ Float32Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Min: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
      __ Float64Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtSD:
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtDS:
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSUw: {
      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_s_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_d_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtDUl: {
      __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSUl: {
      __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64FloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      if (instr->OutputCount() > 1) {
        // Check for inputs below INT32_MIN and NaN.
        __ li(i.OutputRegister(1), 1);
        __ Move(scratch, static_cast<double>(INT32_MIN));
        __ CompareF64(LE, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfNotFPUCondition(i.OutputRegister(1));
        __ Move(scratch, static_cast<double>(INT32_MAX) + 1);
        __ CompareF64(LE, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfFPUCondition(i.OutputRegister(1));
      }
      break;
    }
    case kMips64FloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      if (set_overflow_to_min_i32) {
        // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
        // because INT32_MIN allows easier out-of-bounds detection.
        __ addiu(kScratchReg, i.OutputRegister(), 1);
        __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
        __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      }
      break;
    }
    case kMips64TruncLS: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = kScratchReg;

      bool load_status = instr->OutputCount() > 1;
      // Other arches use round to zero here, so we follow.
      __ trunc_l_s(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
      }
      break;
    }
    case kMips64TruncLD: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = kScratchReg;

      bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
      bool load_status = instr->OutputCount() > 1;
      DCHECK_IMPLIES(set_overflow_to_min_i64, instr->OutputCount() == 1);
      // Other arches use round to zero here, so we follow.
      __ trunc_l_d(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(0), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
      }
      if (set_overflow_to_min_i64) {
        // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
        // because INT64_MIN allows easier out-of-bounds detection.
        __ Daddu(kScratchReg, i.OutputRegister(), 1);
        __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
        __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      }
      break;
    }
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      if (instr->OutputCount() > 1) {
        __ li(i.OutputRegister(1), 1);
        __ Move(scratch, static_cast<double>(-1.0));
        __ CompareF64(LT, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfNotFPUCondition(i.OutputRegister(1));
        __ Move(scratch, static_cast<double>(UINT32_MAX) + 1);
        __ CompareF64(LE, scratch, i.InputDoubleRegister(0));
        __ LoadZeroIfFPUCondition(i.OutputRegister(1));
      }
      break;
    }
    case kMips64TruncUwS: {
      FPURegister scratch = kScratchDoubleReg;
      bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
      __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
      if (set_overflow_to_min_i32) {
        // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
        // because 0 allows easier out-of-bounds detection.
        __ addiu(kScratchReg, i.OutputRegister(), 1);
        __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
      }
      break;
    }
    case kMips64TruncUlS: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
                    result);
      break;
    }
    case kMips64TruncUlD: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
                    result);
      break;
    }
    case kMips64BitcastDL:
      __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64BitcastLD:
      __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kMips64Float64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64FromWord32Pair:
      __ Move(i.OutputDoubleRegister(), i.InputRegister(1), i.InputRegister(0));
      break;
    case kMips64Float64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMips64Float64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    // ... more basic instructions ...

    case kMips64Seb:
      __ seb(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Seh:
      __ seh(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Lbu:
      __ Lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lb:
      __ Lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sb: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sb(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Lhu:
      __ Lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulhu:
      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ Lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulh:
      __ Ulh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sh(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Ush: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Ush(i.InputOrZeroRegister(index), mem, kScratchReg);
      break;
    }
    case kMips64Lw:
      __ Lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulw:
      __ Ulw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lwu:
      __ Lwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulwu:
      __ Ulwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ Ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Uld:
      __ Uld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sw(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Usw: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Usw(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Sd: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Sd(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Usd: {
      size_t index = 0;
      MemOperand mem = i.MemoryOperand(&index);
      __ Usd(i.InputOrZeroRegister(index), mem);
      break;
    }
    case kMips64Lwc1: {
      __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMips64Ulwc1: {
      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMips64Swc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Swc1(ft, operand);
      break;
    }
    case kMips64Uswc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Uswc1(ft, operand, kScratchReg);
      break;
    }
    case kMips64Ldc1:
      __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Uldc1:
      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    case kMips64Sdc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroDoubleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Sdc1(ft, operand);
      break;
    }
    case kMips64Usdc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroDoubleRegister(index);
      if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Usdc1(ft, operand, kScratchReg);
      break;
    }
    case kMips64Sync: {
      __ sync();
      break;
    }
    case kMips64Push:
      if (instr->InputAt(0)->IsFPRegister()) {
        __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ Subu(sp, sp, Operand(kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kMips64Peek: {
      int reverse_slot = i.InputInt32(0);
      int offset =
          FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
      if (instr->OutputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
        } else if (op->representation() == MachineRepresentation::kFloat32) {
          __ Lwc1(
              i.OutputSingleRegister(0),
              MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
        } else {
          DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
          __ ld_b(i.OutputSimd128Register(), MemOperand(fp, offset));
        }
      } else {
        __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
      }
      break;
    }
    case kMips64StackClaim: {
      __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
                                            kSystemPointerSize);
      break;
    }
    case kMips64StoreToStackSlot: {
      if (instr->InputAt(0)->IsFPRegister()) {
        if (instr->InputAt(0)->IsSimd128Register()) {
          CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
          __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
        } else {
          __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
        }
      } else {
        __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kMips64ByteSwap64: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
      break;
    }
    case kMips64ByteSwap32: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
      break;
    }
1867 case kMips64S128LoadSplat: {
1868 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1869 auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
1870 __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand());
1871 break;
1872 }
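// Annotation: the six load-extend cases below share one MSA idiom: load
// 64 bits into kScratchReg, broadcast with fill_d, then widen the low half
// in place. Signed forms build a per-element sign mask with clti_s_* (all
// ones for negative elements) and interleave it in as the high part;
// unsigned forms interleave with a zeroed register instead. E.g. for
// S128Load8x8S, bytes [0x80, 0x01, ...] give a clti_s_b mask of
// [0xFF, 0x00, ...] and ilvr_b yields halfwords [0xFF80, 0x0001, ...].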
1873 case kMips64S128Load8x8S: {
1874 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1875 Simd128Register dst = i.OutputSimd128Register();
1876 Simd128Register scratch = kSimd128ScratchReg;
1877 __ Ld(kScratchReg, i.MemoryOperand());
1878 __ fill_d(dst, kScratchReg);
1879 __ clti_s_b(scratch, dst, 0);
1880 __ ilvr_b(dst, scratch, dst);
1881 break;
1882 }
1883 case kMips64S128Load8x8U: {
1884 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1885 Simd128Register dst = i.OutputSimd128Register();
1886 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1887 __ Ld(kScratchReg, i.MemoryOperand());
1888 __ fill_d(dst, kScratchReg);
1889 __ ilvr_b(dst, kSimd128RegZero, dst);
1890 break;
1891 }
1892 case kMips64S128Load16x4S: {
1893 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1894 Simd128Register dst = i.OutputSimd128Register();
1895 Simd128Register scratch = kSimd128ScratchReg;
1896 __ Ld(kScratchReg, i.MemoryOperand());
1897 __ fill_d(dst, kScratchReg);
1898 __ clti_s_h(scratch, dst, 0);
1899 __ ilvr_h(dst, scratch, dst);
1900 break;
1901 }
1902 case kMips64S128Load16x4U: {
1903 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1904 Simd128Register dst = i.OutputSimd128Register();
1905 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1906 __ Ld(kScratchReg, i.MemoryOperand());
1907 __ fill_d(dst, kScratchReg);
1908 __ ilvr_h(dst, kSimd128RegZero, dst);
1909 break;
1910 }
1911 case kMips64S128Load32x2S: {
1912 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1913 Simd128Register dst = i.OutputSimd128Register();
1914 Simd128Register scratch = kSimd128ScratchReg;
1915 __ Ld(kScratchReg, i.MemoryOperand());
1916 __ fill_d(dst, kScratchReg);
1917 __ clti_s_w(scratch, dst, 0);
1918 __ ilvr_w(dst, scratch, dst);
1919 break;
1920 }
1921 case kMips64S128Load32x2U: {
1922 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1923 Simd128Register dst = i.OutputSimd128Register();
1924 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1925 __ Ld(kScratchReg, i.MemoryOperand());
1926 __ fill_d(dst, kScratchReg);
1927 __ ilvr_w(dst, kSimd128RegZero, dst);
1928 break;
1929 }
1930 case kMips64S128Load32Zero: {
1931 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1932 Simd128Register dst = i.OutputSimd128Register();
1933 __ xor_v(dst, dst, dst);
1934 __ Lwu(kScratchReg, i.MemoryOperand());
1935 __ insert_w(dst, 0, kScratchReg);
1936 break;
1937 }
1938 case kMips64S128Load64Zero: {
1939 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1940 Simd128Register dst = i.OutputSimd128Register();
1941 __ xor_v(dst, dst, dst);
1942 __ Ld(kScratchReg, i.MemoryOperand());
1943 __ insert_d(dst, 0, kScratchReg);
1944 break;
1945 }
1946 case kMips64S128LoadLane: {
1947 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1948 Simd128Register dst = i.OutputSimd128Register();
1949 DCHECK_EQ(dst, i.InputSimd128Register(0));
1950 auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
1951 __ LoadLane(sz, dst, i.InputUint8(1), i.MemoryOperand(2));
1952 break;
1953 }
1954 case kMips64S128StoreLane: {
1955 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
1956 Simd128Register src = i.InputSimd128Register(0);
1957 auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
1958 __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
1959 break;
1960 }
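// Annotation: the atomic cases below are emitted through the
// ASSEMBLE_ATOMIC_* helper macros defined earlier in this file. Plain
// atomic loads and stores are ordinary memory accesses combined with sync
// barriers, while the exchange and compare-exchange forms expand into
// load-linked/store-conditional retry loops, as the Ll/Sc (Lld/Scd for
// 64-bit) parameters suggest; the _EXT variants operate on an aligned
// word and take extra sign/size arguments (e.g. true, 8, 32) to extract
// and reinsert the narrow value.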
1961 case kAtomicLoadInt8:
1962 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1963 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
1964 break;
1965 case kAtomicLoadUint8:
1966 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
1967 break;
1968 case kAtomicLoadInt16:
1969 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1970 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
1971 break;
1972 case kAtomicLoadUint16:
1973 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
1974 break;
1975 case kAtomicLoadWord32:
1976 if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord64)
1977 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
1978 else
1979 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
1980 break;
1981 case kMips64Word64AtomicLoadUint64:
1982 ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
1983 break;
1984 case kAtomicStoreWord8:
1985 ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
1986 break;
1987 case kAtomicStoreWord16:
1988 ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
1989 break;
1990 case kAtomicStoreWord32:
1991 ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
1992 break;
1993 case kMips64StoreCompressTagged:
1994 case kMips64Word64AtomicStoreWord64:
1995 ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
1996 break;
1997 case kAtomicExchangeInt8:
1998 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1999 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
2000 break;
2001 case kAtomicExchangeUint8:
2002 switch (AtomicWidthField::decode(opcode)) {
2003 case AtomicWidth::kWord32:
2004 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
2005 break;
2006 case AtomicWidth::kWord64:
2007 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
2008 break;
2009 }
2010 break;
2011 case kAtomicExchangeInt16:
2012 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
2013 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
2014 break;
2015 case kAtomicExchangeUint16:
2016 switch (AtomicWidthField::decode(opcode)) {
2017 case AtomicWidth::kWord32:
2018 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
2019 break;
2020 case AtomicWidth::kWord64:
2021 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
2022 break;
2023 }
2024 break;
2025 case kAtomicExchangeWord32:
2026 switch (AtomicWidthField::decode(opcode)) {
2027 case AtomicWidth::kWord32:
2028 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
2029 break;
2030 case AtomicWidth::kWord64:
2031 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
2032 break;
2033 }
2034 break;
2035 case kMips64Word64AtomicExchangeUint64:
2036 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
2037 break;
2038 case kAtomicCompareExchangeInt8:
2039 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
2040 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
2041 break;
2042 case kAtomicCompareExchangeUint8:
2043 switch (AtomicWidthField::decode(opcode)) {
2044 case AtomicWidth::kWord32:
2045 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
2046 break;
2047 case AtomicWidth::kWord64:
2048 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
2049 break;
2050 }
2051 break;
2052 case kAtomicCompareExchangeInt16:
2053 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
2054 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
2055 break;
2056 case kAtomicCompareExchangeUint16:
2057 switch (AtomicWidthField::decode(opcode)) {
2058 case AtomicWidth::kWord32:
2059 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
2060 break;
2061 case AtomicWidth::kWord64:
2062 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
2063 break;
2064 }
2065 break;
2066 case kAtomicCompareExchangeWord32:
2067 switch (AtomicWidthField::decode(opcode)) {
2068 case AtomicWidth::kWord32:
2069 __ sll(i.InputRegister(2), i.InputRegister(2), 0);
2070 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
2071 break;
2072 case AtomicWidth::kWord64:
2073 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
2074 break;
2075 }
2076 break;
2077 case kMips64Word64AtomicCompareExchangeUint64:
2078 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
2079 break;
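// Annotation: ATOMIC_BINOP_CASE stamps out the Add/Sub/And/Or/Xor
// read-modify-write families, again as ll/sc retry loops applying inst32
// or inst64 to the loaded value, with the _EXT form handling sub-word
// widths.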
2080#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
2081 case kAtomic##op##Int8: \
2082 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
2083 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
2084 break; \
2085 case kAtomic##op##Uint8: \
2086 switch (AtomicWidthField::decode(opcode)) { \
2087 case AtomicWidth::kWord32: \
2088 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
2089 break; \
2090 case AtomicWidth::kWord64: \
2091 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
2092 break; \
2093 } \
2094 break; \
2095 case kAtomic##op##Int16: \
2096 DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
2097 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
2098 break; \
2099 case kAtomic##op##Uint16: \
2100 switch (AtomicWidthField::decode(opcode)) { \
2101 case AtomicWidth::kWord32: \
2102 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
2103 break; \
2104 case AtomicWidth::kWord64: \
2105 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
2106 break; \
2107 } \
2108 break; \
2109 case kAtomic##op##Word32: \
2110 switch (AtomicWidthField::decode(opcode)) { \
2111 case AtomicWidth::kWord32: \
2112 ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
2113 break; \
2114 case AtomicWidth::kWord64: \
2115 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
2116 break; \
2117 } \
2118 break; \
2119 case kMips64Word64Atomic##op##Uint64: \
2120 ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
2121 break;
2122 ATOMIC_BINOP_CASE(Add, Addu, Daddu)
2123 ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
2124 ATOMIC_BINOP_CASE(And, And, And)
2125 ATOMIC_BINOP_CASE(Or, Or, Or)
2126 ATOMIC_BINOP_CASE(Xor, Xor, Xor)
2127#undef ATOMIC_BINOP_CASE
2128 case kMips64AssertEqual:
2129 __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
2130 i.InputRegister(0), Operand(i.InputRegister(1)));
2131 break;
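// Annotation: kMips64S128Const receives its 128-bit immediate as four
// 32-bit inputs; make_uint64(hi, lo) pairs them into two 64-bit halves
// that are inserted as the two doubleword lanes of the destination.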
2132 case kMips64S128Const: {
2133 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2134 Simd128Register dst = i.OutputSimd128Register();
2135 uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
2136 uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
2137 __ li(kScratchReg, imm1);
2138 __ insert_d(dst, 0, kScratchReg);
2139 __ li(kScratchReg, imm2);
2140 __ insert_d(dst, 1, kScratchReg);
2141 break;
2142 }
2143 case kMips64S128Zero: {
2144 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2145 Simd128Register dst = i.OutputSimd128Register();
2146 __ xor_v(dst, dst, dst);
2147 break;
2148 }
2149 case kMips64S128AllOnes: {
2150 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2151 Simd128Register dst = i.OutputSimd128Register();
2152 __ ceq_d(dst, dst, dst);
2153 break;
2154 }
2155 case kMips64I32x4Splat: {
2156 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2157 __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
2158 break;
2159 }
2160 case kMips64I32x4ExtractLane: {
2161 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2162 __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
2163 i.InputInt8(1));
2164 break;
2165 }
2166 case kMips64I32x4ReplaceLane: {
2167 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2168 Simd128Register src = i.InputSimd128Register(0);
2169 Simd128Register dst = i.OutputSimd128Register();
2170 if (src != dst) {
2171 __ move_v(dst, src);
2172 }
2173 __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
2174 break;
2175 }
2176 case kMips64I32x4Add: {
2177 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2178 __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2179 i.InputSimd128Register(1));
2180 break;
2181 }
2182 case kMips64I32x4Sub: {
2183 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2184 __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2185 i.InputSimd128Register(1));
2186 break;
2187 }
2188 case kMips64F64x2Abs: {
2189 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2190 __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2191 break;
2192 }
2193 case kMips64F64x2Neg: {
2194 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2195 __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2196 break;
2197 }
2198 case kMips64F64x2Sqrt: {
2199 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2200 __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2201 break;
2202 }
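// Annotation (assumes the helper's usual shape): the next four cases lower
// to a single MSA instruction each; the ASSEMBLE_F64X2_ARITHMETIC_BINOP
// helper just forwards the named op to the output register and the two
// input registers.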
2203 case kMips64F64x2Add: {
2204 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2205 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d);
2206 break;
2207 }
2208 case kMips64F64x2Sub: {
2209 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2210 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d);
2211 break;
2212 }
2213 case kMips64F64x2Mul: {
2214 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2215 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d);
2216 break;
2217 }
2218 case kMips64F64x2Div: {
2219 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2220 ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d);
2221 break;
2222 }
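// Annotation: Wasm f64x2.min/max must return NaN if either input is NaN
// and must order -0.0 below +0.0; plain fmin_d/fmax_d does not guarantee
// either. The next two cases therefore first merge the signs of equal
// inputs (so min(-0.0, +0.0) is -0.0 and max(-0.0, +0.0) is +0.0), then
// explicitly keep src0 where it is NaN, and finally run fmin_d/fmax_d on
// identical operands to canonicalize any NaN that remains.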
2223 case kMips64F64x2Min: {
2224 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2225 Simd128Register dst = i.OutputSimd128Register();
2226 Simd128Register src0 = i.InputSimd128Register(0);
2227 Simd128Register src1 = i.InputSimd128Register(1);
2228 Simd128Register scratch0 = kSimd128RegZero;
2229 Simd128Register scratch1 = kSimd128ScratchReg;
2230
2231 // If the inputs are -0.0 and +0.0, write -0.0 to scratch1.
2232 // scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
2233 __ fseq_d(scratch0, src0, src1);
2234 __ bsel_v(scratch0, src1, src0);
2235 __ or_v(scratch1, scratch0, src1);
2236 // scratch0 = isNaN(src0) ? src0 : scratch1.
2237 __ fseq_d(scratch0, src0, src0);
2238 __ bsel_v(scratch0, src0, scratch1);
2239 // scratch1 = (src0 < scratch0) ? src0 : scratch0.
2240 __ fslt_d(scratch1, src0, scratch0);
2241 __ bsel_v(scratch1, scratch0, src0);
2242 // Canonicalize the result.
2243 __ fmin_d(dst, scratch1, scratch1);
2244 break;
2245 }
2246 case kMips64F64x2Max: {
2247 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2248 Simd128Register dst = i.OutputSimd128Register();
2249 Simd128Register src0 = i.InputSimd128Register(0);
2250 Simd128Register src1 = i.InputSimd128Register(1);
2251 Simd128Register scratch0 = kSimd128RegZero;
2252 Simd128Register scratch1 = kSimd128ScratchReg;
2253
2254 // If the inputs are -0.0 and +0.0, write +0.0 to scratch1.
2255 // scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
2256 __ fseq_d(scratch0, src0, src1);
2257 __ bsel_v(scratch0, src1, src0);
2258 __ and_v(scratch1, scratch0, src1);
2259 // scratch0 = isNaN(src0) ? src0 : scratch1.
2260 __ fseq_d(scratch0, src0, src0);
2261 __ bsel_v(scratch0, src0, scratch1);
2262 // scratch1 = (scratch0 < src0) ? src0 : scratch0.
2263 __ fslt_d(scratch1, scratch0, src0);
2264 __ bsel_v(scratch1, scratch0, src0);
2265 // Canonicalize the result.
2266 __ fmax_d(dst, scratch1, scratch1);
2267 break;
2268 }
2269 case kMips64F64x2Eq: {
2270 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2271 __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2272 i.InputSimd128Register(1));
2273 break;
2274 }
2275 case kMips64F64x2Ne: {
2276 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2277 __ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2278 i.InputSimd128Register(1));
2279 break;
2280 }
2281 case kMips64F64x2Lt: {
2282 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2283 __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2284 i.InputSimd128Register(1));
2285 break;
2286 }
2287 case kMips64F64x2Le: {
2288 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2289 __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2290 i.InputSimd128Register(1));
2291 break;
2292 }
2293 case kMips64F64x2Splat: {
2294 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2295 __ Move(kScratchReg, i.InputDoubleRegister(0));
2296 __ fill_d(i.OutputSimd128Register(), kScratchReg);
2297 break;
2298 }
2299 case kMips64F64x2ExtractLane: {
2300 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2301 __ copy_s_d(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2302 __ Move(i.OutputDoubleRegister(), kScratchReg);
2303 break;
2304 }
2305 case kMips64F64x2ReplaceLane: {
2306 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2307 Simd128Register src = i.InputSimd128Register(0);
2308 Simd128Register dst = i.OutputSimd128Register();
2309 __ Move(kScratchReg, i.InputDoubleRegister(2));
2310 if (dst != src) {
2311 __ move_v(dst, src);
2312 }
2313 __ insert_d(dst, i.InputInt8(1), kScratchReg);
2314 break;
2315 }
2316 case kMips64I64x2Splat: {
2317 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2318 __ fill_d(i.OutputSimd128Register(), i.InputRegister(0));
2319 break;
2320 }
2321 case kMips64I64x2ExtractLane: {
2322 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2323 __ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0),
2324 i.InputInt8(1));
2325 break;
2326 }
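// Annotation: Pmin/Pmax are the Wasm pseudo-minimum/maximum, defined
// purely as a select: fclt_d writes an all-ones lane mask where the
// comparison holds and bsel_v uses it to pick rhs, so no NaN or
// signed-zero fixup is needed here.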
2327 case kMips64F64x2Pmin: {
2328 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2329 Simd128Register dst = i.OutputSimd128Register();
2330 Simd128Register lhs = i.InputSimd128Register(0);
2331 Simd128Register rhs = i.InputSimd128Register(1);
2332 // dst = rhs < lhs ? rhs : lhs
2333 __ fclt_d(dst, rhs, lhs);
2334 __ bsel_v(dst, lhs, rhs);
2335 break;
2336 }
2337 case kMips64F64x2Pmax: {
2338 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2339 Simd128Register dst = i.OutputSimd128Register();
2340 Simd128Register lhs = i.InputSimd128Register(0);
2341 Simd128Register rhs = i.InputSimd128Register(1);
2342 // dst = lhs < rhs ? rhs : lhs
2343 __ fclt_d(dst, lhs, rhs);
2344 __ bsel_v(dst, lhs, rhs);
2345 break;
2346 }
2347 case kMips64F64x2Ceil: {
2348 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2349 __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2350 kRoundToPlusInf);
2351 break;
2352 }
2353 case kMips64F64x2Floor: {
2354 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2355 __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2356 kRoundToMinusInf);
2357 break;
2358 }
2359 case kMips64F64x2Trunc: {
2360 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2361 __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2362 kRoundToZero);
2363 break;
2364 }
2365 case kMips64F64x2NearestInt: {
2366 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2367 __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2368 kRoundToNearest);
2369 break;
2370 }
2371 case kMips64F64x2ConvertLowI32x4S: {
2372 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2373 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2374 __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
2375 __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
2376 __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
2377 __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
2378 break;
2379 }
2380 case kMips64F64x2ConvertLowI32x4U: {
2381 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2382 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2383 __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
2384 __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
2385 break;
2386 }
2387 case kMips64F64x2PromoteLowF32x4: {
2388 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2389 __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2390 break;
2391 }
2392 case kMips64I64x2ReplaceLane: {
2393 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2394 Simd128Register src = i.InputSimd128Register(0);
2395 Simd128Register dst = i.OutputSimd128Register();
2396 if (src != dst) {
2397 __ move_v(dst, src);
2398 }
2399 __ insert_d(dst, i.InputInt8(1), i.InputRegister(2));
2400 break;
2401 }
2402 case kMips64I64x2Add: {
2403 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2404 __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2405 i.InputSimd128Register(1));
2406 break;
2407 }
2408 case kMips64I64x2Sub: {
2409 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2410 __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2411 i.InputSimd128Register(1));
2412 break;
2413 }
2414 case kMips64I64x2Mul: {
2415 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2416 __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2417 i.InputSimd128Register(1));
2418 break;
2419 }
2420 case kMips64I64x2Neg: {
2421 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2422 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2423 __ subv_d(i.OutputSimd128Register(), kSimd128RegZero,
2424 i.InputSimd128Register(0));
2425 break;
2426 }
2427 case kMips64I64x2Shl: {
2428 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2429 if (instr->InputAt(1)->IsRegister()) {
2430 __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
2431 __ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2432 kSimd128ScratchReg);
2433 } else {
2434 __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2435 i.InputInt6(1));
2436 }
2437 break;
2438 }
2439 case kMips64I64x2ShrS: {
2440 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2441 if (instr->InputAt(1)->IsRegister()) {
2442 __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
2443 __ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2444 kSimd128ScratchReg);
2445 } else {
2446 __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2447 i.InputInt6(1));
2448 }
2449 break;
2450 }
2451 case kMips64I64x2ShrU: {
2452 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2453 if (instr->InputAt(1)->IsRegister()) {
2454 __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
2455 __ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2456 kSimd128ScratchReg);
2457 } else {
2458 __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2459 i.InputInt6(1));
2460 }
2461 break;
2462 }
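// Annotation: I64x2BitMask packs the two lane sign bits into bits 0-1 of
// a GPR: srli_d leaves each sign bit at bit 0 of its lane, shf_w 0x02
// copies the upper lane's low word down to word 0, slli_d moves that copy
// to bit 1, and copy_u_b reads the combined mask from byte 0. E.g. lanes
// [-1, 5] produce 0b01.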
2463 case kMips64I64x2BitMask: {
2464 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2465 Register dst = i.OutputRegister();
2466 Simd128Register src = i.InputSimd128Register(0);
2467 Simd128Register scratch0 = kSimd128RegZero;
2468 Simd128Register scratch1 = kSimd128ScratchReg;
2469 __ srli_d(scratch0, src, 63);
2470 __ shf_w(scratch1, scratch0, 0x02);
2471 __ slli_d(scratch1, scratch1, 1);
2472 __ or_v(scratch0, scratch0, scratch1);
2473 __ copy_u_b(dst, scratch0, 0);
2474 break;
2475 }
2476 case kMips64I64x2Eq: {
2477 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2478 __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2479 i.InputSimd128Register(1));
2480 break;
2481 }
2482 case kMips64I64x2Ne: {
2483 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2484 __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2485 i.InputSimd128Register(1));
2486 __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
2487 i.OutputSimd128Register());
2488 break;
2489 }
2490 case kMips64I64x2GtS: {
2491 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2492 __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
2493 i.InputSimd128Register(0));
2494 break;
2495 }
2496 case kMips64I64x2GeS: {
2497 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2498 __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
2499 i.InputSimd128Register(0));
2500 break;
2501 }
2502 case kMips64I64x2Abs: {
2503 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2504 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2505 __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2506 kSimd128RegZero);
2507 break;
2508 }
2509 case kMips64I64x2SConvertI32x4Low: {
2510 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2511 Simd128Register dst = i.OutputSimd128Register();
2512 Simd128Register src = i.InputSimd128Register(0);
2513 __ ilvr_w(kSimd128ScratchReg, src, src);
2514 __ slli_d(dst, kSimd128ScratchReg, 32);
2515 __ srai_d(dst, dst, 32);
2516 break;
2517 }
2518 case kMips64I64x2SConvertI32x4High: {
2519 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2520 Simd128Register dst = i.OutputSimd128Register();
2521 Simd128Register src = i.InputSimd128Register(0);
2522 __ ilvl_w(kSimd128ScratchReg, src, src);
2523 __ slli_d(dst, kSimd128ScratchReg, 32);
2524 __ srai_d(dst, dst, 32);
2525 break;
2526 }
2527 case kMips64I64x2UConvertI32x4Low: {
2528 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2529 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2530 __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
2531 i.InputSimd128Register(0));
2532 break;
2533 }
2534 case kMips64I64x2UConvertI32x4High: {
2535 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2536 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2537 __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
2538 i.InputSimd128Register(0));
2539 break;
2540 }
2541 case kMips64ExtMulLow: {
2542 auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
2543 __ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
2544 i.InputSimd128Register(1));
2545 break;
2546 }
2547 case kMips64ExtMulHigh: {
2548 auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
2549 __ ExtMulHigh(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
2550 i.InputSimd128Register(1));
2551 break;
2552 }
2553 case kMips64ExtAddPairwise: {
2554 auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
2555 __ ExtAddPairwise(dt, i.OutputSimd128Register(),
2556 i.InputSimd128Register(0));
2557 break;
2558 }
2559 case kMips64F32x4Splat: {
2560 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2561 __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
2562 __ fill_w(i.OutputSimd128Register(), kScratchReg);
2563 break;
2564 }
2565 case kMips64F32x4ExtractLane: {
2566 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2567 __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2568 __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
2569 break;
2570 }
2571 case kMips64F32x4ReplaceLane: {
2572 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2573 Simd128Register src = i.InputSimd128Register(0);
2574 Simd128Register dst = i.OutputSimd128Register();
2575 __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
2576 if (dst != src) {
2577 __ move_v(dst, src);
2578 }
2579 __ insert_w(dst, i.InputInt8(1), kScratchReg);
2580 break;
2581 }
2582 case kMips64F32x4SConvertI32x4: {
2583 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2584 __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2585 break;
2586 }
2587 case kMips64F32x4UConvertI32x4: {
2588 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2589 __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2590 break;
2591 }
2592 case kMips64I32x4Mul: {
2593 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2594 __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2595 i.InputSimd128Register(1));
2596 break;
2597 }
2598 case kMips64I32x4MaxS: {
2599 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2600 __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2601 i.InputSimd128Register(1));
2602 break;
2603 }
2604 case kMips64I32x4MinS: {
2605 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2606 __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2607 i.InputSimd128Register(1));
2608 break;
2609 }
2610 case kMips64I32x4Eq: {
2611 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2612 __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2613 i.InputSimd128Register(1));
2614 break;
2615 }
2616 case kMips64I32x4Ne: {
2617 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2618 Simd128Register dst = i.OutputSimd128Register();
2619 __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2620 __ nor_v(dst, dst, dst);
2621 break;
2622 }
2623 case kMips64I32x4Shl: {
2624 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2625 if (instr->InputAt(1)->IsRegister()) {
2626 __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
2627 __ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2628 kSimd128ScratchReg);
2629 } else {
2630 __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2631 i.InputInt5(1));
2632 }
2633 break;
2634 }
2635 case kMips64I32x4ShrS: {
2636 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2637 if (instr->InputAt(1)->IsRegister()) {
2638 __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
2639 __ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2640 kSimd128ScratchReg);
2641 } else {
2642 __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2643 i.InputInt5(1));
2644 }
2645 break;
2646 }
2647 case kMips64I32x4ShrU: {
2648 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2649 if (instr->InputAt(1)->IsRegister()) {
2650 __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
2651 __ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2652 kSimd128ScratchReg);
2653 } else {
2654 __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2655 i.InputInt5(1));
2656 }
2657 break;
2658 }
2659 case kMips64I32x4MaxU: {
2660 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2661 __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2662 i.InputSimd128Register(1));
2663 break;
2664 }
2665 case kMips64I32x4MinU: {
2666 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2667 __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2668 i.InputSimd128Register(1));
2669 break;
2670 }
2671 case kMips64S128Select: {
2672 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2673 DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
2674 __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
2675 i.InputSimd128Register(1));
2676 break;
2677 }
2678 case kMips64S128AndNot: {
2679 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2680 Simd128Register scratch = kSimd128ScratchReg,
2681 dst = i.OutputSimd128Register(),
2682 src0 = i.InputSimd128Register(0),
2683 src1 = i.InputSimd128Register(1);
2684 __ nor_v(scratch, src1, src1);
2685 __ and_v(dst, scratch, src0);
2686 break;
2687 }
2688 case kMips64F32x4Abs: {
2689 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2690 __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2691 break;
2692 }
2693 case kMips64F32x4Neg: {
2694 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2695 __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2696 break;
2697 }
2698 case kMips64F32x4Add: {
2699 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2700 __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2701 i.InputSimd128Register(1));
2702 break;
2703 }
2704 case kMips64F32x4Sub: {
2705 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2706 __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2707 i.InputSimd128Register(1));
2708 break;
2709 }
2710 case kMips64F32x4Mul: {
2711 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2712 __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2713 i.InputSimd128Register(1));
2714 break;
2715 }
2716 case kMips64F32x4Div: {
2717 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2718 __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2719 i.InputSimd128Register(1));
2720 break;
2721 }
2722 case kMips64F32x4Max: {
2723 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2724 Simd128Register dst = i.OutputSimd128Register();
2725 Simd128Register src0 = i.InputSimd128Register(0);
2726 Simd128Register src1 = i.InputSimd128Register(1);
2727 Simd128Register scratch0 = kSimd128RegZero;
2728 Simd128Register scratch1 = kSimd128ScratchReg;
2729
2730 // If the inputs are -0.0 and +0.0, write +0.0 to scratch1.
2731 // scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
2732 __ fseq_w(scratch0, src0, src1);
2733 __ bsel_v(scratch0, src1, src0);
2734 __ and_v(scratch1, scratch0, src1);
2735 // scratch0 = isNaN(src0) ? src0 : scratch1.
2736 __ fseq_w(scratch0, src0, src0);
2737 __ bsel_v(scratch0, src0, scratch1);
2738 // scratch1 = (scratch0 < src0) ? src0 : scratch0.
2739 __ fslt_w(scratch1, scratch0, src0);
2740 __ bsel_v(scratch1, scratch0, src0);
2741 // Canonicalize the result.
2742 __ fmax_w(dst, scratch1, scratch1);
2743 break;
2744 }
2745 case kMips64F32x4Min: {
2746 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2747 Simd128Register dst = i.OutputSimd128Register();
2748 Simd128Register src0 = i.InputSimd128Register(0);
2749 Simd128Register src1 = i.InputSimd128Register(1);
2750 Simd128Register scratch0 = kSimd128RegZero;
2751 Simd128Register scratch1 = kSimd128ScratchReg;
2752
2753 // If the inputs are -0.0 and +0.0, write -0.0 to scratch1.
2754 // scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
2755 __ fseq_w(scratch0, src0, src1);
2756 __ bsel_v(scratch0, src1, src0);
2757 __ or_v(scratch1, scratch0, src1);
2758 // scratch0 = isNaN(src0) ? src0 : scratch1.
2759 __ fseq_w(scratch0, src0, src0);
2760 __ bsel_v(scratch0, src0, scratch1);
2761 // scratch1 = (src0 < scratch0) ? src0 : scratch0.
2762 __ fslt_w(scratch1, src0, scratch0);
2763 __ bsel_v(scratch1, scratch0, src0);
2764 // Canonicalize the result.
2765 __ fmin_w(dst, scratch1, scratch1);
2766 break;
2767 }
2768 case kMips64F32x4Eq: {
2769 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2770 __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2771 i.InputSimd128Register(1));
2772 break;
2773 }
2774 case kMips64F32x4Ne: {
2775 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2776 __ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2777 i.InputSimd128Register(1));
2778 break;
2779 }
2780 case kMips64F32x4Lt: {
2781 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2782 __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2783 i.InputSimd128Register(1));
2784 break;
2785 }
2786 case kMips64F32x4Le: {
2787 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2788 __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2789 i.InputSimd128Register(1));
2790 break;
2791 }
2792 case kMips64F32x4Pmin: {
2793 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2794 Simd128Register dst = i.OutputSimd128Register();
2795 Simd128Register lhs = i.InputSimd128Register(0);
2796 Simd128Register rhs = i.InputSimd128Register(1);
2797 // dst = rhs < lhs ? rhs : lhs
2798 __ fclt_w(dst, rhs, lhs);
2799 __ bsel_v(dst, lhs, rhs);
2800 break;
2801 }
2802 case kMips64F32x4Pmax: {
2803 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2804 Simd128Register dst = i.OutputSimd128Register();
2805 Simd128Register lhs = i.InputSimd128Register(0);
2806 Simd128Register rhs = i.InputSimd128Register(1);
2807 // dst = lhs < rhs ? rhs : lhs
2808 __ fclt_w(dst, lhs, rhs);
2809 __ bsel_v(dst, lhs, rhs);
2810 break;
2811 }
2812 case kMips64F32x4Ceil: {
2813 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2814 __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2815 kRoundToPlusInf);
2816 break;
2817 }
2818 case kMips64F32x4Floor: {
2819 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2820 __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2821 kRoundToMinusInf);
2822 break;
2823 }
2824 case kMips64F32x4Trunc: {
2825 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2826 __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2827 kRoundToZero);
2828 break;
2829 }
2830 case kMips64F32x4NearestInt: {
2831 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2832 __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2833 kRoundToNearest);
2834 break;
2835 }
2836 case kMips64F32x4DemoteF64x2Zero: {
2837 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2838 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2839 __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
2840 i.InputSimd128Register(0));
2841 break;
2842 }
2843 case kMips64I32x4SConvertF32x4: {
2844 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2845 __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2846 break;
2847 }
2848 case kMips64I32x4UConvertF32x4: {
2849 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2850 __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2851 break;
2852 }
2853 case kMips64F32x4Sqrt: {
2854 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2855 __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2856 break;
2857 }
2858 case kMips64I32x4Neg: {
2859 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2860 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2861 __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
2862 i.InputSimd128Register(0));
2863 break;
2864 }
2865 case kMips64I32x4GtS: {
2866 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2867 __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2868 i.InputSimd128Register(0));
2869 break;
2870 }
2871 case kMips64I32x4GeS: {
2872 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2873 __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2874 i.InputSimd128Register(0));
2875 break;
2876 }
2877 case kMips64I32x4GtU: {
2878 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2879 __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2880 i.InputSimd128Register(0));
2881 break;
2882 }
2883 case kMips64I32x4GeU: {
2884 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2885 __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2886 i.InputSimd128Register(0));
2887 break;
2888 }
2889 case kMips64I32x4Abs: {
2890 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2891 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2892 __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2893 kSimd128RegZero);
2894 break;
2895 }
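// Annotation: I32x4BitMask gathers four sign bits: after srli_w each word
// holds its sign at bit 0, srli_d/or_v folds word 1 into bit 1 of word 0
// (and word 3 into word 2), then shf_w 0x0E brings the upper half down
// and slli_d 2 aligns its two bits as bits 2-3 before the final
// or_v/copy_u_b.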
2896 case kMips64I32x4BitMask: {
2897 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2898 Register dst = i.OutputRegister();
2899 Simd128Register src = i.InputSimd128Register(0);
2900 Simd128Register scratch0 = kSimd128RegZero;
2901 Simd128Register scratch1 = kSimd128ScratchReg;
2902 __ srli_w(scratch0, src, 31);
2903 __ srli_d(scratch1, scratch0, 31);
2904 __ or_v(scratch0, scratch0, scratch1);
2905 __ shf_w(scratch1, scratch0, 0x0E);
2906 __ slli_d(scratch1, scratch1, 2);
2907 __ or_v(scratch0, scratch0, scratch1);
2908 __ copy_u_b(dst, scratch0, 0);
2909 break;
2910 }
2911 case kMips64I32x4DotI16x8S: {
2912 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2913 __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2914 i.InputSimd128Register(1));
2915 break;
2916 }
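// Annotation: the two TruncSat...Zero cases convert the two f64 lanes to
// saturated 32-bit integers and zero the upper half of the result:
// ftrunc_*_d truncates into the 64-bit lanes, sat_s_d/sat_u_d (bit
// position 31) clamps to 32-bit range, and pckev_w packs those even words
// against a zeroed register.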
2917 case kMips64I32x4TruncSatF64x2SZero: {
2918 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2919 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2920 __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
2921 __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
2922 __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
2923 kSimd128ScratchReg);
2924 break;
2925 }
2926 case kMips64I32x4TruncSatF64x2UZero: {
2927 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2928 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2929 __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
2930 __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
2931 __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
2932 kSimd128ScratchReg);
2933 break;
2934 }
2935 case kMips64I16x8Splat: {
2936 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2937 __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
2938 break;
2939 }
2940 case kMips64I16x8ExtractLaneU: {
2941 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2942 __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0),
2943 i.InputInt8(1));
2944 break;
2945 }
2946 case kMips64I16x8ExtractLaneS: {
2947 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2948 __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
2949 i.InputInt8(1));
2950 break;
2951 }
2952 case kMips64I16x8ReplaceLane: {
2953 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2954 Simd128Register src = i.InputSimd128Register(0);
2955 Simd128Register dst = i.OutputSimd128Register();
2956 if (src != dst) {
2957 __ move_v(dst, src);
2958 }
2959 __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
2960 break;
2961 }
2962 case kMips64I16x8Neg: {
2963 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2964 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2965 __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
2966 i.InputSimd128Register(0));
2967 break;
2968 }
2969 case kMips64I16x8Shl: {
2970 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2971 if (instr->InputAt(1)->IsRegister()) {
2972 __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
2973 __ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2974 kSimd128ScratchReg);
2975 } else {
2976 __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2977 i.InputInt4(1));
2978 }
2979 break;
2980 }
2981 case kMips64I16x8ShrS: {
2982 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2983 if (instr->InputAt(1)->IsRegister()) {
2984 __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
2985 __ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2986 kSimd128ScratchReg);
2987 } else {
2988 __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2989 i.InputInt4(1));
2990 }
2991 break;
2992 }
2993 case kMips64I16x8ShrU: {
2994 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
2995 if (instr->InputAt(1)->IsRegister()) {
2996 __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
2997 __ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2998 kSimd128ScratchReg);
2999 } else {
3000 __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3001 i.InputInt4(1));
3002 }
3003 break;
3004 }
3005 case kMips64I16x8Add: {
3006 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3007 __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3008 i.InputSimd128Register(1));
3009 break;
3010 }
3011 case kMips64I16x8AddSatS: {
3012 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3013 __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3014 i.InputSimd128Register(1));
3015 break;
3016 }
3017 case kMips64I16x8Sub: {
3018 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3019 __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3020 i.InputSimd128Register(1));
3021 break;
3022 }
3023 case kMips64I16x8SubSatS: {
3024 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3025 __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3026 i.InputSimd128Register(1));
3027 break;
3028 }
3029 case kMips64I16x8Mul: {
3030 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3031 __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3032 i.InputSimd128Register(1));
3033 break;
3034 }
3035 case kMips64I16x8MaxS: {
3036 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3037 __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3038 i.InputSimd128Register(1));
3039 break;
3040 }
3041 case kMips64I16x8MinS: {
3042 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3043 __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3044 i.InputSimd128Register(1));
3045 break;
3046 }
3047 case kMips64I16x8Eq: {
3048 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3049 __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3050 i.InputSimd128Register(1));
3051 break;
3052 }
3053 case kMips64I16x8Ne: {
3054 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3055 Simd128Register dst = i.OutputSimd128Register();
3056 __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
3057 __ nor_v(dst, dst, dst);
3058 break;
3059 }
3060 case kMips64I16x8GtS: {
3061 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3062 __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3063 i.InputSimd128Register(0));
3064 break;
3065 }
3066 case kMips64I16x8GeS: {
3067 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3068 __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3069 i.InputSimd128Register(0));
3070 break;
3071 }
3072 case kMips64I16x8AddSatU: {
3073 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3074 __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3075 i.InputSimd128Register(1));
3076 break;
3077 }
3078 case kMips64I16x8SubSatU: {
3079 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3080 __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3081 i.InputSimd128Register(1));
3082 break;
3083 }
3084 case kMips64I16x8MaxU: {
3085 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3086 __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3087 i.InputSimd128Register(1));
3088 break;
3089 }
3090 case kMips64I16x8MinU: {
3091 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3092 __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3093 i.InputSimd128Register(1));
3094 break;
3095 }
3096 case kMips64I16x8GtU: {
3097 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3098 __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3099 i.InputSimd128Register(0));
3100 break;
3101 }
3102 case kMips64I16x8GeU: {
3103 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3104 __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3105 i.InputSimd128Register(0));
3106 break;
3107 }
3108 case kMips64I16x8RoundingAverageU: {
3109 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3110 __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3111 i.InputSimd128Register(0));
3112 break;
3113 }
3114 case kMips64I16x8Abs: {
3115 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3116 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3117 __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3118 kSimd128RegZero);
3119 break;
3120 }
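// Annotation: I16x8BitMask extends the same folding to eight sign bits:
// each srli/or_v round doubles how many sign bits sit in the low element
// (halfword -> word -> doubleword), and shf_w 0x0E plus slli_d 4 merges
// the upper doubleword's four bits in as bits 4-7.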
3121 case kMips64I16x8BitMask: {
3122 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3123 Register dst = i.OutputRegister();
3124 Simd128Register src = i.InputSimd128Register(0);
3125 Simd128Register scratch0 = kSimd128RegZero;
3126 Simd128Register scratch1 = kSimd128ScratchReg;
3127 __ srli_h(scratch0, src, 15);
3128 __ srli_w(scratch1, scratch0, 15);
3129 __ or_v(scratch0, scratch0, scratch1);
3130 __ srli_d(scratch1, scratch0, 30);
3131 __ or_v(scratch0, scratch0, scratch1);
3132 __ shf_w(scratch1, scratch0, 0x0E);
3133 __ slli_d(scratch1, scratch1, 4);
3134 __ or_v(scratch0, scratch0, scratch1);
3135 __ copy_u_b(dst, scratch0, 0);
3136 break;
3137 }
3138 case kMips64I16x8Q15MulRSatS: {
3139 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3140 __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3141 i.InputSimd128Register(1));
3142 break;
3143 }
3144 case kMips64I8x16Splat: {
3145 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3146 __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
3147 break;
3148 }
3149 case kMips64I8x16ExtractLaneU: {
3150 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3151 __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0),
3152 i.InputInt8(1));
3153 break;
3154 }
3155 case kMips64I8x16ExtractLaneS: {
3156 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3157 __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
3158 i.InputInt8(1));
3159 break;
3160 }
3161 case kMips64I8x16ReplaceLane: {
3162 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3163 Simd128Register src = i.InputSimd128Register(0);
3164 Simd128Register dst = i.OutputSimd128Register();
3165 if (src != dst) {
3166 __ move_v(dst, src);
3167 }
3168 __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
3169 break;
3170 }
3171 case kMips64I8x16Neg: {
3172 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3173 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3174 __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
3175 i.InputSimd128Register(0));
3176 break;
3177 }
3178 case kMips64I8x16Shl: {
3179 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3180 if (instr->InputAt(1)->IsRegister()) {
3181 __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
3182 __ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3183 kSimd128ScratchReg);
3184 } else {
3185 __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3186 i.InputInt3(1));
3187 }
3188 break;
3189 }
3190 case kMips64I8x16ShrS: {
3191 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3192 if (instr->InputAt(1)->IsRegister()) {
3193 __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
3194 __ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3195 kSimd128ScratchReg);
3196 } else {
3197 __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3198 i.InputInt3(1));
3199 }
3200 break;
3201 }
3202 case kMips64I8x16Add: {
3203 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3204 __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3205 i.InputSimd128Register(1));
3206 break;
3207 }
3208 case kMips64I8x16AddSatS: {
3209 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3210 __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3211 i.InputSimd128Register(1));
3212 break;
3213 }
3214 case kMips64I8x16Sub: {
3215 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3216 __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3217 i.InputSimd128Register(1));
3218 break;
3219 }
3220 case kMips64I8x16SubSatS: {
3221 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3222 __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3223 i.InputSimd128Register(1));
3224 break;
3225 }
3226 case kMips64I8x16MaxS: {
3227 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3228 __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3229 i.InputSimd128Register(1));
3230 break;
3231 }
3232 case kMips64I8x16MinS: {
3233 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3234 __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3235 i.InputSimd128Register(1));
3236 break;
3237 }
3238 case kMips64I8x16Eq: {
3239 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3240 __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3241 i.InputSimd128Register(1));
3242 break;
3243 }
3244 case kMips64I8x16Ne: {
3245 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3246 Simd128Register dst = i.OutputSimd128Register();
3247 __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
3248 __ nor_v(dst, dst, dst);
3249 break;
3250 }
3251 case kMips64I8x16GtS: {
3252 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3253 __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3254 i.InputSimd128Register(0));
3255 break;
3256 }
3257 case kMips64I8x16GeS: {
3258 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3259 __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3260 i.InputSimd128Register(0));
3261 break;
3262 }
3263 case kMips64I8x16ShrU: {
3264 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3265 if (instr->InputAt(1)->IsRegister()) {
3266 __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
3267 __ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3268 kSimd128ScratchReg);
3269 } else {
3270 __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3271 i.InputInt3(1));
3272 }
3273 break;
3274 }
3275 case kMips64I8x16AddSatU: {
3276 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3277 __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3278 i.InputSimd128Register(1));
3279 break;
3280 }
3281 case kMips64I8x16SubSatU: {
3282 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3283 __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3284 i.InputSimd128Register(1));
3285 break;
3286 }
3287 case kMips64I8x16MaxU: {
3288 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3289 __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3290 i.InputSimd128Register(1));
3291 break;
3292 }
3293 case kMips64I8x16MinU: {
3294 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3295 __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3296 i.InputSimd128Register(1));
3297 break;
3298 }
3299 case kMips64I8x16GtU: {
3300 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3301 __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3302 i.InputSimd128Register(0));
3303 break;
3304 }
3305 case kMips64I8x16GeU: {
3306 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3307 __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3308 i.InputSimd128Register(0));
3309 break;
3310 }
3311 case kMips64I8x16RoundingAverageU: {
3312 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3313 __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3314 i.InputSimd128Register(0));
3315 break;
3316 }
3317 case kMips64I8x16Abs: {
3318 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3319 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3320 __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3321 kSimd128RegZero);
3322 break;
3323 }
3324 case kMips64I8x16Popcnt: {
3325 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3326 __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
3327 break;
3328 }
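// Annotation: I8x16BitMask needs sixteen sign bits, so the byte ->
// halfword -> word -> doubleword folding rounds leave eight bits in each
// doubleword's low byte; shf_w 0x0E plus ilvev_b puts those two bytes
// side by side and copy_u_h reads the 16-bit mask.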
3329 case kMips64I8x16BitMask: {
3330 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3331 Register dst = i.OutputRegister();
3332 Simd128Register src = i.InputSimd128Register(0);
3333 Simd128Register scratch0 = kSimd128RegZero;
3334 Simd128Register scratch1 = kSimd128ScratchReg;
3335 __ srli_b(scratch0, src, 7);
3336 __ srli_h(scratch1, scratch0, 7);
3337 __ or_v(scratch0, scratch0, scratch1);
3338 __ srli_w(scratch1, scratch0, 14);
3339 __ or_v(scratch0, scratch0, scratch1);
3340 __ srli_d(scratch1, scratch0, 28);
3341 __ or_v(scratch0, scratch0, scratch1);
3342 __ shf_w(scratch1, scratch0, 0x0E);
3343 __ ilvev_b(scratch0, scratch1, scratch0);
3344 __ copy_u_h(dst, scratch0, 0);
3345 break;
3346 }
3347 case kMips64S128And: {
3348 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3349 __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3350 i.InputSimd128Register(1));
3351 break;
3352 }
3353 case kMips64S128Or: {
3354 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3355 __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3356 i.InputSimd128Register(1));
3357 break;
3358 }
3359 case kMips64S128Xor: {
3360 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3361 __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3362 i.InputSimd128Register(1));
3363 break;
3364 }
3365 case kMips64S128Not: {
3366 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3367 __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3368 i.InputSimd128Register(0));
3369 break;
3370 }
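// Annotation: the AnyTrue/AllTrue cases below rely on the MIPS branch
// delay slot: the li after BranchMSA executes whether or not the branch
// is taken, loading the "taken" result; when the branch falls through,
// the second li overwrites it with the opposite value.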
3371 case kMips64V128AnyTrue: {
3372 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3373 Register dst = i.OutputRegister();
3374 Label all_false;
3375 __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
3376 i.InputSimd128Register(0), USE_DELAY_SLOT);
3377 __ li(dst, 0l); // branch delay slot
3378 __ li(dst, 1);
3379 __ bind(&all_false);
3380 break;
3381 }
3382 case kMips64I64x2AllTrue: {
3383 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3384 Register dst = i.OutputRegister();
3385 Label all_true;
3386 __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
3387 i.InputSimd128Register(0), USE_DELAY_SLOT);
3388 __ li(dst, 1); // branch delay slot
3389 __ li(dst, 0l);
3390 __ bind(&all_true);
3391 break;
3392 }
3393 case kMips64I32x4AllTrue: {
3394 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3395 Register dst = i.OutputRegister();
3396 Label all_true;
3397 __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
3398 i.InputSimd128Register(0), USE_DELAY_SLOT);
3399 __ li(dst, 1); // branch delay slot
3400 __ li(dst, 0l);
3401 __ bind(&all_true);
3402 break;
3403 }
3404 case kMips64I16x8AllTrue: {
3405 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3406 Register dst = i.OutputRegister();
3407 Label all_true;
3408 __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
3409 i.InputSimd128Register(0), USE_DELAY_SLOT);
3410 __ li(dst, 1); // branch delay slot
3411 __ li(dst, 0l);
3412 __ bind(&all_true);
3413 break;
3414 }
3415 case kMips64I8x16AllTrue: {
3416 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3417 Register dst = i.OutputRegister();
3418 Label all_true;
3419 __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
3420 i.InputSimd128Register(0), USE_DELAY_SLOT);
3421 __ li(dst, 1); // branch delay slot
3422 __ li(dst, 0l);
3423 __ bind(&all_true);
3424 break;
3425 }
3426 case kMips64MsaLd: {
3427 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3428 __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
3429 break;
3430 }
3431 case kMips64MsaSt: {
3432 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3433 __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
3434 break;
3435 }
3436 case kMips64S32x4InterleaveRight: {
3437 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3438 Simd128Register dst = i.OutputSimd128Register(),
3439 src0 = i.InputSimd128Register(0),
3440 src1 = i.InputSimd128Register(1);
3441 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3442 // dst = [5, 1, 4, 0]
3443 __ ilvr_w(dst, src1, src0);
3444 break;
3445 }
3446 case kMips64S32x4InterleaveLeft: {
3447 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3448 Simd128Register dst = i.OutputSimd128Register(),
3449 src0 = i.InputSimd128Register(0),
3450 src1 = i.InputSimd128Register(1);
3451 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3452 // dst = [7, 3, 6, 2]
3453 __ ilvl_w(dst, src1, src0);
3454 break;
3455 }
3456 case kMips64S32x4PackEven: {
3457 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3458 Simd128Register dst = i.OutputSimd128Register(),
3459 src0 = i.InputSimd128Register(0),
3460 src1 = i.InputSimd128Register(1);
3461 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3462 // dst = [6, 4, 2, 0]
3463 __ pckev_w(dst, src1, src0);
3464 break;
3465 }
3466 case kMips64S32x4PackOdd: {
3467 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3468 Simd128Register dst = i.OutputSimd128Register(),
3469 src0 = i.InputSimd128Register(0),
3470 src1 = i.InputSimd128Register(1);
3471 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3472 // dst = [7, 5, 3, 1]
3473 __ pckod_w(dst, src1, src0);
3474 break;
3475 }
3476 case kMips64S32x4InterleaveEven: {
3477 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3478 Simd128Register dst = i.OutputSimd128Register(),
3479 src0 = i.InputSimd128Register(0),
3480 src1 = i.InputSimd128Register(1);
3481 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3482 // dst = [6, 2, 4, 0]
3483 __ ilvev_w(dst, src1, src0);
3484 break;
3485 }
3486 case kMips64S32x4InterleaveOdd: {
3487 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3488 Simd128Register dst = i.OutputSimd128Register(),
3489 src0 = i.InputSimd128Register(0),
3490 src1 = i.InputSimd128Register(1);
3491 // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3492 // dst = [7, 3, 5, 1]
3493 __ ilvod_w(dst, src1, src0);
3494 break;
3495 }
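// Annotation: in S32x4Shuffle a unary shuffle (both inputs the same
// register) becomes a single shf.w, whose immediate packs four 2-bit lane
// indices little-endian (output lane k's selector lands in bits
// 2k..2k+1), so the identity selector 0x03020100 encodes as 0b11100100 =
// 0xE4. Binary shuffles zero-extend the four selector bytes to words (the
// two ilvr steps) and use vshf.w, which indexes into the concatenation of
// src0 and src1; since dst doubles as the selector vector, an input
// aliasing dst is first copied to kSimd128ScratchReg.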
3496 case kMips64S32x4Shuffle: {
3497 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3498 Simd128Register dst = i.OutputSimd128Register(),
3499 src0 = i.InputSimd128Register(0),
3500 src1 = i.InputSimd128Register(1);
3501
3502 int32_t shuffle = i.InputInt32(2);
3503
3504 if (src0 == src1) {
3505 // Unary S32x4 shuffles are handled with shf.w instruction
3506 unsigned lane = shuffle & 0xFF;
3507 if (v8_flags.debug_code) {
3508 // For a unary shuffle all four lanes must come from the same
3509 // source register, i.e. every lane index must fall in the same
3510 // range: either [0, 3] or [4, 7].
3511 if (lane >= 4) {
3512 int32_t shuffle_helper = shuffle;
3513 for (int i = 0; i < 4; ++i) {
3514 lane = shuffle_helper & 0xFF;
3515 CHECK_GE(lane, 4);
3516 shuffle_helper >>= 8;
3517 }
3518 }
3519 }
3520 uint32_t i8 = 0;
3521 for (int i = 0; i < 4; i++) {
3522 lane = shuffle & 0xFF;
3523 if (lane >= 4) {
3524 lane -= 4;
3525 }
3526 DCHECK_GT(4, lane);
3527 i8 |= lane << (2 * i);
3528 shuffle >>= 8;
3529 }
3530 __ shf_w(dst, src0, i8);
3531 } else {
3532 // For binary shuffles use vshf.w instruction
3533 if (dst == src0) {
3534 __ move_v(kSimd128ScratchReg, src0);
3535 src0 = kSimd128ScratchReg;
3536 } else if (dst == src1) {
3537 __ move_v(kSimd128ScratchReg, src1);
3538 src1 = kSimd128ScratchReg;
3539 }
3540
3541 __ li(kScratchReg, i.InputInt32(2));
3542 __ insert_w(dst, 0, kScratchReg);
3543 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3544 __ ilvr_b(dst, kSimd128RegZero, dst);
3545 __ ilvr_h(dst, kSimd128RegZero, dst);
3546 __ vshf_w(dst, src1, src0);
3547 }
3548 break;
3549 }
3550 case kMips64S16x8InterleaveRight: {
3551 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3552 Simd128Register dst = i.OutputSimd128Register(),
3553 src0 = i.InputSimd128Register(0),
3554 src1 = i.InputSimd128Register(1);
3555 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3556 // dst = [11, 3, 10, 2, 9, 1, 8, 0]
3557 __ ilvr_h(dst, src1, src0);
3558 break;
3559 }
3560 case kMips64S16x8InterleaveLeft: {
3561 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3562 Simd128Register dst = i.OutputSimd128Register(),
3563 src0 = i.InputSimd128Register(0),
3564 src1 = i.InputSimd128Register(1);
3565 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3566 // dst = [15, 7, 14, 6, 13, 5, 12, 4]
3567 __ ilvl_h(dst, src1, src0);
3568 break;
3569 }
3570 case kMips64S16x8PackEven: {
3571 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3572 Simd128Register dst = i.OutputSimd128Register(),
3573 src0 = i.InputSimd128Register(0),
3574 src1 = i.InputSimd128Register(1);
3575 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3576 // dst = [14, 12, 10, 8, 6, 4, 2, 0]
3577 __ pckev_h(dst, src1, src0);
3578 break;
3579 }
3580 case kMips64S16x8PackOdd: {
3581 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3582 Simd128Register dst = i.OutputSimd128Register(),
3583 src0 = i.InputSimd128Register(0),
3584 src1 = i.InputSimd128Register(1);
3585 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3586 // dst = [15, 13, 11, 9, 7, 5, 3, 1]
3587 __ pckod_h(dst, src1, src0);
3588 break;
3589 }
3590 case kMips64S16x8InterleaveEven: {
3591 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3592 Simd128Register dst = i.OutputSimd128Register(),
3593 src0 = i.InputSimd128Register(0),
3594 src1 = i.InputSimd128Register(1);
3595 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3596 // dst = [14, 6, 12, 4, 10, 2, 8, 0]
3597 __ ilvev_h(dst, src1, src0);
3598 break;
3599 }
3600 case kMips64S16x8InterleaveOdd: {
3601 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3602 Simd128Register dst = i.OutputSimd128Register(),
3603 src0 = i.InputSimd128Register(0),
3604 src1 = i.InputSimd128Register(1);
3605 // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3606 // dst = [15, 7, ... 11, 3, 9, 1]
3607 __ ilvod_h(dst, src1, src0);
3608 break;
3609 }
3610 case kMips64S16x4Reverse: {
3611 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3612 // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
3613 // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3614 __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3615 break;
3616 }
3617 case kMips64S16x2Reverse: {
3618 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3619 // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
3620 // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3621 __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3622 break;
3623 }
3624 case kMips64S8x16InterleaveRight: {
3625 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3626 Simd128Register dst = i.OutputSimd128Register(),
3627 src0 = i.InputSimd128Register(0),
3628 src1 = i.InputSimd128Register(1);
3629 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3630 // dst = [23, 7, ... 17, 1, 16, 0]
3631 __ ilvr_b(dst, src1, src0);
3632 break;
3633 }
3634 case kMips64S8x16InterleaveLeft: {
3635 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3636 Simd128Register dst = i.OutputSimd128Register(),
3637 src0 = i.InputSimd128Register(0),
3638 src1 = i.InputSimd128Register(1);
3639 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3640 // dst = [31, 15, ... 25, 9, 24, 8]
3641 __ ilvl_b(dst, src1, src0);
3642 break;
3643 }
3644 case kMips64S8x16PackEven: {
3645 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3646 Simd128Register dst = i.OutputSimd128Register(),
3647 src0 = i.InputSimd128Register(0),
3648 src1 = i.InputSimd128Register(1);
3649 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3650 // dst = [30, 28, ... 6, 4, 2, 0]
3651 __ pckev_b(dst, src1, src0);
3652 break;
3653 }
3654 case kMips64S8x16PackOdd: {
3655 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3656 Simd128Register dst = i.OutputSimd128Register(),
3657 src0 = i.InputSimd128Register(0),
3658 src1 = i.InputSimd128Register(1);
3659 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3660 // dst = [31, 29, ... 7, 5, 3, 1]
3661 __ pckod_b(dst, src1, src0);
3662 break;
3663 }
3664 case kMips64S8x16InterleaveEven: {
3665 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3666 Simd128Register dst = i.OutputSimd128Register(),
3667 src0 = i.InputSimd128Register(0),
3668 src1 = i.InputSimd128Register(1);
3669 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3670 // dst = [30, 14, ... 18, 2, 16, 0]
3671 __ ilvev_b(dst, src1, src0);
3672 break;
3673 }
3674 case kMips64S8x16InterleaveOdd: {
3675 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3676 Simd128Register dst = i.OutputSimd128Register(),
3677 src0 = i.InputSimd128Register(0),
3678 src1 = i.InputSimd128Register(1);
3679 // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3680 // dst = [31, 15, ... 19, 3, 17, 1]
3681 __ ilvod_b(dst, src1, src0);
3682 break;
3683 }
3684 case kMips64S8x16Concat: {
3685 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3686 Simd128Register dst = i.OutputSimd128Register();
3687 DCHECK(dst == i.InputSimd128Register(0));
3688 __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
3689 break;
3690 }
3691 case kMips64I8x16Shuffle: {
3692 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3693 Simd128Register dst = i.OutputSimd128Register(),
3694 src0 = i.InputSimd128Register(0),
3695 src1 = i.InputSimd128Register(1);
3696
3697 if (dst == src0) {
3698 __ move_v(kSimd128ScratchReg, src0);
3699 src0 = kSimd128ScratchReg;
3700 } else if (dst == src1) {
3701 __ move_v(kSimd128ScratchReg, src1);
3702 src1 = kSimd128ScratchReg;
3703 }
3704
3705 int64_t control_low =
3706 static_cast<int64_t>(i.InputInt32(3)) << 32 | i.InputInt32(2);
3707 int64_t control_hi =
3708 static_cast<int64_t>(i.InputInt32(5)) << 32 | i.InputInt32(4);
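// The sixteen selector bytes of the shuffle arrive as four 32-bit
// immediates; they are reassembled into two 64-bit halves, inserted into
// dst, and consumed by vshf.b as its per-byte control vector.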
3709 __ li(kScratchReg, control_low);
3710 __ insert_d(dst, 0, kScratchReg);
3711 __ li(kScratchReg, control_hi);
3712 __ insert_d(dst, 1, kScratchReg);
3713 __ vshf_b(dst, src1, src0);
3714 break;
3715 }
3716 case kMips64I8x16Swizzle: {
3717 Simd128Register dst = i.OutputSimd128Register(),
3718 tbl = i.InputSimd128Register(0),
3719 ctl = i.InputSimd128Register(1);
3720 DCHECK(dst != ctl && dst != tbl);
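// vshf.b consumes dst as its control vector: selector bytes in [0, 15]
// pick bytes of tbl, while bytes in [16, 31] pick from zeroReg, so
// out-of-range swizzle indices read as zero.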
3721 Simd128Register zeroReg = i.TempSimd128Register(0);
3722 __ xor_v(zeroReg, zeroReg, zeroReg);
3723 __ move_v(dst, ctl);
3724 __ vshf_b(dst, zeroReg, tbl);
3725 break;
3726 }
3727 case kMips64S8x8Reverse: {
3728 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3729 // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
3730 // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
3731 // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
3732 // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
3733 __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
3734 __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
3735 break;
3736 }
3737 case kMips64S8x4Reverse: {
3738 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3739 // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
3740 // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3741 __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3742 break;
3743 }
3744 case kMips64S8x2Reverse: {
3745 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3746 // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
3747 // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3748 __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3749 break;
3750 }
3751 case kMips64I32x4SConvertI16x8Low: {
3752 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3753 Simd128Register dst = i.OutputSimd128Register();
3754 Simd128Register src = i.InputSimd128Register(0);
3755 __ ilvr_h(kSimd128ScratchReg, src, src);
3756 __ slli_w(dst, kSimd128ScratchReg, 16);
3757 __ srai_w(dst, dst, 16);
3758 break;
3759 }
3760 case kMips64I32x4SConvertI16x8High: {
3761 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3762 Simd128Register dst = i.OutputSimd128Register();
3763 Simd128Register src = i.InputSimd128Register(0);
3764 __ ilvl_h(kSimd128ScratchReg, src, src);
3765 __ slli_w(dst, kSimd128ScratchReg, 16);
3766 __ srai_w(dst, dst, 16);
3767 break;
3768 }
3769 case kMips64I32x4UConvertI16x8Low: {
3770 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3771 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3772 __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
3773 i.InputSimd128Register(0));
3774 break;
3775 }
3776 case kMips64I32x4UConvertI16x8High: {
3777 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3778 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3779 __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
3780 i.InputSimd128Register(0));
3781 break;
3782 }
3783 case kMips64I16x8SConvertI8x16Low: {
3784 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3785 Simd128Register dst = i.OutputSimd128Register();
3786 Simd128Register src = i.InputSimd128Register(0);
3787 __ ilvr_b(kSimd128ScratchReg, src, src);
3788 __ slli_h(dst, kSimd128ScratchReg, 8);
3789 __ srai_h(dst, dst, 8);
3790 break;
3791 }
3792 case kMips64I16x8SConvertI8x16High: {
3793 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3794 Simd128Register dst = i.OutputSimd128Register();
3795 Simd128Register src = i.InputSimd128Register(0);
3796 __ ilvl_b(kSimd128ScratchReg, src, src);
3797 __ slli_h(dst, kSimd128ScratchReg, 8);
3798 __ srai_h(dst, dst, 8);
3799 break;
3800 }
3801 case kMips64I16x8SConvertI32x4: {
3802 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3803 Simd128Register dst = i.OutputSimd128Register();
3804 Simd128Register src0 = i.InputSimd128Register(0);
3805 Simd128Register src1 = i.InputSimd128Register(1);
3806 __ sat_s_w(kSimd128ScratchReg, src0, 15);
3807 __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
3808 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3809 break;
3810 }
3811 case kMips64I16x8UConvertI32x4: {
3812 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3813 Simd128Register dst = i.OutputSimd128Register();
3814 Simd128Register src0 = i.InputSimd128Register(0);
3815 Simd128Register src1 = i.InputSimd128Register(1);
3816 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3817 __ max_s_w(kSimd128ScratchReg, kSimd128RegZero, src0);
3818 __ sat_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15);
3819 __ max_s_w(dst, kSimd128RegZero, src1);
3820 __ sat_u_w(dst, dst, 15);
3821 __ pckev_h(dst, dst, kSimd128ScratchReg);
3822 break;
3823 }
3824 case kMips64I16x8UConvertI8x16Low: {
3825 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3826 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3827 __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
3828 i.InputSimd128Register(0));
3829 break;
3830 }
3831 case kMips64I16x8UConvertI8x16High: {
3832 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3833 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3834 __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
3835 i.InputSimd128Register(0));
3836 break;
3837 }
3838 case kMips64I8x16SConvertI16x8: {
3839 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3840 Simd128Register dst = i.OutputSimd128Register();
3841 Simd128Register src0 = i.InputSimd128Register(0);
3842 Simd128Register src1 = i.InputSimd128Register(1);
3843 __ sat_s_h(kSimd128ScratchReg, src0, 7);
3844 __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
3845 __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3846 break;
3847 }
3848 case kMips64I8x16UConvertI16x8: {
3849 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
3850 Simd128Register dst = i.OutputSimd128Register();
3851 Simd128Register src0 = i.InputSimd128Register(0);
3852 Simd128Register src1 = i.InputSimd128Register(1);
3853 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3854 __ max_s_h(kSimd128ScratchReg, kSimd128RegZero, src0);
3855 __ sat_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7);
3856 __ max_s_h(dst, kSimd128RegZero, src1);
3857 __ sat_u_h(dst, dst, 7);
3858 __ pckev_b(dst, dst, kSimd128ScratchReg);
3859 break;
3860 }
3861 }
3862 return kSuccess;
3863}
3864
3865#define UNSUPPORTED_COND(opcode, condition) \
3866 StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
3867 << "\""; \
3868 UNIMPLEMENTED();
3869
3870void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
3871 Instruction* instr, FlagsCondition condition,
3872 Label* tlabel, Label* flabel, bool fallthru) {
3873#undef __
3874#define __ masm->
3875 MipsOperandConverter i(gen, instr);
3876
3877 // MIPS does not have condition code flags, so compare and branch are
3878 // implemented differently than on other architectures. The compare
3879 // operations emit mips pseudo-instructions, which are handled here by
3880 // branch instructions that do the actual comparison. It is essential that
3881 // the input registers to the compare pseudo-op are not modified before this
3882 // branch op, as they are tested here.
3883
3884 if (instr->arch_opcode() == kMips64Tst) {
3885 Condition cc = FlagsConditionToConditionTst(condition);
3886 __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
3887 } else if (instr->arch_opcode() == kMips64Dadd ||
3888 instr->arch_opcode() == kMips64Dsub) {
3889 Condition cc = FlagsConditionToConditionOvf(condition);
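// Dadd/Dsub computed the full 64-bit result of the 32-bit inputs: dsra32
// extracts bits 63..32, sra replicates the sign bit of the low 32 bits,
// and the two values differ exactly when the 32-bit operation overflowed.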
3890 __ dsra32(kScratchReg, i.OutputRegister(), 0);
3891 __ sra(kScratchReg2, i.OutputRegister(), 31);
3892 __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
3893 } else if (instr->arch_opcode() == kMips64DaddOvf ||
3894 instr->arch_opcode() == kMips64DsubOvf) {
3895 switch (condition) {
3896 // Overflow occurs if overflow register is negative
3897 case kOverflow:
3898 __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
3899 break;
3900 case kNotOverflow:
3901 __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
3902 break;
3903 default:
3904 UNSUPPORTED_COND(kMips64DaddOvf, condition);
3905 }
3906 } else if (instr->arch_opcode() == kMips64MulOvf ||
3907 instr->arch_opcode() == kMips64DMulOvf) {
3908 // Overflow occurs if overflow register is not zero
3909 switch (condition) {
3910 case kOverflow:
3911 __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
3912 break;
3913 case kNotOverflow:
3914 __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
3915 break;
3916 default:
3917 UNSUPPORTED_COND(kMips64MulOvf, condition);
3918 }
3919 } else if (instr->arch_opcode() == kMips64Cmp) {
3920 Condition cc = FlagsConditionToConditionCmp(condition);
3921 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
3922 } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
3923 Condition cc = FlagsConditionToConditionCmp(condition);
3924 DCHECK((cc == ls) || (cc == hi));
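// The temp register holds 1 iff sp is above the limit; xori flips it for
// the inverted (ls) condition, so a single branch-if-nonzero serves both.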
3925 if (cc == ls) {
3926 __ xori(i.TempRegister(0), i.TempRegister(0), 1);
3927 }
3928 __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
3929 } else if (instr->arch_opcode() == kMips64CmpS ||
3930 instr->arch_opcode() == kMips64CmpD) {
3931 bool predicate;
3932 FlagsConditionToConditionCmpFPU(&predicate, condition);
3933 if (predicate) {
3934 __ BranchTrueF(tlabel);
3935 } else {
3936 __ BranchFalseF(tlabel);
3937 }
3938 } else {
3939 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
3940 instr->arch_opcode());
3941 UNIMPLEMENTED();
3942 }
3943 if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
3944#undef __
3945#define __ masm()->
3946}
3947
3948// Assembles branches after an instruction.
3949void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3950 Label* tlabel = branch->true_label;
3951 Label* flabel = branch->false_label;
3952
3953 AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
3954 branch->fallthru);
3955}
3956
3957#undef UNSUPPORTED_COND
3958
3959 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3960 BranchInfo* branch) {
3961 AssembleArchBranch(instr, branch);
3962}
3963
3964 void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
3965 RpoNumber target) {
3966 __ Branch(GetLabel(target));
3967}
3968
3969#if V8_ENABLE_WEBASSEMBLY
3970void CodeGenerator::AssembleArchTrap(Instruction* instr,
3971 FlagsCondition condition) {
3972 class OutOfLineTrap final : public OutOfLineCode {
3973 public:
3974 OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3975 : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
3976 void Generate() final {
3977 MipsOperandConverter i(gen_, instr_);
3978 TrapId trap_id =
3979 static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
3980 GenerateCallToTrap(trap_id);
3981 }
3982
3983 private:
3984 void GenerateCallToTrap(TrapId trap_id) {
3985 gen_->AssembleSourcePosition(instr_);
3986 // A direct call to a wasm runtime stub defined in this module.
3987 // Just encode the stub index. This will be patched when the code
3988 // is added to the native module and copied into wasm code space.
3989 __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3990 ReferenceMap* reference_map =
3991 gen_->zone()->New<ReferenceMap>(gen_->zone());
3992 gen_->RecordSafepoint(reference_map);
3993 if (v8_flags.debug_code) {
3994 __ stop();
3995 }
3996 }
3997 Instruction* instr_;
3998 CodeGenerator* gen_;
3999 };
4000 auto ool = zone()->New<OutOfLineTrap>(this, instr);
4001 Label* tlabel = ool->entry();
4002 AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
4003}
4004#endif // V8_ENABLE_WEBASSEMBLY
4005
4006// Assembles boolean materializations after an instruction.
4007 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
4008 FlagsCondition condition) {
4009 MipsOperandConverter i(this, instr);
4010
4011 // Materialize a full 32-bit 1 or 0 value. The result register is always the
4012 // last output of the instruction.
4013 DCHECK_NE(0u, instr->OutputCount());
4014 Register result = i.OutputRegister(instr->OutputCount() - 1);
4015 // MIPS does not have condition code flags, so compare and branch are
4016 // implemented differently than on other architectures. The compare
4017 // operations emit mips pseudo-instructions, which are checked and handled here.
4018
4019 if (instr->arch_opcode() == kMips64Tst) {
4020 Condition cc = FlagsConditionToConditionTst(condition);
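// Sltu(result, kScratchReg, 1) materializes (kScratchReg == 0) and
// Sltu(result, zero_reg, kScratchReg) materializes (kScratchReg != 0),
// so no branch is needed.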
4021 if (cc == eq) {
4022 __ Sltu(result, kScratchReg, 1);
4023 } else {
4024 __ Sltu(result, zero_reg, kScratchReg);
4025 }
4026 return;
4027 } else if (instr->arch_opcode() == kMips64Dadd ||
4028 instr->arch_opcode() == kMips64Dsub) {
4029 Condition cc = FlagsConditionToConditionOvf(condition);
4030 // Check for overflow creates 1 or 0 for result.
4031 __ dsrl32(kScratchReg, i.OutputRegister(), 31);
4032 __ srl(kScratchReg2, i.OutputRegister(), 31);
4033 __ xor_(result, kScratchReg, kScratchReg2);
4034 if (cc == eq) // Toggle result for not overflow.
4035 __ xori(result, result, 1);
4036 return;
4037 } else if (instr->arch_opcode() == kMips64DaddOvf ||
4038 instr->arch_opcode() == kMips64DsubOvf) {
4039 // Overflow occurs if overflow register is negative
4040 __ slt(result, kScratchReg, zero_reg);
4041 } else if (instr->arch_opcode() == kMips64MulOvf ||
4042 instr->arch_opcode() == kMips64DMulOvf) {
4043 // Overflow occurs if overflow register is not zero
4044 __ Sgtu(result, kScratchReg, zero_reg);
4045 } else if (instr->arch_opcode() == kMips64Cmp) {
4046 Condition cc = FlagsConditionToConditionCmp(condition);
4047 __ CompareWord(cc, result, i.InputRegister(0), i.InputOperand(1));
4048 return;
4049 } else if (instr->arch_opcode() == kMips64CmpD ||
4050 instr->arch_opcode() == kMips64CmpS) {
4051 FPURegister left = i.InputOrZeroDoubleRegister(0);
4052 FPURegister right = i.InputOrZeroDoubleRegister(1);
4053 if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
4054 !__ IsDoubleZeroRegSet()) {
4055 __ Move(kDoubleRegZero, 0.0);
4056 }
4057 bool predicate;
4058 FlagsConditionToConditionCmpFPU(&predicate, condition);
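// Pre-r6 FP compares set an FCSR condition flag that Movf/Movt consume;
// r6 compares instead write all-ones/all-zeros into kDoubleCompareReg,
// so only the least significant bit has to be extracted below.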
4059 if (kArchVariant != kMips64r6) {
4060 __ li(result, Operand(1));
4061 if (predicate) {
4062 __ Movf(result, zero_reg);
4063 } else {
4064 __ Movt(result, zero_reg);
4065 }
4066 } else {
4067 if (instr->arch_opcode() == kMips64CmpD) {
4068 __ dmfc1(result, kDoubleCompareReg);
4069 } else {
4070 DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
4071 __ mfc1(result, kDoubleCompareReg);
4072 }
4073 if (predicate) {
4074 __ And(result, result, 1); // cmp returns all 1's/0's, use only LSB.
4075 } else {
4076 __ Addu(result, result, 1); // Toggle result for not equal.
4077 }
4078 }
4079 return;
4080 } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
4081 Condition cc = FlagsConditionToConditionCmp(condition);
4082 DCHECK((cc == ls) || (cc == hi));
4083 if (cc == ls) {
4084 __ xori(i.OutputRegister(), i.TempRegister(0), 1);
4085 }
4086 return;
4087 } else {
4088 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
4089 instr->arch_opcode());
4090 TRACE("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__,
4091 __LINE__);
4092 UNIMPLEMENTED();
4093 }
4094}
4095
4096 void CodeGenerator::AssembleArchConditionalBoolean(Instruction* instr) {
4097 UNREACHABLE();
4098}
4099
4100 void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
4101 BranchInfo* branch) {
4102 UNREACHABLE();
4103}
4104
4105 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
4106 MipsOperandConverter i(this, instr);
4107 Register input = i.InputRegister(0);
4108 std::vector<std::pair<int32_t, Label*>> cases;
4109 for (size_t index = 2; index < instr->InputCount(); index += 2) {
4110 cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
4111 }
4112
4113 UseScratchRegisterScope temps(masm());
4114 Register scratch = temps.Acquire();
4115 // The input register may contain dirty data in its upper 32 bits, so
4116 // explicitly sign-extend it here.
4117 __ sll(scratch, input, 0);
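// On MIPS64, sll with a shift amount of 0 is the canonical idiom for
// sign-extending a 32-bit value to 64 bits.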
4118 AssembleArchBinarySearchSwitchRange(scratch, i.InputRpo(1), cases.data(),
4119 cases.data() + cases.size());
4120}
4121
4122 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
4123 MipsOperandConverter i(this, instr);
4124 Register input = i.InputRegister(0);
4125 size_t const case_count = instr->InputCount() - 2;
4126
4127 UseScratchRegisterScope temps(masm());
4128 Register scratch = temps.Acquire();
4129 // The input register may contain dirty data in its upper 32 bits, so
4130 // explicitly sign-extend it here.
4131 __ sll(scratch, input, 0);
4132 __ Branch(GetLabel(i.InputRpo(1)), hs, scratch, Operand(case_count));
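// The unsigned (hs) comparison routes both negative and out-of-range
// indices to the default block with a single check.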
4133 __ GenerateSwitchTable(scratch, case_count, [&i, this](size_t index) {
4134 return GetLabel(i.InputRpo(index + 2));
4135 });
4136}
4137
4138 void CodeGenerator::AssembleArchSelect(Instruction* instr,
4139 FlagsCondition condition) {
4140 UNIMPLEMENTED();
4141}
4142
4143void CodeGenerator::FinishFrame(Frame* frame) {
4144 auto call_descriptor = linkage()->GetIncomingDescriptor();
4145
4146 const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4147 if (!saves_fpu.is_empty()) {
4148 int count = saves_fpu.Count();
4149 DCHECK_EQ(kNumCalleeSavedFPU, count);
4150 frame->AllocateSavedCalleeRegisterSlots(count *
4151 (kDoubleSize / kSystemPointerSize));
4152 }
4153
4154 const RegList saves = call_descriptor->CalleeSavedRegisters();
4155 if (!saves.is_empty()) {
4156 int count = saves.Count();
4157 frame->AllocateSavedCalleeRegisterSlots(count);
4158 }
4159}
4160
4161 void CodeGenerator::AssembleConstructFrame() {
4162 auto call_descriptor = linkage()->GetIncomingDescriptor();
4163
4164 if (frame_access_state()->has_frame()) {
4165 if (call_descriptor->IsCFunctionCall()) {
4166#if V8_ENABLE_WEBASSEMBLY
4167 if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
4168 __ StubPrologue(StackFrame::C_WASM_ENTRY);
4169 // Reserve stack space for saving the c_entry_fp later.
4170 __ Dsubu(sp, sp, Operand(kSystemPointerSize));
4171#else
4172 // For balance.
4173 if (false) {
4174#endif // V8_ENABLE_WEBASSEMBLY
4175 } else {
4176 __ Push(ra, fp);
4177 __ mov(fp, sp);
4178 }
4179 } else if (call_descriptor->IsJSFunctionCall()) {
4180 __ Prologue();
4181 } else {
4182 __ StubPrologue(info()->GetOutputStackFrameType());
4183#if V8_ENABLE_WEBASSEMBLY
4184 if (call_descriptor->IsAnyWasmFunctionCall() ||
4185 call_descriptor->IsWasmImportWrapper() ||
4186 call_descriptor->IsWasmCapiFunction()) {
4187 // For import wrappers and C-API functions, this stack slot is only used
4188 // for printing stack traces in V8. Also, it holds a WasmImportData
4189 // instead of the trusted instance data, which is taken care of in the
4190 // frames accessors.
4191 __ Push(kWasmImplicitArgRegister);
4192 }
4193 if (call_descriptor->IsWasmCapiFunction()) {
4194 // Reserve space for saving the PC later.
4195 __ Dsubu(sp, sp, Operand(kSystemPointerSize));
4196 }
4197#endif // V8_ENABLE_WEBASSEMBLY
4198 }
4199 }
4200
4201 int required_slots =
4202 frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
4203
4204 if (info()->is_osr()) {
4205 // TurboFan OSR-compiled functions cannot be entered directly.
4206 __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
4207
4208 // Unoptimized code jumps directly to this entrypoint while the unoptimized
4209 // frame is still on the stack. Optimized code uses OSR values directly from
4210 // the unoptimized frame. Thus, all that needs to be done is to allocate the
4211 // remaining stack slots.
4212 __ RecordComment("-- OSR entrypoint --");
4213 osr_pc_offset_ = __ pc_offset();
4214 required_slots -= osr_helper()->UnoptimizedFrameSlots();
4215 }
4216
4217 const RegList saves = call_descriptor->CalleeSavedRegisters();
4218 const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4219
4220 if (required_slots > 0) {
4221 DCHECK(frame_access_state()->has_frame());
4222#if V8_ENABLE_WEBASSEMBLY
4223 if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
4224 // For WebAssembly functions with big frames we have to do the stack
4225 // overflow check before we construct the frame. Otherwise we may not
4226 // have enough space on the stack to call the runtime for the stack
4227 // overflow.
4228 Label done;
4229
4230 // If the frame is bigger than the stack, we throw the stack overflow
4231 // exception unconditionally. Thereby we can avoid the integer overflow
4232 // check in the condition code.
4233 if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
4234 __ LoadStackLimit(kScratchReg,
4235 MacroAssembler::StackLimitKind::kRealStackLimit);
4236 __ Daddu(kScratchReg, kScratchReg,
4237 Operand(required_slots * kSystemPointerSize));
4238 __ Branch(&done, uge, sp, Operand(kScratchReg));
4239 }
4240
4241 __ Call(static_cast<intptr_t>(Builtin::kWasmStackOverflow),
4242 RelocInfo::WASM_STUB_CALL);
4243 // The call does not return, hence we can ignore any references and just
4244 // define an empty safepoint.
4245 ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
4246 RecordSafepoint(reference_map);
4247 if (v8_flags.debug_code) __ stop();
4248
4249 __ bind(&done);
4250 }
4251#endif // V8_ENABLE_WEBASSEMBLY
4252 }
4253
4254 const int returns = frame()->GetReturnSlotCount();
4255
4256 // Skip callee-saved and return slots, which are pushed below.
4257 required_slots -= saves.Count();
4258 required_slots -= saves_fpu.Count();
4259 required_slots -= returns;
4260 if (required_slots > 0) {
4261 __ Dsubu(sp, sp, Operand(required_slots * kSystemPointerSize));
4262 }
4263
4264 if (!saves_fpu.is_empty()) {
4265 // Save callee-saved FPU registers.
4266 __ MultiPushFPU(saves_fpu);
4267 DCHECK_EQ(kNumCalleeSavedFPU, saves_fpu.Count());
4268 }
4269
4270 if (!saves.is_empty()) {
4271 // Save callee-saved registers.
4272 __ MultiPush(saves);
4273 }
4274
4275 if (returns != 0) {
4276 // Create space for returns.
4277 __ Dsubu(sp, sp, Operand(returns * kSystemPointerSize));
4278 }
4279
4280 for (int spill_slot : frame()->tagged_slots()) {
4281 FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot);
4282 DCHECK(offset.from_frame_pointer());
4283 __ Sd(zero_reg, MemOperand(fp, offset.offset()));
4284 }
4285}
4286
4287void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
4288 auto call_descriptor = linkage()->GetIncomingDescriptor();
4289
4290 const int returns = frame()->GetReturnSlotCount();
4291 if (returns != 0) {
4292 __ Daddu(sp, sp, Operand(returns * kSystemPointerSize));
4293 }
4294
4295 // Restore GP registers.
4296 const RegList saves = call_descriptor->CalleeSavedRegisters();
4297 if (!saves.is_empty()) {
4298 __ MultiPop(saves);
4299 }
4300
4301 // Restore FPU registers.
4302 const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4303 if (!saves_fpu.is_empty()) {
4304 __ MultiPopFPU(saves_fpu);
4305 }
4306
4307 MipsOperandConverter g(this, nullptr);
4308
4309 const int parameter_slots =
4310 static_cast<int>(call_descriptor->ParameterSlotCount());
4311
4312 // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
4313 // See RawMachineAssembler::PopAndReturn.
4314 if (parameter_slots != 0) {
4315 if (additional_pop_count->IsImmediate()) {
4316 DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
4317 } else if (v8_flags.debug_code) {
4318 __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
4319 g.ToRegister(additional_pop_count),
4320 Operand(static_cast<int64_t>(0)));
4321 }
4322 }
4323
4324 // Functions with JS linkage have at least one parameter (the receiver).
4325 // If {parameter_slots} == 0, it means it is a builtin with
4326 // kDontAdaptArgumentsSentinel, which takes care of popping the JS
4327 // arguments itself.
4328 const bool drop_jsargs = frame_access_state()->has_frame() &&
4329 call_descriptor->IsJSFunctionCall() &&
4330 parameter_slots != 0;
4331
4332 if (call_descriptor->IsCFunctionCall()) {
4333 AssembleDeconstructFrame();
4334 } else if (frame_access_state()->has_frame()) {
4335 // Canonicalize JSFunction return sites for now, unless they have a
4336 // variable number of stack slot pops.
4337 if (additional_pop_count->IsImmediate() &&
4338 g.ToConstant(additional_pop_count).ToInt32() == 0) {
4339 if (return_label_.is_bound()) {
4340 __ Branch(&return_label_);
4341 return;
4342 } else {
4343 __ bind(&return_label_);
4344 }
4345 }
4346 if (drop_jsargs) {
4347 // Get the actual argument count
4348 __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
4349 }
4350 AssembleDeconstructFrame();
4351 }
4352 if (drop_jsargs) {
4353 // We must pop all arguments from the stack (including the receiver). This
4354 // number of arguments is given by max(1 + argc_reg, parameter_slots).
4355 if (parameter_slots > 1) {
4356 __ li(kScratchReg, parameter_slots);
4357 __ slt(kScratchReg2, t0, kScratchReg);
4358 __ movn(t0, kScratchReg, kScratchReg2);
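// kScratchReg2 = (t0 < parameter_slots); movn then replaces t0 with
// parameter_slots when that is larger, i.e. t0 = max(t0, parameter_slots)
// before Dlsa pops t0 * kSystemPointerSize bytes.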
4359 }
4360 __ Dlsa(sp, sp, t0, kSystemPointerSizeLog2);
4361 } else if (additional_pop_count->IsImmediate()) {
4362 int additional_count = g.ToConstant(additional_pop_count).ToInt32();
4363 __ Drop(parameter_slots + additional_count);
4364 } else {
4365 Register pop_reg = g.ToRegister(additional_pop_count);
4366 __ Drop(parameter_slots);
4367 __ Dlsa(sp, sp, pop_reg, kSystemPointerSizeLog2);
4368 }
4369 __ Ret();
4370}
4371
4372 void CodeGenerator::FinishCode() {}
4373
4374 void CodeGenerator::PrepareForDeoptimizationExits(
4375 ZoneDeque<DeoptimizationExit*>* exits) {}
4376
4377AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
4378 auto rep = LocationOperand::cast(source)->representation();
4379 int new_slots = ElementSizeInPointers(rep);
4380 MipsOperandConverter g(this, nullptr);
4381 int last_frame_slot_id =
4382 frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
4383 int sp_delta = frame_access_state_->sp_delta();
4384 int slot_id = last_frame_slot_id + sp_delta + new_slots;
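// The pushed value lands just below everything currently on the stack: its
// slot id is the last fixed frame slot, plus the slots already pushed
// (sp_delta), plus the slots being pushed now.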
4385 AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
4386 if (source->IsRegister()) {
4387 __ Push(g.ToRegister(source));
4388 frame_access_state()->IncreaseSPDelta(new_slots);
4389 } else if (source->IsStackSlot()) {
4390 UseScratchRegisterScope temps(masm());
4391 Register scratch = temps.Acquire();
4392 __ Ld(scratch, g.ToMemOperand(source));
4393 __ Push(scratch);
4394 frame_access_state()->IncreaseSPDelta(new_slots);
4395 } else {
4396 // No push instruction for this operand type. Bump the stack pointer and
4397 // assemble the move.
4398 __ Dsubu(sp, sp, Operand(new_slots * kSystemPointerSize));
4399 frame_access_state()->IncreaseSPDelta(new_slots);
4400 AssembleMove(source, &stack_slot);
4401 }
4402 temp_slots_ += new_slots;
4403 return stack_slot;
4404}
4405
4406void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
4407 MipsOperandConverter g(this, nullptr);
4408 int dropped_slots = ElementSizeInPointers(rep);
4409 if (dest->IsRegister()) {
4410 frame_access_state()->IncreaseSPDelta(-dropped_slots);
4411 __ Pop(g.ToRegister(dest));
4412 } else if (dest->IsStackSlot()) {
4413 frame_access_state()->IncreaseSPDelta(-dropped_slots);
4414 UseScratchRegisterScope temps(masm());
4415 Register scratch = temps.Acquire();
4416 __ Pop(scratch);
4417 __ Sd(scratch, g.ToMemOperand(dest));
4418 } else {
4419 int last_frame_slot_id =
4420 frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
4421 int sp_delta = frame_access_state_->sp_delta();
4422 int slot_id = last_frame_slot_id + sp_delta;
4423 AllocatedOperand stack_slot(LocationOperand::STACK_SLOT, rep, slot_id);
4424 AssembleMove(&stack_slot, dest);
4425 frame_access_state()->IncreaseSPDelta(-dropped_slots);
4426 __ Daddu(sp, sp, Operand(dropped_slots * kSystemPointerSize));
4427 }
4428 temp_slots_ -= dropped_slots;
4429}
4430
4431 void CodeGenerator::PopTempStackSlots() {
4432 if (temp_slots_ > 0) {
4433 frame_access_state()->IncreaseSPDelta(-temp_slots_);
4434 __ Daddu(sp, sp, Operand(temp_slots_ * kSystemPointerSize));
4435 temp_slots_ = 0;
4436 }
4437}
4438
4439void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
4440 MachineRepresentation rep) {
4441 // Must be kept in sync with {MoveTempLocationTo}.
4442 DCHECK(!source->IsImmediate());
4443 move_cycle_.temps.emplace(masm());
4444 auto& temps = *move_cycle_.temps;
4445 // Temporarily exclude the reserved scratch registers while we pick one to
4446 // resolve the move cycle. Re-include them immediately afterwards as they
4447 // might be needed for the move to the temp location.
4448 temps.Exclude(move_cycle_.scratch_regs);
4449 if (!IsFloatingPoint(rep)) {
4450 if (temps.hasAvailable()) {
4451 Register scratch = move_cycle_.temps->Acquire();
4452 move_cycle_.scratch_reg.emplace(scratch);
4453 }
4454 }
4455
4456 temps.Include(move_cycle_.scratch_regs);
4457
4458 if (move_cycle_.scratch_reg.has_value()) {
4459 // A scratch register is available for this rep.
4460 // auto& scratch_reg = *move_cycle_.scratch_reg;
4461 AllocatedOperand scratch(LocationOperand::REGISTER, rep,
4462 move_cycle_.scratch_reg->code());
4463 AssembleMove(source, &scratch);
4464 } else {
4465 // The scratch registers are blocked by pending moves. Use the stack
4466 // instead.
4467 Push(source);
4468 }
4469}
4470
4471void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
4472 MachineRepresentation rep) {
4473 if (move_cycle_.scratch_reg.has_value()) {
4474 // auto& scratch_reg = *move_cycle_.scratch_reg;
4475 AllocatedOperand scratch(LocationOperand::REGISTER, rep,
4476 move_cycle_.scratch_reg->code());
4477 AssembleMove(&scratch, dest);
4478 } else {
4479 Pop(dest, rep);
4480 }
4481 // Restore the default state to release the {UseScratchRegisterScope} and to
4482 // prepare for the next cycle.
4483 move_cycle_ = MoveCycleState();
4484}
4485
4486void CodeGenerator::SetPendingMove(MoveOperands* move) {
4487 InstructionOperand* src = &move->source();
4488 InstructionOperand* dst = &move->destination();
4489 UseScratchRegisterScope temps(masm());
4490 if (src->IsConstant() && dst->IsFPLocationOperand()) {
4491 Register temp = temps.Acquire();
4492 move_cycle_.scratch_regs.set(temp);
4493 } else if (src->IsAnyStackSlot() || dst->IsAnyStackSlot()) {
4494 MipsOperandConverter g(this, nullptr);
4495 bool src_need_scratch = false;
4496 bool dst_need_scratch = false;
4497 if (src->IsAnyStackSlot()) {
4498 MemOperand src_mem = g.ToMemOperand(src);
4499 src_need_scratch =
4500 (!is_int16(src_mem.offset())) || (((src_mem.offset() & 0b111) != 0) &&
4501 !is_int16(src_mem.offset() + 4));
4502 }
4503 if (dst->IsAnyStackSlot()) {
4504 MemOperand dst_mem = g.ToMemOperand(dst);
4505 dst_need_scratch =
4506 (!is_int16(dst_mem.offset())) || (((dst_mem.offset() & 0b111) != 0) &&
4507 !is_int16(dst_mem.offset() + 4));
4508 }
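// Ld/Sd take a signed 16-bit immediate offset. A scratch register is
// needed when the offset does not fit, or when the slot is not 8-byte
// aligned and is accessed as two words whose second offset (+4) must also
// fit.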
4509 if (src_need_scratch || dst_need_scratch) {
4510 Register temp = temps.Acquire();
4511 move_cycle_.scratch_regs.set(temp);
4512 }
4513 }
4514}
4515
4516namespace {
4517
4518bool Is32BitOperand(InstructionOperand* operand) {
4519 DCHECK(operand->IsStackSlot() || operand->IsRegister());
4520 MachineRepresentation mr = LocationOperand::cast(operand)->representation();
4521 return mr == MachineRepresentation::kWord32 ||
4522 mr == MachineRepresentation::kCompressed ||
4523 mr == MachineRepresentation::kCompressedPointer;
4524}
4525
4526 // When we need only 32 bits, move only 32 bits; otherwise the destination
4527 // register's upper 32 bits may contain dirty data.
4528bool Use32BitMove(InstructionOperand* source, InstructionOperand* destination) {
4529 return Is32BitOperand(source) && Is32BitOperand(destination);
4530}
4531
4532} // namespace
4533
4534void CodeGenerator::AssembleMove(InstructionOperand* source,
4535 InstructionOperand* destination) {
4536 MipsOperandConverter g(this, nullptr);
4537 // Dispatch on the source and destination operand kinds. Not all
4538 // combinations are possible.
4539 if (source->IsRegister()) {
4540 DCHECK(destination->IsRegister() || destination->IsStackSlot());
4541 Register src = g.ToRegister(source);
4542 if (destination->IsRegister()) {
4543 __ mov(g.ToRegister(destination), src);
4544 } else {
4545 __ Sd(src, g.ToMemOperand(destination));
4546 }
4547 } else if (source->IsStackSlot()) {
4548 DCHECK(destination->IsRegister() || destination->IsStackSlot());
4549 MemOperand src = g.ToMemOperand(source);
4550 if (destination->IsRegister()) {
4551 if (Use32BitMove(source, destination)) {
4552 __ Lw(g.ToRegister(destination), src);
4553 } else {
4554 __ Ld(g.ToRegister(destination), src);
4555 }
4556 } else {
4557 Register temp = kScratchReg;
4558 __ Ld(temp, src);
4559 __ Sd(temp, g.ToMemOperand(destination));
4560 }
4561 } else if (source->IsConstant()) {
4562 Constant src = g.ToConstant(source);
4563 if (destination->IsRegister() || destination->IsStackSlot()) {
4564 Register dst =
4565 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
4566 switch (src.type()) {
4567 case Constant::kInt32:
4568 __ li(dst, Operand(src.ToInt32(), src.rmode()));
4569 break;
4570 case Constant::kFloat32:
4571 __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
4572 break;
4573 case Constant::kInt64:
4574 __ li(dst, Operand(src.ToInt64(), src.rmode()));
4575 break;
4576 case Constant::kFloat64:
4577 __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
4578 break;
4579 case Constant::kExternalReference:
4580 __ li(dst, src.ToExternalReference());
4581 break;
4582 case Constant::kHeapObject: {
4583 Handle<HeapObject> src_object = src.ToHeapObject();
4584 RootIndex index;
4585 if (IsMaterializableFromRoot(src_object, &index)) {
4586 __ LoadRoot(dst, index);
4587 } else {
4588 __ li(dst, src_object);
4589 }
4590 break;
4591 }
4592 case Constant::kCompressedHeapObject:
4593 UNREACHABLE();
4594 case Constant::kRpoNumber:
4595 UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
4596 }
4597 if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
4598 } else if (src.type() == Constant::kFloat32) {
4599 if (destination->IsFPStackSlot()) {
4600 MemOperand dst = g.ToMemOperand(destination);
4601 if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
4602 __ Sd(zero_reg, dst);
4603 } else {
4604 __ li(kScratchReg, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
4605 __ Sd(kScratchReg, dst);
4606 }
4607 } else {
4608 DCHECK(destination->IsFPRegister());
4609 FloatRegister dst = g.ToSingleRegister(destination);
4610 __ Move(dst, src.ToFloat32());
4611 }
4612 } else {
4613 DCHECK_EQ(Constant::kFloat64, src.type());
4614 DoubleRegister dst = destination->IsFPRegister()
4615 ? g.ToDoubleRegister(destination)
4616 : kScratchDoubleReg;
4617 __ Move(dst, src.ToFloat64().value());
4618 if (destination->IsFPStackSlot()) {
4619 __ Sdc1(dst, g.ToMemOperand(destination));
4620 }
4621 }
4622 } else if (source->IsFPRegister()) {
4623 MachineRepresentation rep = LocationOperand::cast(source)->representation();
4624 if (rep == MachineRepresentation::kSimd128) {
4625 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
4626 MSARegister src = g.ToSimd128Register(source);
4627 if (destination->IsSimd128Register()) {
4628 MSARegister dst = g.ToSimd128Register(destination);
4629 __ move_v(dst, src);
4630 } else {
4631 DCHECK(destination->IsSimd128StackSlot());
4632 __ st_b(src, g.ToMemOperand(destination));
4633 }
4634 } else {
4635 FPURegister src = g.ToDoubleRegister(source);
4636 if (destination->IsFPRegister()) {
4637 FPURegister dst = g.ToDoubleRegister(destination);
4638 __ Move(dst, src);
4639 } else {
4640 DCHECK(destination->IsFPStackSlot());
4641 __ Sdc1(src, g.ToMemOperand(destination));
4642 }
4643 }
4644 } else if (source->IsFPStackSlot()) {
4645 DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
4646 MemOperand src = g.ToMemOperand(source);
4647 MachineRepresentation rep = LocationOperand::cast(source)->representation();
4648 if (rep == MachineRepresentation::kSimd128) {
4649 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
4650 if (destination->IsSimd128Register()) {
4651 __ ld_b(g.ToSimd128Register(destination), src);
4652 } else {
4653 DCHECK(destination->IsSimd128StackSlot());
4654 MSARegister temp = kSimd128ScratchReg;
4655 __ ld_b(temp, src);
4656 __ st_b(temp, g.ToMemOperand(destination));
4657 }
4658 } else {
4659 if (destination->IsFPRegister()) {
4660 __ Ldc1(g.ToDoubleRegister(destination), src);
4661 } else {
4662 DCHECK(destination->IsFPStackSlot());
4663 FPURegister temp = kScratchDoubleReg;
4664 __ Ldc1(temp, src);
4665 __ Sdc1(temp, g.ToMemOperand(destination));
4666 }
4667 }
4668 } else {
4669 UNREACHABLE();
4670 }
4671}
4672
4673void CodeGenerator::AssembleSwap(InstructionOperand* source,
4674 InstructionOperand* destination) {
4675 MipsOperandConverter g(this, nullptr);
4676 // Dispatch on the source and destination operand kinds. Not all
4677 // combinations are possible.
4678 if (source->IsRegister()) {
4679 // Register-register.
4680 Register temp = kScratchReg;
4681 Register src = g.ToRegister(source);
4682 if (destination->IsRegister()) {
4683 Register dst = g.ToRegister(destination);
4684 __ Move(temp, src);
4685 __ Move(src, dst);
4686 __ Move(dst, temp);
4687 } else {
4688 DCHECK(destination->IsStackSlot());
4689 MemOperand dst = g.ToMemOperand(destination);
4690 __ mov(temp, src);
4691 __ Ld(src, dst);
4692 __ Sd(temp, dst);
4693 }
4694 } else if (source->IsStackSlot()) {
4695 DCHECK(destination->IsStackSlot());
4696 Register temp_0 = kScratchReg;
4697 Register temp_1 = kScratchReg2;
4698 MemOperand src = g.ToMemOperand(source);
4699 MemOperand dst = g.ToMemOperand(destination);
4700 __ Ld(temp_0, src);
4701 __ Ld(temp_1, dst);
4702 __ Sd(temp_0, dst);
4703 __ Sd(temp_1, src);
4704 } else if (source->IsFPRegister()) {
4705 MachineRepresentation rep = LocationOperand::cast(source)->representation();
4706 if (rep == MachineRepresentation::kSimd128) {
4707 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
4708 MSARegister temp = kSimd128ScratchReg;
4709 MSARegister src = g.ToSimd128Register(source);
4710 if (destination->IsSimd128Register()) {
4711 MSARegister dst = g.ToSimd128Register(destination);
4712 __ move_v(temp, src);
4713 __ move_v(src, dst);
4714 __ move_v(dst, temp);
4715 } else {
4716 DCHECK(destination->IsSimd128StackSlot());
4717 MemOperand dst = g.ToMemOperand(destination);
4718 __ move_v(temp, src);
4719 __ ld_b(src, dst);
4720 __ st_b(temp, dst);
4721 }
4722 } else {
4723 FPURegister temp = kScratchDoubleReg;
4724 FPURegister src = g.ToDoubleRegister(source);
4725 if (destination->IsFPRegister()) {
4726 FPURegister dst = g.ToDoubleRegister(destination);
4727 __ Move(temp, src);
4728 __ Move(src, dst);
4729 __ Move(dst, temp);
4730 } else {
4731 DCHECK(destination->IsFPStackSlot());
4732 MemOperand dst = g.ToMemOperand(destination);
4733 __ Move(temp, src);
4734 __ Ldc1(src, dst);
4735 __ Sdc1(temp, dst);
4736 }
4737 }
4738 } else if (source->IsFPStackSlot()) {
4739 DCHECK(destination->IsFPStackSlot());
4740 Register temp_0 = kScratchReg;
4741 MemOperand src0 = g.ToMemOperand(source);
4742 MemOperand src1(src0.rm(), src0.offset() + kInt64Size);
4743 MemOperand dst0 = g.ToMemOperand(destination);
4744 MemOperand dst1(dst0.rm(), dst0.offset() + kInt64Size);
4745 MachineRepresentation rep = LocationOperand::cast(source)->representation();
4746 if (rep == MachineRepresentation::kSimd128) {
4747 CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
4748 MSARegister temp_1 = kSimd128ScratchReg;
4749 __ ld_b(temp_1, dst0); // Save destination in temp_1.
4750 __ Ld(temp_0, src0); // Then use temp_0 to copy source to destination.
4751 __ Sd(temp_0, dst0);
4752 __ Ld(temp_0, src1);
4753 __ Sd(temp_0, dst1);
4754 __ st_b(temp_1, src0);
4755 } else {
4756 FPURegister temp_1 = kScratchDoubleReg;
4757 __ Ldc1(temp_1, dst0); // Save destination in temp_1.
4758 __ Ld(temp_0, src0); // Then use temp_0 to copy source to destination.
4759 __ Sdc1(temp_1, src0);
4760 __ Sd(temp_0, dst0);
4761 }
4762 } else {
4763 // No other combinations are possible.
4764 UNREACHABLE();
4765 }
4766}
4767
4768void CodeGenerator::AssembleJumpTable(base::Vector<Label*> targets) {
4769 // On 64-bit MIPS we emit the jump tables inline.
4770 UNREACHABLE();
4771}
4772
4773#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
4774#undef ASSEMBLE_ATOMIC_STORE_INTEGER
4775#undef ASSEMBLE_ATOMIC_BINOP
4776#undef ASSEMBLE_ATOMIC_BINOP_EXT
4777#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
4778#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
4779#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
4780#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
4781#undef ASSEMBLE_IEEE754_BINOP
4782#undef ASSEMBLE_IEEE754_UNOP
4783#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
4784
4785#undef TRACE
4786#undef __
4787
4788} // namespace compiler
4789} // namespace internal
4790} // namespace v8