v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
assembler-mips64.cc
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/codegen/mips64/assembler-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation, the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}

bool CpuFeatures::SupportsWasmSimd128() {
  // TODO(mips64): enable wasm simd after turboshaft isel supports simd
  // instructions.
  return false;
}

void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS64R6) && defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(_MIPS_MSA)
  supported_ |= 1u << MIPS_SIMD;
#else
  if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
#endif

  // Set a static value on whether SIMD is supported.
  // This variable is only used for certain archs to query SupportsWasmSimd128()
  // at runtime in builtins using an extern ref. Other callers should use
  // CpuFeatures::SupportsWasmSimd128().
  CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
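
// A standalone sketch (illustration only, not part of the V8 API) of how the
// bit set built above is consumed: each feature is one bit in supported_, so
// probing and querying reduce to a single OR and a single AND, which is what
// the shared CpuFeatures::IsSupported() helper does.
namespace feature_bits_sketch {
enum FeatureBit : unsigned { kFpuBit = 0, kMipsSimdBit = 1 };
constexpr unsigned WithFeature(unsigned bits, FeatureBit f) {
  return bits | (1u << f);
}
constexpr bool Has(unsigned bits, FeatureBit f) {
  return (bits & (1u << f)) != 0;
}
static_assert(Has(WithFeature(0u, kFpuBit), kFpuBit), "FPU bit set");
static_assert(!Has(WithFeature(0u, kFpuBit), kMipsSimdBit), "MSA bit clear");
}  // namespace feature_bits_sketch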

void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}

int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
      0,   // zero_reg
      1,   // at
      2,   // v0
      3,   // v1
      4,   // a0
      5,   // a1
      6,   // a2
      7,   // a3
      8,   // a4
      9,   // a5
      10,  // a6
      11,  // a7
      12,  // t0
      13,  // t1
      14,  // t2
      15,  // t3
      16,  // s0
      17,  // s1
      18,  // s2
      19,  // s3
      20,  // s4
      21,  // s5
      22,  // s6
      23,  // s7
      24,  // t8
      25,  // t9
      26,  // k0
      27,  // k1
      28,  // gp
      29,  // sp
      30,  // fp
      31,  // ra
  };
  return kNumbers[reg.code()];
}

Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
      zero_reg, at, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
      s0, s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra};
  return kRegisters[num];
}
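
// Note that the two tables above are inverse identity mappings: the MIPS
// numbering coincides with Register::code(), so ToNumber(ToRegister(n)) == n
// for 0 <= n < kNumRegisters. A hedged usage sketch (names from this file):
//
//   DCHECK_EQ(ToNumber(a0), 4);   // a0 is GPR 4
//   DCHECK_EQ(ToNumber(sp), 29);  // sp is GPR 29
//   DCHECK(ToRegister(31) == ra);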

// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}

bool RelocInfo::IsInConstantPool() { return false; }

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips64-inl.h for inlined constructors.

Operand::Operand(Handle<HeapObject> handle)
    : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
  value_.immediate = static_cast<intptr_t>(handle.address());
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_number_request_ = true;
  result.value_.heap_number_request = HeapNumberRequest(value);
  return result;
}

MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}

MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
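
// Hedged usage sketch for the second constructor (offset_minus_one is
// assumed from the OffsetAddend declaration in assembler-mips64.h): the
// element address base + unit * multiplier + addend folds into a single
// immediate at construction time, e.g. 8-byte slot 3 minus one byte gives
// offset_ == 23.
//
//   MemOperand slot(sp, /*unit=*/8, /*multiplier=*/3, offset_minus_one);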

void Assembler::AllocateAndInstallRequestedHeapNumbers(LocalIsolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_number_requests_.empty());
  for (auto& request : heap_number_requests_) {
    Handle<HeapObject> object;
    object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
        request.heap_number());
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
                              (sp.code() << kRtShift) |
                              (kSystemPointerSize & kImm16Mask);
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
                               (sp.code() << kRtShift) |
                               (-kSystemPointerSize & kImm16Mask);
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SD | (sp.code() << kRsShift) | (0 & kImm16Mask);
// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LD | (sp.code() << kRsShift) | (0 & kImm16Mask);

const Instr kLwRegFpOffsetPattern =
    LW | (fp.code() << kRsShift) | (0 & kImm16Mask);

const Instr kSwRegFpOffsetPattern =
    SW | (fp.code() << kRsShift) | (0 & kImm16Mask);

const Instr kLwRegFpNegOffsetPattern =
    LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);

const Instr kSwRegFpNegOffsetPattern =
    SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;

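// Compile-time sketch of the pattern matching that IsPush()/IsPop() perform
// further below: masking off the rt field makes Sd(r, MemOperand(sp, 0))
// match kPushRegPattern for any r. The raw numbers mirror the SD opcode
// (0x3F), sp (GPR 29), kRsShift (21), and the rt field mask; illustration
// only, not the declarations above.
namespace push_pattern_sketch {
constexpr uint32_t kSdToSpPattern = (0x3Fu << 26) | (29u << 21);
constexpr uint32_t SdToSp(uint32_t rt) {
  return kSdToSpPattern | (rt << 16);  // 16-bit offset is 0
}
static_assert((SdToSp(2) & ~(0x1Fu << 16)) == kSdToSpPattern, "sd v0, 0(sp)");
static_assert((SdToSp(31) & ~(0x1Fu << 16)) == kSdToSpPattern, "sd ra, 0(sp)");
}  // namespace push_pattern_sketch
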
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_({at, s0}) {
  if (CpuFeatures::IsSupported(MIPS_SIMD)) {
    EnableCpuFeature(MIPS_SIMD);
  }
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = v8_flags.force_long_branches
                           ? kMaxInt
                           : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = v8_flags.force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  GetCode(isolate->main_thread_local_isolate(), desc);
}
void Assembler::GetCode(LocalIsolate* isolate, CodeDesc* desc,
                        SafepointTableBuilderBase* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create InstructionStream objects (mostly in tests), add
  // another Align call here. It does no harm - the end of the
  // InstructionStream object is aligned to the (larger) kCodeAlignment anyway.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(InstructionStream::kMetadataAlignment);

  EmitForbiddenSlotInstruction();

  int code_comments_size = WriteCodeComments();

  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.

  AllocateAndInstallRequestedHeapNumbers(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  static constexpr int kConstantPoolSize = 0;
  static constexpr int kBuiltinJumpTableInfoSize = 0;
  const int instruction_size = pc_offset();
  const int builtin_jump_table_info_offset =
      instruction_size - kBuiltinJumpTableInfoSize;
  const int code_comments_offset =
      builtin_jump_table_info_offset - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, builtin_jump_table_info_offset,
                       reloc_info_offset);
}

void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than a single
  // instruction, that I am aware of.
  Align(4);
}

Register Assembler::GetRtReg(Instr instr) {
  return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}

Register Assembler::GetRsReg(Instr instr) {
  return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}

Register Assembler::GetRdReg(Instr instr) {
  return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}

uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; }

uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}

uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; }

uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}

uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }

uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}

uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; }

uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }

uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}

uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}

uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; }

uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; }

bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}

bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}

bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}

bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}

bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}

bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value of -1 in the instruction,
// which is an otherwise illegal value (a branch to offset -1 is an
// infinite loop). The instruction's 16-bit offset field addresses 32-bit
// words, but in code it is converted to an 18-bit value addressing bytes,
// hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;

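// A standalone model (illustration only, not V8 code) of the link chain just
// described: each unbound use of a label stores the position of the previous
// use in its own offset field, and kEndOfChain terminates the walk that
// bind_to() performs further below. Positions here are byte offsets of
// 4-byte instructions, with uses at 0, 4, and 12.
namespace label_chain_sketch {
constexpr int kLinks[4] = {kEndOfChain, 0, kEndOfChain, 4};  // index: pos / 4
constexpr int CountUses(int head) {
  int n = 0;
  for (int pos = head; pos != kEndOfChain; pos = kLinks[pos / 4]) ++n;
  return n;
}
static_assert(CountUses(12) == 3, "walk visits 12 -> 4 -> 0, then stops");
}  // namespace label_chain_sketch
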
bool Assembler::IsMsaBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs_field = GetRsField(instr);
  if (opcode == COP1) {
    switch (rs_field) {
      case BZ_V:
      case BZ_B:
      case BZ_H:
      case BZ_W:
      case BZ_D:
      case BNZ_V:
      case BNZ_B:
      case BNZ_H:
      case BNZ_W:
      case BNZ_D:
        return true;
      default:
        return false;
    }
  } else {
    return false;
  }
}

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All three variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}

bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}

bool Assembler::IsNal(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
}

bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}

bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label constant in reg-exp engine.
}

bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }

bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; }

bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}

bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}

bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}

bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rd_field = GetRd(instr);
  uint32_t rs_field = GetRs(instr);
  uint32_t rt_field = GetRt(instr);
  uint32_t rd_reg = static_cast<uint32_t>(rd.code());
  uint32_t rs_reg = static_cast<uint32_t>(rs.code());
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is an OR with zero_reg argument (aka MOV).
  bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg &&
             rs_field == rs_reg && rt_field == 0;
  return res;
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
         (opcode == SPECIAL && rt_field == 0 &&
          ((function_field == JALR) ||
           (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}

bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; }

bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}

bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}

bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate.
  return opcode == ORI;
}

bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK_LT(type, 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && sa == type);

  return ret;
}
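
// Compile-time sketch of the encodings this predicate accepts (raw SLL field
// layout; illustration only): the canonical nop is the all-zero word
// sll(zero_reg, zero_reg, 0), and a type-t marker nop is sll(zero_reg, at, t)
// with rt = at = 1, which stays clear of the ssnop (sa = 1) and ehb (sa = 3)
// encodings that use rt = zero_reg.
namespace nop_sketch {
constexpr uint32_t Sll(uint32_t rd, uint32_t rt, uint32_t sa) {
  // The SPECIAL opcode and the SLL funct are both 0.
  return (rt << 16) | (rd << 11) | (sa << 6);
}
static_assert(Sll(0, 0, 0) == 0u, "plain nop is the zero word");
static_assert(Sll(0, 1, 5) == 0x00010140u, "type-5 marker nop");
}  // namespace nop_sketch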

int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}

bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}

int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}

Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
                     (offset & kImm16Mask);

  return temp_instr;
}

bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}

Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}

Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}

static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}

static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
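
// Worked example of the arithmetic-shift trick above as compile-time checks
// (plain numbers, independent of the declarations in this file): for a
// 16-bit branch field, shifting the field to the top of a 32-bit word and
// arithmetically shifting back by two fewer bits both sign-extends it and
// scales words to bytes.
namespace branch_offset_sketch {
constexpr int32_t SignExtendAndScale16(uint32_t field16) {
  int32_t imm = static_cast<int32_t>((field16 & 0xFFFF) << 16);
  return imm >> 14;  // arithmetic shift: sign extension plus the * 4 scaling
}
static_assert(SignExtendAndScale16(0x0001) == 4, "forward one instruction");
static_assert(SignExtendAndScale16(0xFFFE) == -8, "back two instructions");
}  // namespace branch_offset_sketch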

int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
         IsMov(instr, t8, ra));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsMov(instr, t8, ra)) {
    int32_t imm32;
    if (IsAddImmediate(instr_at(pos + kInstrSize))) {
      Instr instr_daddiu = instr_at(pos + kInstrSize);
      imm32 = instr_daddiu & static_cast<int32_t>(kImm16Mask);
      imm32 = (imm32 << 16) >> 16;
      return imm32;
    }

    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));
    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm32 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    }
    return pos + Assembler::kLongBranchPCOffset + imm32;
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      int32_t imm32;
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
      if (imm32 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      return pos + Assembler::kLongBranchPCOffset + imm32;
    } else {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      // TODO(plind) create named constants for shift values.
      int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
      imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
      imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
      // Sign extend address.
      imm >>= 16;

      if (imm == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      } else {
        uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_start_ + pos);
        DCHECK(instr_address - imm < INT_MAX);
        int delta = static_cast<int>(instr_address - imm);
        DCHECK(pos > delta);
        return pos - delta;
      }
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}

static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK_EQ(imm & 3, 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}

void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated InstructionStream
    // object.
    instr_at_put(
        pos, target_pos + (InstructionStream::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    if (IsNal(instr_at(pos + kInstrSize))) {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 2 * kInstrSize);
      DCHECK(IsLui(instr_lui));
      DCHECK(IsOri(instr_ori));
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);
      if (is_int16(imm + Assembler::kLongBranchPCOffset -
                   Assembler::kBranchPCOffset)) {
        // Optimize by converting to regular branch and link with 16-bit
        // offset.
        Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
        instr_b = SetBranchOffset(pos, target_pos, instr_b);
        // Correct ra register to point to one instruction after jalr from
        // MacroAssembler::BranchAndLinkLong.
        Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                        kOptimizedBranchAndLinkLongReturnOffset;

        instr_at_put(pos, instr_b);
        instr_at_put(pos + 1 * kInstrSize, instr_a);
      } else {
        instr_lui &= ~kImm16Mask;
        instr_ori &= ~kImm16Mask;

        instr_at_put(pos + 0 * kInstrSize,
                     instr_lui | ((imm >> kLuiShift) & kImm16Mask));
        instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
      }
    } else {
      Instr instr_lui = instr_at(pos + 0 * kInstrSize);
      Instr instr_ori = instr_at(pos + 1 * kInstrSize);
      Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
      DCHECK(IsOri(instr_ori));
      DCHECK(IsOri(instr_ori2));

      uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;
      instr_ori2 &= ~kImm16Mask;

      instr_at_put(pos + 0 * kInstrSize,
                   instr_lui | ((imm >> 32) & kImm16Mask));
      instr_at_put(pos + 1 * kInstrSize,
                   instr_ori | ((imm >> 16) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
    }
  } else if (IsMov(instr, t8, ra)) {
    if (IsAddImmediate(instr_at(pos + kInstrSize))) {
      Instr instr_daddiu = instr_at(pos + kInstrSize);
      int32_t imm_short = target_pos - pos;
      DCHECK(is_int16(imm_short));

      instr_daddiu &= ~kImm16Mask;
      instr_at_put(pos + kInstrSize, instr_daddiu | (imm_short & kImm16Mask));
      return;
    }

    Instr instr_lui = instr_at(pos + 2 * kInstrSize);
    Instr instr_ori = instr_at(pos + 3 * kInstrSize);
    DCHECK(IsLui(instr_lui));
    DCHECK(IsOri(instr_ori));

    int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset);

    if (is_int16(imm_short)) {
      // Optimize by converting to regular branch with 16-bit
      // offset.
      Instr instr_b = BEQ;
      instr_b = SetBranchOffset(pos, target_pos, instr_b);

      Instr instr_j = instr_at(pos + 5 * kInstrSize);
      Instr instr_branch_delay;

      if (IsJump(instr_j)) {
        // Case when branch delay slot is protected.
        instr_branch_delay = nopInstr;
      } else {
        // Case when branch delay slot is used.
        instr_branch_delay = instr_at(pos + 7 * kInstrSize);
      }
      instr_at_put(pos, instr_b);
      instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
    } else {
      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
      DCHECK_EQ(imm & 3, 0);

      instr_lui &= ~kImm16Mask;
      instr_ori &= ~kImm16Mask;

      instr_at_put(pos + 2 * kInstrSize,
                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
      instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
    }
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK_EQ(imm28 & 3, 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}

void Assembler::print(const Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l;
    l.link_to(L->pos());
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}

void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK_NE(trampoline_pos, kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr) || IsMov(instr, t8, ra));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}

void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}

bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}

bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return ((pc_offset() - L->pos()) <
          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}

bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}

int Assembler::BranchOffset(Instr instr) {
  // Pre-R6 branches and most R6 branches have a 16-bit offset field.
  int bits = OffsetSize::kOffset16;

  if (kArchVariant == kMips64r6) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
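
// The maxima this computes, as compile-time checks (byte offsets;
// illustration only): the offset field counts 4-byte instructions, so
// 16-bit fields reach about +/-128 KiB, 21-bit about +/-4 MiB, and 26-bit
// about +/-128 MiB.
namespace branch_range_sketch {
constexpr int MaxBranchOffsetFor(int bits) { return (1 << (bits + 2 - 1)) - 1; }
static_assert(MaxBranchOffsetFor(16) == 131071, "kOffset16 reach");
static_assert(MaxBranchOffsetFor(21) == 4194303, "kOffset21 reach");
static_assert(MaxBranchOffsetFor(26) == 134217727, "kOffset26 reach");
}  // namespace branch_range_sketch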

// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNoInfo(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 Register rd, uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 uint16_t msb, uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 FPURegister ft, FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift) |
                (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
                                 FPUControlRegister fs, SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}

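// Compile-time sketch of the R-type word the first overload above composes,
// using raw MIPS field positions (the numeric shifts mirror kRsShift = 21,
// kRtShift = 16, kRdShift = 11, kSaShift = 6; illustration only). Encoding
// addu v0, a0, a1: opcode SPECIAL (0), rs = a0 (4), rt = a1 (5), rd = v0 (2),
// sa = 0, funct ADDU (0x21).
namespace rtype_sketch {
constexpr uint32_t EncodeRType(uint32_t op, uint32_t rs, uint32_t rt,
                               uint32_t rd, uint32_t sa, uint32_t funct) {
  return (op << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) | funct;
}
static_assert(EncodeRType(0, 4, 5, 2, 0, 0x21) == 0x00851021u,
              "addu v0, a0, a1");
}  // namespace rtype_sketch
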
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt,
                                  int32_t offset9, int bit6,
                                  SecondaryField func) {
  DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) &&
         is_uint1(bit6));
  Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) |
                ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift |
                func;
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
                (j & kImm16Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}

void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// MSA instructions
void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8));
  Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df,
                              int32_t imm5, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  DCHECK((operation == MAXI_S) || (operation == MINI_S) ||
                 (operation == CEQI) || (operation == CLTI_S) ||
                 (operation == CLEI_S)
             ? is_int5(imm5)
             : is_uint5(imm5));
  Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
                               uint32_t m, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
  Instr instr = MSA | operation | df | (m << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
                               int32_t imm10, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wd.is_valid() && is_int10(imm10));
  Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
                (wd.code() << kWdShift);
  emit(instr);
}

template <typename RegType>
void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
                              RegType t, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | df | (t.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

template <typename DstType, typename SrcType>
void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
                               uint32_t n, SrcType src, DstType dst) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
  Instr instr = MSA | operation | df | (n << kWtShift) |
                (src.code() << kWsShift) | (dst.code() << kWdShift) |
                MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
                               MSARegister wt, MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  DCHECK_LT(df, 2);
  Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
  Instr instr = MSA | operation | (wt.code() << kWtShift) |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
                                Register rs, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
  Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
                (rs.code() << kWsShift) | (wd.code() << kWdShift);
  emit(instr);
}

void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
                              MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
                               MSARegister ws, MSARegister wd) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
                (ws.code() << kWsShift) | (wd.code() << kWdShift) |
                MSA_VEC_2R_2RF_MINOR;
  emit(instr);
}

void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
                                  int32_t offset16) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(wt.is_valid() && is_int16(offset16));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Instr instr =
      COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}

uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
  DCHECK_EQ(imm & 3, 0);

  return imm;
}

uint64_t Assembler::jump_offset(Label* L) {
  int64_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      return kEndOfJumpChain;
    }
  }
  int64_t imm = target_pos - (pc_offset() + pad);
  DCHECK_EQ(imm & 3, 0);

  return static_cast<uint64_t>(imm);
}

uint64_t Assembler::branch_long_offset(Label* L) {
  int64_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
  DCHECK_EQ(offset & 3, 0);

  return static_cast<uint64_t>(offset);
}

int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK_EQ(offset & 3, 0);

  return offset;
}

void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (InstructionStream::kHeaderSize -
                                          kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK_EQ(imm18 & 3, 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}

//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); }

void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); }

void Assembler::bc(int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::balc(int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
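
// Compile-time sketch of the I-type word beq emits via GenInstrImmediate
// (raw field layout; illustration only): BEQ opcode 4, rs = a0 (4),
// rt = a1 (5), and the word offset in the low 16 bits, so
// beq a0, a1, <4 words> encodes as 0x10850004.
namespace beq_sketch {
constexpr uint32_t EncodeBeq(uint32_t rs, uint32_t rt, uint32_t off16) {
  return (4u << 26) | (rs << 21) | (rt << 16) | (off16 & 0xFFFFu);
}
static_assert(EncodeBeq(4, 5, 4) == 0x10850004u, "beq a0, a1, +4 words");
static_assert(EncodeBeq(4, 5, 4) >> 26 == 4u, "IsBeq: opcode field is BEQ");
}  // namespace beq_sketch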

void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rt != zero_reg);
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK_NE(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  DCHECK(rs != ra);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}

void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rt != zero_reg);
  DCHECK(rt != ra);
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}

void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}

void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}

void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
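
// beqc/bnec share their opcodes (POP10/POP30) with bovc/bnvc and
// beqzalc/bnezalc; R6 hardware disambiguates purely by register numbering.
// That is why beqc/bnec above swap operands to force rs < rt, bovc/bnvc
// force rs >= rt, and IsBeqc()/IsBnec() earlier test rs != 0 && rs < rt.
// A sketch of the decode rule (register numbers only; illustration, not V8
// code):
namespace pop10_sketch {
constexpr bool DecodesAsBeqc(unsigned rs, unsigned rt) {
  return rs != 0 && rs < rt;  // rs == 0 reads as beqzalc, rs >= rt as bovc
}
static_assert(DecodesAsBeqc(4, 5), "a0, a1 order encodes BEQC");
static_assert(!DecodesAsBeqc(5, 4), "swapped order would decode as BOVC");
}  // namespace pop10_sketch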

void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK_EQ(kArchVariant, kMips64r6);
  DCHECK(rs != zero_reg);
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1683
1684void Assembler::j(int64_t target) {
1685 // Deprecated. Use PC-relative jumps instead.
1686 UNREACHABLE();
1687}
1688
1689void Assembler::j(Label* target) {
1690 // Deprecated. Use PC-relative jumps instead.
1691 UNREACHABLE();
1692}
1693
1694void Assembler::jal(Label* target) {
1695 // Deprecated. Use PC-relative jumps instead.
1696 UNREACHABLE();
1697}
1698
1699void Assembler::jal(int64_t target) {
1700 // Deprecated. Use PC-relative jumps instead.
1701 UNREACHABLE();
1702}
1703
1704void Assembler::jr(Register rs) {
1705 if (kArchVariant != kMips64r6) {
1706 BlockTrampolinePoolScope block_trampoline_pool(this);
1707 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1708 BlockTrampolinePoolFor(1); // For associated delay slot.
1709 } else {
1710 jalr(rs, zero_reg);
1711 }
1712}
1713
1714void Assembler::jalr(Register rs, Register rd) {
1715 DCHECK(rs.code() != rd.code());
1716 BlockTrampolinePoolScope block_trampoline_pool(this);
1717 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1718 BlockTrampolinePoolFor(1); // For associated delay slot.
1719}
1720
1721void Assembler::jic(Register rt, int16_t offset) {
1722 DCHECK_EQ(kArchVariant, kMips64r6);
1723 GenInstrImmediate(POP66, zero_reg, rt, offset);
1724}
1725
1726void Assembler::jialc(Register rt, int16_t offset) {
1727 DCHECK_EQ(kArchVariant, kMips64r6);
1728 GenInstrImmediate(POP76, zero_reg, rt, offset);
1729}
1730
1731// -------Data-processing-instructions---------
1732
1733// Arithmetic.
1734
1735void Assembler::addu(Register rd, Register rs, Register rt) {
1736 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1737}
1738
1739void Assembler::addiu(Register rd, Register rs, int32_t j) {
1740 GenInstrImmediate(ADDIU, rs, rd, j);
1741}
1742
1743void Assembler::subu(Register rd, Register rs, Register rt) {
1744 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1745}
1746
1747void Assembler::mul(Register rd, Register rs, Register rt) {
1748 if (kArchVariant == kMips64r6) {
1749 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1750 } else {
1751 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1752 }
1753}
1754
1755void Assembler::muh(Register rd, Register rs, Register rt) {
1756 DCHECK_EQ(kArchVariant, kMips64r6);
1757 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1758}
1759
1760void Assembler::mulu(Register rd, Register rs, Register rt) {
1761 DCHECK_EQ(kArchVariant, kMips64r6);
1762 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1763}
1764
1765void Assembler::muhu(Register rd, Register rs, Register rt) {
1766 DCHECK_EQ(kArchVariant, kMips64r6);
1767 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1768}
1769
1770void Assembler::dmul(Register rd, Register rs, Register rt) {
1771 DCHECK_EQ(kArchVariant, kMips64r6);
1772 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
1773}
1774
1775void Assembler::dmuh(Register rd, Register rs, Register rt) {
1776 DCHECK_EQ(kArchVariant, kMips64r6);
1777 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
1778}
1779
1780void Assembler::dmulu(Register rd, Register rs, Register rt) {
1781 DCHECK_EQ(kArchVariant, kMips64r6);
1782 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
1783}
1784
1785void Assembler::dmuhu(Register rd, Register rs, Register rt) {
1786 DCHECK_EQ(kArchVariant, kMips64r6);
1787 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
1788}
1789
1790void Assembler::mult(Register rs, Register rt) {
1791 DCHECK_NE(kArchVariant, kMips64r6);
1792 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1793}
1794
1795void Assembler::multu(Register rs, Register rt) {
1796 DCHECK_NE(kArchVariant, kMips64r6);
1797 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1798}
1799
1800void Assembler::daddiu(Register rd, Register rs, int32_t j) {
1801 GenInstrImmediate(DADDIU, rs, rd, j);
1802}
1803
1804void Assembler::div(Register rs, Register rt) {
1805 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1806}
1807
1808void Assembler::div(Register rd, Register rs, Register rt) {
1809 DCHECK_EQ(kArchVariant, kMips64r6);
1810 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1811}
1812
1813void Assembler::mod(Register rd, Register rs, Register rt) {
1814 DCHECK_EQ(kArchVariant, kMips64r6);
1815 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1816}
1817
1818void Assembler::divu(Register rs, Register rt) {
1819 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1820}
1821
1822void Assembler::divu(Register rd, Register rs, Register rt) {
1823 DCHECK_EQ(kArchVariant, kMips64r6);
1824 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1825}
1826
1827void Assembler::modu(Register rd, Register rs, Register rt) {
1828 DCHECK_EQ(kArchVariant, kMips64r6);
1829 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
1830}
1831
1832void Assembler::daddu(Register rd, Register rs, Register rt) {
1833 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
1834}
1835
1836void Assembler::dsubu(Register rd, Register rs, Register rt) {
1837 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
1838}
1839
1840void Assembler::dmult(Register rs, Register rt) {
1841 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
1842}
1843
1844void Assembler::dmultu(Register rs, Register rt) {
1845 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
1846}
1847
1848void Assembler::ddiv(Register rs, Register rt) {
1849 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
1850}
1851
1852void Assembler::ddiv(Register rd, Register rs, Register rt) {
1853 DCHECK_EQ(kArchVariant, kMips64r6);
1854 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
1855}
1856
1857void Assembler::dmod(Register rd, Register rs, Register rt) {
1858 DCHECK_EQ(kArchVariant, kMips64r6);
1859 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
1860}
1861
1862void Assembler::ddivu(Register rs, Register rt) {
1863 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
1864}
1865
1866void Assembler::ddivu(Register rd, Register rs, Register rt) {
1867 DCHECK_EQ(kArchVariant, kMips64r6);
1868 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
1869}
1870
1871void Assembler::dmodu(Register rd, Register rs, Register rt) {
1872 DCHECK_EQ(kArchVariant, kMips64r6);
1873 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
1874}
1875
1876// Logical.
1877
1878void Assembler::and_(Register rd, Register rs, Register rt) {
1879 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1880}
1881
1882void Assembler::andi(Register rt, Register rs, int32_t j) {
1883 DCHECK(is_uint16(j));
1884 GenInstrImmediate(ANDI, rs, rt, j);
1885}
1886
1887void Assembler::or_(Register rd, Register rs, Register rt) {
1888 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1889}
1890
1891void Assembler::ori(Register rt, Register rs, int32_t j) {
1892 DCHECK(is_uint16(j));
1893 GenInstrImmediate(ORI, rs, rt, j);
1894}
1895
1896void Assembler::xor_(Register rd, Register rs, Register rt) {
1897 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1898}
1899
1900void Assembler::xori(Register rt, Register rs, int32_t j) {
1901 DCHECK(is_uint16(j));
1902 GenInstrImmediate(XORI, rs, rt, j);
1903}
1904
1905void Assembler::nor(Register rd, Register rs, Register rt) {
1906 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1907}
1908
1909// Shifts.
1910void Assembler::sll(Register rd, Register rt, uint16_t sa,
1911 bool coming_from_nop) {
1912 // Don't allow nops of the form 'sll zero_reg, zero_reg, 0' to be generated
1913 // through this method; they must be generated using
1914 // nop(int/NopMarkerTypes).
1915 DCHECK(coming_from_nop || (rd != zero_reg && rt != zero_reg));
1916 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
1917}
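// For reference: SPECIAL and the SLL funct are both zero, so a
// 'sll zero_reg, zero_reg, 0' would encode as the all-zero word, the
// canonical MIPS nop; nop(type) instead shifts a non-zero source register by
// 'type', so the marker nops it emits stay distinguishable from one another.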
1918
1919void Assembler::sllv(Register rd, Register rt, Register rs) {
1920 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1921}
1922
1923void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1924 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
1925}
1926
1927void Assembler::srlv(Register rd, Register rt, Register rs) {
1928 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1929}
1930
1931void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1932 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
1933}
1934
1935void Assembler::srav(Register rd, Register rt, Register rs) {
1936 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1937}
1938
1939void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1940 // Should be called via MacroAssembler::Ror.
1941 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1942 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
1943 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
1944 (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1945 emit(instr);
1946}
1947
1948void Assembler::rotrv(Register rd, Register rt, Register rs) {
1949 // Should be called via MacroAssembler::Ror.
1950 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1951 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
1952 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
1953 (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1954 emit(instr);
1955}
1956
1957void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
1958 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
1959}
1960
1961void Assembler::dsllv(Register rd, Register rt, Register rs) {
1962 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
1963}
1964
1965void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
1966 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
1967}
1968
1969void Assembler::dsrlv(Register rd, Register rt, Register rs) {
1970 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
1971}
1972
1973void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
1974 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1975 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
1976 (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
1977 emit(instr);
1978}
1979
1980void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
1981 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1982 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
1983 (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
1984 emit(instr);
1985}
1986
1987void Assembler::drotrv(Register rd, Register rt, Register rs) {
1988 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1989 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
1990 (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
1991 emit(instr);
1992}
1993
1994void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
1995 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
1996}
1997
1998void Assembler::dsrav(Register rd, Register rt, Register rs) {
1999 GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
2000}
2001
2002void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
2003 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
2004}
2005
2006void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
2007 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
2008}
2009
2010void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
2011 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
2012}
2013
2014void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
2015 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2016 DCHECK_LE(sa, 3);
2017 DCHECK_EQ(kArchVariant, kMips64r6);
2018 Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2019 rd.code() << kRdShift | sa << kSaShift | LSA;
2020 emit(instr);
2021}
2022
2023void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
2024 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
2025 DCHECK_LE(sa, 3);
2026 DCHECK_EQ(kArchVariant, kMips64r6);
2027 Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
2028 rd.code() << kRdShift | sa << kSaShift | DLSA;
2029 emit(instr);
2030}
2031
2032// ------------Memory-instructions-------------
2033
2034void Assembler::AdjustBaseAndOffset(MemOperand* src,
2035 OffsetAccessType access_type,
2036 int second_access_add_to_offset) {
2037 // This method is used to adjust the base register and offset pair
2038 // for a load/store when the offset doesn't fit into int16_t.
2039 // It is assumed that 'base + offset' is sufficiently aligned for memory
2040 // operands that are machine word in size or smaller. For doubleword-sized
2041 // operands it's assumed that 'base' is a multiple of 8, while 'offset'
2042 // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
2043 // and spilled variables on the stack accessed relative to the stack
2044 // pointer register).
2045 // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
2046
2047 bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
2048 bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
2049 DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
2050
2051 // is_int16 must be passed a signed value, hence the static cast below.
2052 if (is_int16(src->offset()) &&
2053 (!two_accesses || is_int16(static_cast<int32_t>(
2054 src->offset() + second_access_add_to_offset)))) {
2055 // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
2056 // value) fits into int16_t.
2057 return;
2058 }
2059
2060#ifdef DEBUG
2061 // Remember the "(mis)alignment" of 'offset'; it will be checked at the end.
2062 uint32_t misalignment = src->offset() & (kDoubleSize - 1);
2063#endif
2064
2065 // Do not load the whole 32-bit 'offset' if it can be represented as
2066 // a sum of two 16-bit signed offsets. This can save an instruction or two.
2067 // To simplify matters, only do this for a symmetric range of offsets from
2068 // about -64KB to about +64KB, allowing further addition of 4 when accessing
2069 // 64-bit variables with two 32-bit accesses.
2070 constexpr int32_t kMinOffsetForSimpleAdjustment =
2071 0x7FF8; // Max int16_t that's a multiple of 8.
2072 constexpr int32_t kMaxOffsetForSimpleAdjustment =
2073 2 * kMinOffsetForSimpleAdjustment;
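// Worked example: offset 40000 exceeds the int16_t maximum of 32767, so the
// simple adjustment below emits
// daddiu scratch, base, 32760 // kMinOffsetForSimpleAdjustment
// and rewrites the operand as 7240(scratch); since 32760 is a multiple of 8,
// the (mis)alignment of 'offset' is preserved.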
2074
2075 UseScratchRegisterScope temps(this);
2076 Register scratch = temps.Acquire();
2077 // Must not overwrite the register 'base' while loading 'offset'.
2078 DCHECK(src->rm() != scratch);
2079
2080 if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
2081 daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
2082 src->offset_ -= kMinOffsetForSimpleAdjustment;
2083 } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
2084 src->offset() < 0) {
2085 daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
2086 src->offset_ += kMinOffsetForSimpleAdjustment;
2087 } else if (kArchVariant == kMips64r6) {
2088 // On r6 take advantage of the daui instruction, e.g.:
2089 // daui scratch, base, offset_high
2090 // [dahi scratch, 1] // When `offset` is close to +2GB.
2091 // lw reg_lo, offset_low(scratch)
2092 // [lw reg_hi, (offset_low+4)(scratch)] // If misaligned 64-bit load.
2093 // or when offset_low+4 overflows int16_t:
2094 // daui scratch, base, offset_high
2095 // daddiu scratch, scratch, 8
2096 // lw reg_lo, (offset_low-8)(scratch)
2097 // lw reg_hi, (offset_low-4)(scratch)
2098 int16_t offset_low = static_cast<uint16_t>(src->offset());
2099 int32_t offset_low32 = offset_low;
2100 int16_t offset_high = static_cast<uint16_t>(src->offset() >> 16);
2101 bool increment_hi16 = offset_low < 0;
2102 bool overflow_hi16 = false;
2103
2104 if (increment_hi16) {
2105 offset_high++;
2106 overflow_hi16 = (offset_high == -32768);
2107 }
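// E.g. offset 0x12348000: offset_low = -32768 and offset_high = 0x1234;
// offset_low is negative, so offset_high becomes 0x1235 and daui adds
// 0x12350000, giving base + 0x12350000 - 0x8000 == base + 0x12348000.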
2108 daui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
2109
2110 if (overflow_hi16) {
2111 dahi(scratch, 1);
2112 }
2113
2114 if (two_accesses && !is_int16(static_cast<int32_t>(
2115 offset_low32 + second_access_add_to_offset))) {
2116 // Avoid overflow in the 16-bit offset of the load/store instruction when
2117 // adding 'second_access_add_to_offset' (4 by default).
2118 daddiu(scratch, scratch, kDoubleSize);
2119 offset_low32 -= kDoubleSize;
2120 }
2121
2122 src->offset_ = offset_low32;
2123 } else {
2124 // Do not load the whole 32-bit 'offset' if it can be represented as
2125 // a sum of three 16-bit signed offsets. This can save an instruction.
2126 // To simplify matters, only do this for a symmetric range of offsets from
2127 // about -96KB to about +96KB, allowing further addition of 4 when accessing
2128 // 64-bit variables with two 32-bit accesses.
2129 constexpr int32_t kMinOffsetForMediumAdjustment =
2130 2 * kMinOffsetForSimpleAdjustment;
2131 constexpr int32_t kMaxOffsetForMediumAdjustment =
2132 3 * kMinOffsetForSimpleAdjustment;
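// Worked example: offset 70000 takes two daddiu adjustments of 32760 each,
// leaving 70000 - 65520 = 4480, which fits into int16_t.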
2133 if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
2134 daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
2135 daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
2136 src->offset_ -= kMinOffsetForMediumAdjustment;
2137 } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
2138 src->offset() < 0) {
2139 daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
2140 daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
2141 src->offset_ += kMinOffsetForMediumAdjustment;
2142 } else {
2143 // Now that all shorter options have been exhausted, load the full 32-bit
2144 // offset.
2145 int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
2146 lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
2147 ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
2148 daddu(scratch, scratch, src->rm());
2149 src->offset_ -= loaded_offset;
2150 }
2151 }
2152 src->rm_ = scratch;
2153
2154 DCHECK(is_int16(src->offset()));
2155 if (two_accesses) {
2156 DCHECK(is_int16(
2157 static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
2158 }
2159 DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
2160}
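// Usage sketch (illustrative): a caller splitting a doubleword access into
// two word accesses would do roughly
// MemOperand tmp = src;
// AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
// lw(reg_lo, tmp);
// lw(reg_hi, MemOperand(tmp.rm(), tmp.offset() + 4));
// after which both offsets are guaranteed to fit into int16_t.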
2161
2162void Assembler::lb(Register rd, const MemOperand& rs) {
2163 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
2164}
2165
2166void Assembler::lbu(Register rd, const MemOperand& rs) {
2167 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
2168}
2169
2170void Assembler::lh(Register rd, const MemOperand& rs) {
2171 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
2172}
2173
2174void Assembler::lhu(Register rd, const MemOperand& rs) {
2175 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
2176}
2177
2178void Assembler::lw(Register rd, const MemOperand& rs) {
2179 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
2180}
2181
2182void Assembler::lwu(Register rd, const MemOperand& rs) {
2183 GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
2184}
2185
2186void Assembler::lwl(Register rd, const MemOperand& rs) {
2187 DCHECK(is_int16(rs.offset_));
2188 DCHECK_EQ(kArchVariant, kMips64r2);
2189 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
2190}
2191
2192void Assembler::lwr(Register rd, const MemOperand& rs) {
2193 DCHECK(is_int16(rs.offset_));
2194 DCHECK_EQ(kArchVariant, kMips64r2);
2195 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
2196}
2197
2198void Assembler::sb(Register rd, const MemOperand& rs) {
2199 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
2200}
2201
2202void Assembler::sh(Register rd, const MemOperand& rs) {
2203 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
2204}
2205
2206void Assembler::sw(Register rd, const MemOperand& rs) {
2207 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
2208}
2209
2210void Assembler::swl(Register rd, const MemOperand& rs) {
2211 DCHECK(is_int16(rs.offset_));
2212 DCHECK_EQ(kArchVariant, kMips64r2);
2213 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
2214}
2215
2216void Assembler::swr(Register rd, const MemOperand& rs) {
2217 DCHECK(is_int16(rs.offset_));
2218 DCHECK_EQ(kArchVariant, kMips64r2);
2219 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
2220}
2221
2222void Assembler::ll(Register rd, const MemOperand& rs) {
2223 if (kArchVariant == kMips64r6) {
2224 DCHECK(is_int9(rs.offset_));
2225 GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
2226 } else {
2227 DCHECK_EQ(kArchVariant, kMips64r2);
2228 DCHECK(is_int16(rs.offset_));
2229 GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
2230 }
2231}
2232
2233void Assembler::lld(Register rd, const MemOperand& rs) {
2234 if (kArchVariant == kMips64r6) {
2235 DCHECK(is_int9(rs.offset_));
2236 GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LLD_R6);
2237 } else {
2238 DCHECK_EQ(kArchVariant, kMips64r2);
2239 DCHECK(is_int16(rs.offset_));
2240 GenInstrImmediate(LLD, rs.rm(), rd, rs.offset_);
2241 }
2242}
2243
2244void Assembler::sc(Register rd, const MemOperand& rs) {
2245 if (kArchVariant == kMips64r6) {
2246 DCHECK(is_int9(rs.offset_));
2247 GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
2248 } else {
2249 DCHECK_EQ(kArchVariant, kMips64r2);
2250 GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
2251 }
2252}
2253
2254void Assembler::scd(Register rd, const MemOperand& rs) {
2255 if (kArchVariant == kMips64r6) {
2256 DCHECK(is_int9(rs.offset_));
2257 GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SCD_R6);
2258 } else {
2259 DCHECK_EQ(kArchVariant, kMips64r2);
2260 GenInstrImmediate(SCD, rs.rm(), rd, rs.offset_);
2261 }
2262}
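// Usage sketch (illustrative): ll/sc implement atomic read-modify-write
// loops of roughly this shape:
// bind(&retry);
// ll(t0, MemOperand(base, 0)); // Load linked.
// addiu(t0, t0, 1); // Modify.
// sc(t0, MemOperand(base, 0)); // t0 = 1 on success, 0 on interference.
// beq(t0, zero_reg, &retry);
// nop(); // Branch delay slot.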
2263
2264void Assembler::lui(Register rd, int32_t j) {
2265 DCHECK(is_uint16(j) || is_int16(j));
2266 GenInstrImmediate(LUI, zero_reg, rd, j);
2267}
2268
2269void Assembler::aui(Register rt, Register rs, int32_t j) {
2270 // This instruction uses the same opcode as 'lui'; the difference in
2271 // encoding is that 'lui' has zero_reg in the rs field.
2272 DCHECK(is_uint16(j));
2273 GenInstrImmediate(LUI, rs, rt, j);
2274}
2275
2276void Assembler::daui(Register rt, Register rs, int32_t j) {
2277 DCHECK(is_uint16(j));
2278 DCHECK(rs != zero_reg);
2279 GenInstrImmediate(DAUI, rs, rt, j);
2280}
2281
2282void Assembler::dahi(Register rs, int32_t j) {
2283 DCHECK(is_uint16(j));
2284 GenInstrImmediate(REGIMM, rs, DAHI, j);
2285}
2286
2287void Assembler::dati(Register rs, int32_t j) {
2288 DCHECK(is_uint16(j));
2289 GenInstrImmediate(REGIMM, rs, DATI, j);
2290}
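// For reference: combined with ori, the instructions above can build a full
// 64-bit constant 16 bits at a time: lui/ori set bits 31..0, dahi adds its
// immediate shifted left by 32, and dati adds its immediate shifted left by
// 48. The optimized load-immediate sequences must compensate for carries
// introduced by sign extension of the lower parts.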
2291
2292void Assembler::ldl(Register rd, const MemOperand& rs) {
2293 DCHECK(is_int16(rs.offset_));
2294 DCHECK_EQ(kArchVariant, kMips64r2);
2295 GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
2296}
2297
2298void Assembler::ldr(Register rd, const MemOperand& rs) {
2299 DCHECK(is_int16(rs.offset_));
2300 DCHECK_EQ(kArchVariant, kMips64r2);
2301 GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
2302}
2303
2304void Assembler::sdl(Register rd, const MemOperand& rs) {
2305 DCHECK(is_int16(rs.offset_));
2306 DCHECK_EQ(kArchVariant, kMips64r2);
2307 GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
2308}
2309
2310void Assembler::sdr(Register rd, const MemOperand& rs) {
2311 DCHECK(is_int16(rs.offset_));
2312 DCHECK_EQ(kArchVariant, kMips64r2);
2313 GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
2314}
2315
2316void Assembler::ld(Register rd, const MemOperand& rs) {
2317 GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
2318}
2319
2320void Assembler::sd(Register rd, const MemOperand& rs) {
2321 GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
2322}
2323
2324// ---------PC-Relative instructions-----------
2325
2326void Assembler::addiupc(Register rs, int32_t imm19) {
2327 DCHECK_EQ(kArchVariant, kMips64r6);
2328 DCHECK(rs.is_valid() && is_int19(imm19));
2329 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
2330 GenInstrImmediate(PCREL, rs, imm21);
2331}
2332
2333void Assembler::lwpc(Register rs, int32_t offset19) {
2334 DCHECK_EQ(kArchVariant, kMips64r6);
2335 DCHECK(rs.is_valid() && is_int19(offset19));
2336 uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
2337 GenInstrImmediate(PCREL, rs, imm21);
2338}
2339
2340void Assembler::lwupc(Register rs, int32_t offset19) {
2341 DCHECK_EQ(kArchVariant, kMips64r6);
2342 DCHECK(rs.is_valid() && is_int19(offset19));
2343 uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
2344 GenInstrImmediate(PCREL, rs, imm21);
2345}
2346
2347void Assembler::ldpc(Register rs, int32_t offset18) {
2348 DCHECK_EQ(kArchVariant, kMips64r6);
2349 DCHECK(rs.is_valid() && is_int18(offset18));
2350 uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
2351 GenInstrImmediate(PCREL, rs, imm21);
2352}
2353
2354void Assembler::auipc(Register rs, int16_t imm16) {
2355 DCHECK_EQ(kArchVariant, kMips64r6);
2356 DCHECK(rs.is_valid());
2357 uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
2358 GenInstrImmediate(PCREL, rs, imm21);
2359}
2360
2361void Assembler::aluipc(Register rs, int16_t imm16) {
2362 DCHECK_EQ(kArchVariant, kMips64r6);
2363 DCHECK(rs.is_valid());
2364 uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
2365 GenInstrImmediate(PCREL, rs, imm21);
2366}
2367
2368// -------------Misc-instructions--------------
2369
2370// Break / Trap instructions.
2371void Assembler::break_(uint32_t code, bool break_as_stop) {
2372 DCHECK_EQ(code & ~0xFFFFF, 0);
2373 // We need to invalidate breaks that could be stops as well because the
2374 // simulator expects a char pointer after the stop instruction.
2375 // See constants-mips64.h for explanation.
2376 DCHECK(
2377 (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
2378 (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
2379 Instr break_instr = SPECIAL | BREAK | (code << 6);
2380 emit(break_instr);
2381}
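// Encoding (for reference): bits 31..26 hold SPECIAL, bits 25..6 the 20-bit
// code (hence the '~0xFFFFF' check above), and bits 5..0 the BREAK funct.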
2382
2383void Assembler::stop(uint32_t code) {
2384 DCHECK_GT(code, kMaxWatchpointCode);
2385 DCHECK_LE(code, kMaxStopCode);
2386#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
2387 break_(0x54321);
2388#else // V8_HOST_ARCH_MIPS
2389 break_(code, true);
2390#endif
2391}
2392
2393void Assembler::tge(Register rs, Register rt, uint16_t code) {
2394 DCHECK(is_uint10(code));
2395 Instr instr =
2396 SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2397 emit(instr);
2398}
2399
2400void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2401 DCHECK(is_uint10(code));
2402 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift |
2403 code << 6;
2404 emit(instr);
2405}
2406
2407void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2408 DCHECK(is_uint10(code));
2409 Instr instr =
2410 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2411 emit(instr);
2412}
2413
2414void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2415 DCHECK(is_uint10(code));
2416 Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift |
2417 code << 6;
2418 emit(instr);
2419}
2420
2421void Assembler::teq(Register rs, Register rt, uint16_t code) {
2422 DCHECK(is_uint10(code));
2423 Instr instr =
2424 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2425 emit(instr);
2426}
2427
2428void Assembler::tne(Register rs, Register rt, uint16_t code) {
2429 DCHECK(is_uint10(code));
2430 Instr instr =
2431 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2432 emit(instr);
2433}
2434
2435void Assembler::sync() {
2436 Instr sync_instr = SPECIAL | SYNC;
2437 emit(sync_instr);
2438}
2439
2440// Move from HI/LO register.
2441
2442void Assembler::mfhi(Register rd) {
2443 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
2444}
2445
2446void Assembler::mflo(Register rd) {
2447 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
2448}
2449
2450// Set on less than instructions.
2451void Assembler::slt(Register rd, Register rs, Register rt) {
2452 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
2453}
2454
2455void Assembler::sltu(Register rd, Register rs, Register rt) {
2456 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
2457}
2458
2459void Assembler::slti(Register rt, Register rs, int32_t j) {
2460 GenInstrImmediate(SLTI, rs, rt, j);
2461}
2462
2463void Assembler::sltiu(Register rt, Register rs, int32_t j) {
2464 GenInstrImmediate(SLTIU, rs, rt, j);
2465}
2466
2467// Conditional move.
2468void Assembler::movz(Register rd, Register rs, Register rt) {
2469 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
2470}
2471
2472void Assembler::movn(Register rd, Register rs, Register rt) {
2473 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
2474}
2475
2476void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2477 Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
2478 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2479}
2480
2481void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2482 Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
2483 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2484}
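// For reference: MOVT/MOVF pack the FP condition code and the true/false
// sense into the rt field: (cc & 0x0007) << 2 places the 3-bit cc number in
// bits 4..2, and the low bit (1 for movt, 0 for movf) selects the sense.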
2485
2486void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2487 min(S, fd, fs, ft);
2488}
2489
2490void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2491 min(D, fd, fs, ft);
2492}
2493
2494void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2495 max(S, fd, fs, ft);
2496}
2497
2498void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2499 max(D, fd, fs, ft);
2500}
2501
2502void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2503 mina(S, fd, fs, ft);
2504}
2505
2506void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2507 mina(D, fd, fs, ft);
2508}
2509
2510void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2511 maxa(S, fd, fs, ft);
2512}
2513
2514void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2515 maxa(D, fd, fs, ft);
2516}
2517
2518void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
2519 FPURegister ft) {
2520 DCHECK_EQ(kArchVariant, kMips64r6);
2521 DCHECK((fmt == D) || (fmt == S));
2522 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2523}
2524
2525void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
2526 FPURegister ft) {
2527 DCHECK_EQ(kArchVariant, kMips64r6);
2528 DCHECK((fmt == D) || (fmt == S));
2529 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2530}
2531
2532// GPR.
2533void Assembler::seleqz(Register rd, Register rs, Register rt) {
2534 DCHECK_EQ(kArchVariant, kMips64r6);
2535 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
2536}
2537
2538// GPR.
2539void Assembler::selnez(Register rd, Register rs, Register rt) {
2540 DCHECK_EQ(kArchVariant, kMips64r6);
2541 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
2542}
2543
2544// Bit twiddling.
2545void Assembler::clz(Register rd, Register rs) {
2546 if (kArchVariant != kMips64r6) {
2547 // clz instr requires same GPR number in 'rd' and 'rt' fields.
2548 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2549 } else {
2550 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2551 }
2552}
2553
2554void Assembler::dclz(Register rd, Register rs) {
2555 if (kArchVariant != kMips64r6) {
2556 // dclz instr requires same GPR number in 'rd' and 'rt' fields.
2557 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
2558 } else {
2559 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
2560 }
2561}
2562
2563void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2564 // Should be called via MacroAssembler::Ins.
2565 // ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2566 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2567 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2568}
2569
2570void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2571 // Should be called via MacroAssembler::Dins.
2572 // dins instr has 'rt' field as dest, and two uint5: msb, lsb.
2573 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2574 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
2575}
2576
2577void Assembler::dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2578 // Should be called via MacroAssembler::Dins.
2579 // dinsm instr has 'rt' field as dest, and two uint5: msbminus32, lsb.
2580 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2581 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos, DINSM);
2582}
2583
2584void Assembler::dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2585 // Should be called via MacroAssembler::Dins.
2586 // dinsu instr has 'rt' field as dest, and two uint5: msbminus32, lsbminus32.
2587 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2588 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos - 32, DINSU);
2589}
2590
2591void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2592 // Should be called via MacroAssembler::Ext.
2593 // ext instr has 'rt' field as dest, and two uint5: msbd, lsb.
2594 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2595 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2596}
2597
2598void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2599 // Should be called via MacroAssembler::Dext.
2600 // dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
2601 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2602 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
2603}
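// Worked example (illustrative): dext_(rt, rs, 8, 16) extracts bits 23..8 of
// 'rs' into 'rt', encoding msbd = size - 1 = 15 and lsb = pos = 8. The m/u
// variants below exist because each encoded field is only five bits wide:
// dextm encodes msbd - 32 for sizes above 32, and dextu encodes lsb - 32 for
// positions of 32 and up.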
2604
2605void Assembler::dextm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2606 // Should be called via MacroAssembler::Dextm.
2607 // dextm instr has 'rt' field as dest, and two uint5: msbdminus32, lsb.
2608 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2609 GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
2610}
2611
2612void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2613 // Should be called via MacroAssembler::Dextu.
2614 // dextu instr has 'rt' field as dest, and two uint5: msbd, lsbminus32.
2615 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2616 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
2617}
2618
2619void Assembler::bitswap(Register rd, Register rt) {
2620 DCHECK_EQ(kArchVariant, kMips64r6);
2621 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
2622}
2623
2624void Assembler::dbitswap(Register rd, Register rt) {
2625 DCHECK_EQ(kArchVariant, kMips64r6);
2626 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
2627}
2628
2629void Assembler::pref(int32_t hint, const MemOperand& rs) {
2630 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2631 Instr instr =
2632 PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_);
2633 emit(instr);
2634}
2635
2636void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2637 DCHECK_EQ(kArchVariant, kMips64r6);
2638 DCHECK(is_uint3(bp));
2639 uint16_t sa = (ALIGN << kBp2Bits) | bp;
2640 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2641}
2642
2643void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
2644 DCHECK_EQ(kArchVariant, kMips64r6);
2645 DCHECK(is_uint3(bp));
2646 uint16_t sa = (DALIGN << kBp3Bits) | bp;
2647 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
2648}
2649
2650void Assembler::wsbh(Register rd, Register rt) {
2651 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2652 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
2653}
2654
2655void Assembler::dsbh(Register rd, Register rt) {
2656 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2657 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
2658}
2659
2660void Assembler::dshd(Register rd, Register rt) {
2661 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2662 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
2663}
2664
2665void Assembler::seh(Register rd, Register rt) {
2666 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2667 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
2668}
2669
2670void Assembler::seb(Register rd, Register rt) {
2671 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2672 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2673}
2674
2675// --------Coprocessor-instructions----------------
2676
2677// Load, store, move.
2678void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2679 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2680}
2681
2682void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2683 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
2684}
2685
2686void Assembler::swc1(FPURegister fs, const MemOperand& src) {
2687 GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
2688}
2689
2690void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
2691 GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
2692}
2693
2694void Assembler::mtc1(Register rt, FPURegister fs) {
2695 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2696}
2697
2698void Assembler::mthc1(Register rt, FPURegister fs) {
2699 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2700}
2701
2702void Assembler::dmtc1(Register rt, FPURegister fs) {
2703 GenInstrRegister(COP1, DMTC1, rt, fs, f0);
2704}
2705
2706void Assembler::mfc1(Register rt, FPURegister fs) {
2707 GenInstrRegister(COP1, MFC1, rt, fs, f0);
2708}
2709
2710void Assembler::mfhc1(Register rt, FPURegister fs) {
2711 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2712}
2713
2714void Assembler::dmfc1(Register rt, FPURegister fs) {
2715 GenInstrRegister(COP1, DMFC1, rt, fs, f0);
2716}
2717
2718void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2719 GenInstrRegister(COP1, CTC1, rt, fs);
2720}
2721
2722void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2723 GenInstrRegister(COP1, CFC1, rt, fs);
2724}
2725
2726void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2727 FPURegister ft) {
2728 DCHECK_EQ(kArchVariant, kMips64r6);
2729 DCHECK((fmt == D) || (fmt == S));
2730
2731 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2732}
2733
2734void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2735 sel(S, fd, fs, ft);
2736}
2737
2738void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2739 sel(D, fd, fs, ft);
2740}
2741
2742// FPR.
2743void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2744 FPURegister ft) {
2745 DCHECK((fmt == D) || (fmt == S));
2746 GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2747}
2748
2749void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2750 seleqz(D, fd, fs, ft);
2751}
2752
2753void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2754 seleqz(S, fd, fs, ft);
2755}
2756
2757void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2758 selnez(D, fd, fs, ft);
2759}
2760
2761void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2762 selnez(S, fd, fs, ft);
2763}
2764
2765void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
2766 DCHECK_EQ(kArchVariant, kMips64r2);
2767 GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
2768}
2769
2770void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
2771 DCHECK_EQ(kArchVariant, kMips64r2);
2772 GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
2773}
2774
2775void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2776 DCHECK_EQ(kArchVariant, kMips64r2);
2777 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
2778 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2779}
2780
2781void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2782 DCHECK_EQ(kArchVariant, kMips64r2);
2783 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
2784 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2785}
2786
2787void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
2788 DCHECK_EQ(kArchVariant, kMips64r2);
2789 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2790 GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
2791}
2792
2793void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
2794 DCHECK_EQ(kArchVariant, kMips64r2);
2795 FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
2796 GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
2797}
2798
2799void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
2800 DCHECK_EQ(kArchVariant, kMips64r2);
2801 GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
2802}
2803
2804void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
2805 DCHECK_EQ(kArchVariant, kMips64r2);
2806 GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
2807}
2808
2809// FPR.
2810void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
2811 FPURegister ft) {
2812 DCHECK_EQ(kArchVariant, kMips64r6);
2813 DCHECK((fmt == D) || (fmt == S));
2814 GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
2815}
2816
2817// Arithmetic.
2818
2819void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2820 GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
2821}
2822
2823void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2824 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2825}
2826
2827void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2828 GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
2829}
2830
2831void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2832 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2833}
2834
2835void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2836 GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
2837}
2838
2839void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2840 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2841}
2842
2843void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2844 FPURegister ft) {
2845 // On Loongson 3A (MIPS64R2), MADD.S is implemented as a fused multiply-add,
2846 // which causes failures in some of the tests. Since this optimization is
2847 // rarely used, and not used at all on MIPS64R6, this instruction is removed.
2848 UNREACHABLE();
2849}
2850
2851void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2852 FPURegister ft) {
2853 // On Loongson 3A (MIPS64R2), MADD.D is implemented as a fused multiply-add,
2854 // which causes failures in some of the tests. Since this optimization is
2855 // rarely used, and not used at all on MIPS64R6, this instruction is removed.
2856 UNREACHABLE();
2857}
2858
2859void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2860 FPURegister ft) {
2861 // See explanation for instruction madd_s.
2862 UNREACHABLE();
2863}
2864
2865void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2866 FPURegister ft) {
2867 // See explanation for instruction madd_d.
2868 UNREACHABLE();
2869}
2870
2871void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2872 DCHECK_EQ(kArchVariant, kMips64r6);
2873 GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
2874}
2875
2876void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2877 DCHECK_EQ(kArchVariant, kMips64r6);
2878 GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
2879}
2880
2881void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2882 DCHECK_EQ(kArchVariant, kMips64r6);
2883 GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
2884}
2885
2886void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2887 DCHECK_EQ(kArchVariant, kMips64r6);
2888 GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
2889}
2890
2891void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2892 GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
2893}
2894
2895void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2896 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2897}
2898
2899void Assembler::abs_s(FPURegister fd, FPURegister fs) {
2900 GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
2901}
2902
2903void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2904 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2905}
2906
2907void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2908 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2909}
2910
2911void Assembler::mov_s(FPURegister fd, FPURegister fs) {
2912 GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
2913}
2914
2915void Assembler::neg_s(FPURegister fd, FPURegister fs) {
2916 GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
2917}
2918
2919void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2920 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2921}
2922
2923void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
2924 GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
2925}
2926
2927void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2928 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
2929}
2930
2931void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
2932 GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
2933}
2934
2935void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
2936 GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
2937}
2938
2939void Assembler::recip_d(FPURegister fd, FPURegister fs) {
2940 GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
2941}
2942
2943void Assembler::recip_s(FPURegister fd, FPURegister fs) {
2944 GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
2945}
2946
2947// Conversions.
2948void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2949 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2950}
2951
2952void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2953 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2954}
2955
2956void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2957 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2958}
2959
2960void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2961 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2962}
2963
2964void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2965 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2966}
2967
2968void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2969 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2970}
2971
2972void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2973 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2974}
2975
2976void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2977 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2978}
2979
2980void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2981 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2982}
2983
2984void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2985 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2986}
2987
2988void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2989
2990void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2991
2992void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
2993 DCHECK_EQ(kArchVariant, kMips64r6);
2994 GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
2995}
2996
2997void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
2998 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2999 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
3000}
3001
3002void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
3003 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3004 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
3005}
3006
3007void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
3008 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3009 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
3010}
3011
3012void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
3013 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3014 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
3015}
3016
3017void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
3018 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
3019}
3020
3021void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
3022 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
3023}
3024
3025void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
3026 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
3027}
3028
3029void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
3030 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
3031}
3032
3033void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
3034 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
3035}
3036
3037void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
3038 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
3039}
3040
3041void Assembler::class_s(FPURegister fd, FPURegister fs) {
3042 DCHECK_EQ(kArchVariant, kMips64r6);
3043 GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
3044}
3045
3046void Assembler::class_d(FPURegister fd, FPURegister fs) {
3047 DCHECK_EQ(kArchVariant, kMips64r6);
3048 GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
3049}
3050
3051void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
3052 FPURegister ft) {
3053 DCHECK_EQ(kArchVariant, kMips64r6);
3054 DCHECK((fmt == D) || (fmt == S));
3055 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
3056}
3057
3058void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
3059 FPURegister ft) {
3060 DCHECK_EQ(kArchVariant, kMips64r6);
3061 DCHECK((fmt == D) || (fmt == S));
3062 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
3063}
3064
3065void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
3066 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
3067}
3068
3069void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
3070 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3071 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
3072}
3073
3074void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
3075 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
3076}
3077
3078void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
3079 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
3080}
3081
3082void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
3083 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
3084 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
3085}
3086
3087void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
3088 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
3089}
3090
3091// Conditions for >= MIPSr6.
3092void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
3093 FPURegister fs, FPURegister ft) {
3094 DCHECK_EQ(kArchVariant, kMips64r6);
3095 DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3096 Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
3097 fd.code() << kFdShift | (0 << 5) | cond;
3098 emit(instr);
3099}
3100
3101void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
3102 FPURegister ft) {
3103 cmp(cond, W, fd, fs, ft);
3104}
3105
3106void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
3107 FPURegister ft) {
3108 cmp(cond, L, fd, fs, ft);
3109}
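// For reference: the r6 CMP.condn encodings reuse the W and L values in the
// fmt field for single and double precision, which is why cmp_s and cmp_d
// above pass W and L rather than S and D.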
3110
3111void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
3112 DCHECK_EQ(kArchVariant, kMips64r6);
3113 BlockTrampolinePoolScope block_trampoline_pool(this);
3114 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
3115 emit(instr);
3116 BlockTrampolinePoolFor(1); // For associated delay slot.
3117}
3118
3119void Assembler::bc1nez(int16_t offset, FPURegister ft) {
3120 DCHECK_EQ(kArchVariant, kMips64r6);
3121 BlockTrampolinePoolScope block_trampoline_pool(this);
3122 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
3123 emit(instr);
3124 BlockTrampolinePoolFor(1); // For associated delay slot.
3125}
3126
3127// Conditions for < MIPSr6.
3128void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
3129 FPURegister ft, uint16_t cc) {
3130 DCHECK_NE(kArchVariant, kMips64r6);
3131 DCHECK(is_uint3(cc));
3132 DCHECK(fmt == S || fmt == D);
3133 DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
3134 Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
3135 cc << 8 | 3 << 4 | cond;
3136 emit(instr);
3137}
3138
3139void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
3140 uint16_t cc) {
3141 c(cond, S, fs, ft, cc);
3142}
3143
3144void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
3145 uint16_t cc) {
3146 c(cond, D, fs, ft, cc);
3147}
3148
3149void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
3150 DCHECK_EQ(src2, 0.0);
3151 mtc1(zero_reg, f14);
3152 cvt_d_w(f14, f14);
3153 c(cond, D, src1, f14, 0);
3154}
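// Note: fcmp materializes its 0.0 operand in f14, so f14 is clobbered; the
// DCHECK above restricts src2 to exactly 0.0.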
3155
3156void Assembler::bc1f(int16_t offset, uint16_t cc) {
3157 BlockTrampolinePoolScope block_trampoline_pool(this);
3158 DCHECK(is_uint3(cc));
3159 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
3160 emit(instr);
3161 BlockTrampolinePoolFor(1); // For associated delay slot.
3162}
3163
3164void Assembler::bc1t(int16_t offset, uint16_t cc) {
3165 BlockTrampolinePoolScope block_trampoline_pool(this);
3166 DCHECK(is_uint3(cc));
3167 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
3168 emit(instr);
3169 BlockTrampolinePoolFor(1); // For associated delay slot.
3170}
3171
3172// ---------- MSA instructions ------------
3173#define MSA_BRANCH_LIST(V) \
3174 V(bz_v, BZ_V) \
3175 V(bz_b, BZ_B) \
3176 V(bz_h, BZ_H) \
3177 V(bz_w, BZ_W) \
3178 V(bz_d, BZ_D) \
3179 V(bnz_v, BNZ_V) \
3180 V(bnz_b, BNZ_B) \
3181 V(bnz_h, BNZ_H) \
3182 V(bnz_w, BNZ_W) \
3183 V(bnz_d, BNZ_D)
3184
3185#define MSA_BRANCH(name, opcode) \
3186 void Assembler::name(MSARegister wt, int16_t offset) { \
3187 GenInstrMsaBranch(opcode, wt, offset); \
3188 }
3189
3190MSA_BRANCH_LIST(MSA_BRANCH)
3191#undef MSA_BRANCH
3192#undef MSA_BRANCH_LIST
3193
3194#define MSA_LD_ST_LIST(V) \
3195 V(ld_b, LD_B, 1) \
3196 V(ld_h, LD_H, 2) \
3197 V(ld_w, LD_W, 4) \
3198 V(ld_d, LD_D, 8) \
3199 V(st_b, ST_B, 1) \
3200 V(st_h, ST_H, 2) \
3201 V(st_w, ST_W, 4) \
3202 V(st_d, ST_D, 8)
3203
3204#define MSA_LD_ST(name, opcode, b) \
3205 void Assembler::name(MSARegister wd, const MemOperand& rs) { \
3206 MemOperand source = rs; \
3207 AdjustBaseAndOffset(&source); \
3208 if (is_int10(source.offset())) { \
3209 DCHECK_EQ(source.offset() % b, 0); \
3210 GenInstrMsaMI10(opcode, source.offset() / b, source.rm(), wd); \
3211 } else { \
3212 UseScratchRegisterScope temps(this); \
3213 Register scratch = temps.Acquire(); \
3214 DCHECK_NE(rs.rm(), scratch); \
3215 daddiu(scratch, source.rm(), source.offset()); \
3216 GenInstrMsaMI10(opcode, 0, scratch, wd); \
3217 } \
3218 }
3219
3220MSA_LD_ST_LIST(MSA_LD_ST)
3221#undef MSA_LD_ST
3222#undef MSA_LD_ST_LIST
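// For reference: the MI10 format scales its signed 10-bit offset by the
// element size, giving an in-range window of base + [-512 * b, 511 * b]
// (about +/-4KB for ld_d/st_d); larger offsets fall back to the daddiu-based
// sequence in the macro above.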
3223
3224#define MSA_I10_LIST(V) \
3225 V(ldi_b, I5_DF_b) \
3226 V(ldi_h, I5_DF_h) \
3227 V(ldi_w, I5_DF_w) \
3228 V(ldi_d, I5_DF_d)
3229
3230#define MSA_I10(name, format) \
3231 void Assembler::name(MSARegister wd, int32_t imm10) { \
3232 GenInstrMsaI10(LDI, format, imm10, wd); \
3233 }
3234MSA_I10_LIST(MSA_I10)
3235#undef MSA_I10
3236#undef MSA_I10_LIST
3237
3238#define MSA_I5_LIST(V) \
3239 V(addvi, ADDVI) \
3240 V(subvi, SUBVI) \
3241 V(maxi_s, MAXI_S) \
3242 V(maxi_u, MAXI_U) \
3243 V(mini_s, MINI_S) \
3244 V(mini_u, MINI_U) \
3245 V(ceqi, CEQI) \
3246 V(clti_s, CLTI_S) \
3247 V(clti_u, CLTI_U) \
3248 V(clei_s, CLEI_S) \
3249 V(clei_u, CLEI_U)
3250
3251#define MSA_I5_FORMAT(name, opcode, format) \
3252 void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3253 uint32_t imm5) { \
3254 GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd); \
3255 }
3256
3257#define MSA_I5(name, opcode) \
3258 MSA_I5_FORMAT(name, opcode, b) \
3259 MSA_I5_FORMAT(name, opcode, h) \
3260 MSA_I5_FORMAT(name, opcode, w) \
3261 MSA_I5_FORMAT(name, opcode, d)
3262
3263MSA_I5_LIST(MSA_I5)
3264#undef MSA_I5
3265#undef MSA_I5_FORMAT
3266#undef MSA_I5_LIST
3267
3268#define MSA_I8_LIST(V) \
3269 V(andi_b, ANDI_B) \
3270 V(ori_b, ORI_B) \
3271 V(nori_b, NORI_B) \
3272 V(xori_b, XORI_B) \
3273 V(bmnzi_b, BMNZI_B) \
3274 V(bmzi_b, BMZI_B) \
3275 V(bseli_b, BSELI_B) \
3276 V(shf_b, SHF_B) \
3277 V(shf_h, SHF_H) \
3278 V(shf_w, SHF_W)
3279
3280#define MSA_I8(name, opcode) \
3281 void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \
3282 GenInstrMsaI8(opcode, imm8, ws, wd); \
3283 }
3284
3285MSA_I8_LIST(MSA_I8)
3286#undef MSA_I8
3287#undef MSA_I8_LIST
3288
3289#define MSA_VEC_LIST(V) \
3290 V(and_v, AND_V) \
3291 V(or_v, OR_V) \
3292 V(nor_v, NOR_V) \
3293 V(xor_v, XOR_V) \
3294 V(bmnz_v, BMNZ_V) \
3295 V(bmz_v, BMZ_V) \
3296 V(bsel_v, BSEL_V)
3297
3298#define MSA_VEC(name, opcode) \
3299 void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \
3300 GenInstrMsaVec(opcode, wt, ws, wd); \
3301 }
3302
3303MSA_VEC_LIST(MSA_VEC)
3304#undef MSA_VEC
3305#undef MSA_VEC_LIST
3306
3307#define MSA_2R_LIST(V) \
3308 V(pcnt, PCNT) \
3309 V(nloc, NLOC) \
3310 V(nlzc, NLZC)
3311
3312#define MSA_2R_FORMAT(name, opcode, format) \
3313 void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
3314 GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd); \
3315 }
3316
3317#define MSA_2R(name, opcode) \
3318 MSA_2R_FORMAT(name, opcode, b) \
3319 MSA_2R_FORMAT(name, opcode, h) \
3320 MSA_2R_FORMAT(name, opcode, w) \
3321 MSA_2R_FORMAT(name, opcode, d)
3322
3323MSA_2R_LIST(MSA_2R)
3324#undef MSA_2R
3325#undef MSA_2R_FORMAT
3326#undef MSA_2R_LIST
3327
3328#define MSA_FILL(format) \
3329 void Assembler::fill_##format(MSARegister wd, Register rs) { \
3330 DCHECK(IsEnabled(MIPS_SIMD)); \
3331 DCHECK(rs.is_valid() && wd.is_valid()); \
3332 Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format | \
3333 (rs.code() << kWsShift) | (wd.code() << kWdShift) | \
3334 MSA_VEC_2R_2RF_MINOR; \
3335 emit(instr); \
3336 }
3337
3338MSA_FILL(b)
3339MSA_FILL(h)
3340MSA_FILL(w)
3341MSA_FILL(d)
3342#undef MSA_FILL
3343
3344#define MSA_2RF_LIST(V) \
3345 V(fclass, FCLASS) \
3346 V(ftrunc_s, FTRUNC_S) \
3347 V(ftrunc_u, FTRUNC_U) \
3348 V(fsqrt, FSQRT) \
3349 V(frsqrt, FRSQRT) \
3350 V(frcp, FRCP) \
3351 V(frint, FRINT) \
3352 V(flog2, FLOG2) \
3353 V(fexupl, FEXUPL) \
3354 V(fexupr, FEXUPR) \
3355 V(ffql, FFQL) \
3356 V(ffqr, FFQR) \
3357 V(ftint_s, FTINT_S) \
3358 V(ftint_u, FTINT_U) \
3359 V(ffint_s, FFINT_S) \
3360 V(ffint_u, FFINT_U)
3361
3362#define MSA_2RF_FORMAT(name, opcode, format) \
3363 void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \
3364 GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd); \
3365 }
3366
3367#define MSA_2RF(name, opcode) \
3368 MSA_2RF_FORMAT(name, opcode, w) \
3369 MSA_2RF_FORMAT(name, opcode, d)
3370
3371MSA_2RF_LIST(MSA_2RF)
3372#undef MSA_2RF
3373#undef MSA_2RF_FORMAT
3374#undef MSA_2RF_LIST
3375
3376#define MSA_3R_LIST(V) \
3377 V(sll, SLL_MSA) \
3378 V(sra, SRA_MSA) \
3379 V(srl, SRL_MSA) \
3380 V(bclr, BCLR) \
3381 V(bset, BSET) \
3382 V(bneg, BNEG) \
3383 V(binsl, BINSL) \
3384 V(binsr, BINSR) \
3385 V(addv, ADDV) \
3386 V(subv, SUBV) \
3387 V(max_s, MAX_S) \
3388 V(max_u, MAX_U) \
3389 V(min_s, MIN_S) \
3390 V(min_u, MIN_U) \
3391 V(max_a, MAX_A) \
3392 V(min_a, MIN_A) \
3393 V(ceq, CEQ) \
3394 V(clt_s, CLT_S) \
3395 V(clt_u, CLT_U) \
3396 V(cle_s, CLE_S) \
3397 V(cle_u, CLE_U) \
3398 V(add_a, ADD_A) \
3399 V(adds_a, ADDS_A) \
3400 V(adds_s, ADDS_S) \
3401 V(adds_u, ADDS_U) \
3402 V(ave_s, AVE_S) \
3403 V(ave_u, AVE_U) \
3404 V(aver_s, AVER_S) \
3405 V(aver_u, AVER_U) \
3406 V(subs_s, SUBS_S) \
3407 V(subs_u, SUBS_U) \
3408 V(subsus_u, SUBSUS_U) \
3409 V(subsuu_s, SUBSUU_S) \
3410 V(asub_s, ASUB_S) \
3411 V(asub_u, ASUB_U) \
3412 V(mulv, MULV) \
3413 V(maddv, MADDV) \
3414 V(msubv, MSUBV) \
3415 V(div_s, DIV_S_MSA) \
3416 V(div_u, DIV_U) \
3417 V(mod_s, MOD_S) \
3418 V(mod_u, MOD_U) \
3419 V(dotp_s, DOTP_S) \
3420 V(dotp_u, DOTP_U) \
3421 V(dpadd_s, DPADD_S) \
3422 V(dpadd_u, DPADD_U) \
3423 V(dpsub_s, DPSUB_S) \
3424 V(dpsub_u, DPSUB_U) \
3425 V(pckev, PCKEV) \
3426 V(pckod, PCKOD) \
3427 V(ilvl, ILVL) \
3428 V(ilvr, ILVR) \
3429 V(ilvev, ILVEV) \
3430 V(ilvod, ILVOD) \
3431 V(vshf, VSHF) \
3432 V(srar, SRAR) \
3433 V(srlr, SRLR) \
3434 V(hadd_s, HADD_S) \
3435 V(hadd_u, HADD_U) \
3436 V(hsub_s, HSUB_S) \
3437 V(hsub_u, HSUB_U)
3438
3439#define MSA_3R_FORMAT(name, opcode, format) \
3440 void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3441 MSARegister wt) { \
3442 GenInstrMsa3R<MSARegister>(opcode, MSA_3R_DF_##format, wt, ws, wd); \
3443 }
3444
3445#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format) \
3446 void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
3447 Register rt) { \
3448 GenInstrMsa3R<Register>(opcode, MSA_3R_DF_##format, rt, ws, wd); \
3449 }
3450
3451#define MSA_3R(name, opcode) \
3452 MSA_3R_FORMAT(name, opcode, b) \
3453 MSA_3R_FORMAT(name, opcode, h) \
3454 MSA_3R_FORMAT(name, opcode, w) \
3455 MSA_3R_FORMAT(name, opcode, d)
3456
3457#define MSA_3R_SLD_SPLAT(name, opcode) \
3458 MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \
3459 MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \
3460 MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \
3461 MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d)
3462
3463MSA_3R_LIST(MSA_3R)
3464MSA_3R_SLD_SPLAT(sld, SLD)
3465MSA_3R_SLD_SPLAT(splat, SPLAT)
3466
3467#undef MSA_3R
3468#undef MSA_3R_FORMAT
3469#undef MSA_3R_FORMAT_SLD_SPLAT
3470#undef MSA_3R_SLD_SPLAT
3471#undef MSA_3R_LIST
3472
#define MSA_3RF_LIST1(V)  \
  V(fcaf, FCAF)           \
  V(fcun, FCUN)           \
  V(fceq, FCEQ)           \
  V(fcueq, FCUEQ)         \
  V(fclt, FCLT)           \
  V(fcult, FCULT)         \
  V(fcle, FCLE)           \
  V(fcule, FCULE)         \
  V(fsaf, FSAF)           \
  V(fsun, FSUN)           \
  V(fseq, FSEQ)           \
  V(fsueq, FSUEQ)         \
  V(fslt, FSLT)           \
  V(fsult, FSULT)         \
  V(fsle, FSLE)           \
  V(fsule, FSULE)         \
  V(fadd, FADD)           \
  V(fsub, FSUB)           \
  V(fmul, FMUL)           \
  V(fdiv, FDIV)           \
  V(fmadd, FMADD)         \
  V(fmsub, FMSUB)         \
  V(fexp2, FEXP2)         \
  V(fmin, FMIN)           \
  V(fmin_a, FMIN_A)       \
  V(fmax, FMAX)           \
  V(fmax_a, FMAX_A)       \
  V(fcor, FCOR)           \
  V(fcune, FCUNE)         \
  V(fcne, FCNE)           \
  V(fsor, FSOR)           \
  V(fsune, FSUNE)         \
  V(fsne, FSNE)

#define MSA_3RF_LIST2(V)  \
  V(fexdo, FEXDO)         \
  V(ftq, FTQ)             \
  V(mul_q, MUL_Q)         \
  V(madd_q, MADD_Q)       \
  V(msub_q, MSUB_Q)       \
  V(mulr_q, MULR_Q)       \
  V(maddr_q, MADDR_Q)     \
  V(msubr_q, MSUBR_Q)

#define MSA_3RF_FORMAT(name, opcode, df, df_c)                \
  void Assembler::name##_##df(MSARegister wd, MSARegister ws, \
                              MSARegister wt) {               \
    GenInstrMsa3RF(opcode, df_c, wt, ws, wd);                 \
  }

#define MSA_3RF_1(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, w, 0) \
  MSA_3RF_FORMAT(name, opcode, d, 1)

#define MSA_3RF_2(name, opcode)      \
  MSA_3RF_FORMAT(name, opcode, h, 0) \
  MSA_3RF_FORMAT(name, opcode, w, 1)

MSA_3RF_LIST1(MSA_3RF_1)
MSA_3RF_LIST2(MSA_3RF_2)
#undef MSA_3RF_1
#undef MSA_3RF_2
#undef MSA_3RF_FORMAT
#undef MSA_3RF_LIST1
#undef MSA_3RF_LIST2

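// Illustrative note (not in the original source): LIST1 operations work on
// float32/float64 lanes (suffixes _w and _d, data-format bit 0/1), while
// LIST2 covers the h and w formats used by the fixed-point Q-format and
// format-down-conversion instructions. V(fadd, FADD), for example, expands
// to roughly:
//
//   void Assembler::fadd_w(MSARegister wd, MSARegister ws, MSARegister wt) {
//     GenInstrMsa3RF(FADD, 0, wt, ws, wd);
//   }
//   void Assembler::fadd_d(MSARegister wd, MSARegister ws, MSARegister wt) {
//     GenInstrMsa3RF(FADD, 1, wt, ws, wd);
//   }
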
void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_B, n, ws, wd);
}

void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_H, n, ws, wd);
}

void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_W, n, ws, wd);
}

void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SLDI, ELM_DF_D, n, ws, wd);
}

void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_B, n, ws, wd);
}

void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_H, n, ws, wd);
}

void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_W, n, ws, wd);
}

void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<MSARegister, MSARegister>(SPLATI, ELM_DF_D, n, ws, wd);
}

void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_W, n, ws, rd);
}

void Assembler::copy_s_d(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_S, ELM_DF_D, n, ws, rd);
}

void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_B, n, ws, rd);
}

void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_H, n, ws, rd);
}

void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) {
  GenInstrMsaElm<Register, MSARegister>(COPY_U, ELM_DF_W, n, ws, rd);
}

void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_B, n, rs, wd);
}

void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_H, n, rs, wd);
}

void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_W, n, rs, wd);
}

void Assembler::insert_d(MSARegister wd, uint32_t n, Register rs) {
  GenInstrMsaElm<MSARegister, Register>(INSERT, ELM_DF_D, n, rs, wd);
}

void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_B, n, ws, wd);
}

void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_H, n, ws, wd);
}

void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_W, n, ws, wd);
}

void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
  GenInstrMsaElm<MSARegister, MSARegister>(INSVE, ELM_DF_D, n, ws, wd);
}

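// Illustrative note (not in the original source): the ELM-format helpers
// above move single lanes between MSA and general-purpose registers. A
// hypothetical swap of lanes 0 and 2 of a w-format vector could read:
//
//   assm.copy_s_w(t0, w0, 0);  // t0 = sign-extended lane 0 of w0.
//   assm.copy_s_w(t1, w0, 2);  // t1 = sign-extended lane 2 of w0.
//   assm.insert_w(w0, 0, t1);  // Lane 0 = low 32 bits of t1.
//   assm.insert_w(w0, 2, t0);  // Lane 2 = low 32 bits of t0.
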
void Assembler::move_v(MSARegister wd, MSARegister ws) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(ws.is_valid() && wd.is_valid());
  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
                (wd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(cd.is_valid() && rs.is_valid());
  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
                (cd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
  DCHECK(IsEnabled(MIPS_SIMD));
  DCHECK(rd.is_valid() && cs.is_valid());
  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
                (rd.code() << kWdShift) | MSA_ELM_MINOR;
  emit(instr);
}

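// Illustrative note (not in the original source): ctcmsa/cfcmsa move data
// between a general-purpose register and an MSA control register. A
// hypothetical read-modify-write of the MSA control/status register
// (assuming the MSACSR register constant) could look like:
//
//   assm.cfcmsa(t0, MSACSR);  // t0 = current control/status value.
//   // ... adjust, e.g., the rounding-mode bits in t0 ...
//   assm.ctcmsa(MSACSR, t0);  // Write the value back.
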
#define MSA_BIT_LIST(V) \
  V(slli, SLLI)         \
  V(srai, SRAI)         \
  V(srli, SRLI)         \
  V(bclri, BCLRI)       \
  V(bseti, BSETI)       \
  V(bnegi, BNEGI)       \
  V(binsli, BINSLI)     \
  V(binsri, BINSRI)     \
  V(sat_s, SAT_S)       \
  V(sat_u, SAT_U)       \
  V(srari, SRARI)       \
  V(srlri, SRLRI)

#define MSA_BIT_FORMAT(name, opcode, format)                      \
  void Assembler::name##_##format(MSARegister wd, MSARegister ws, \
                                  uint32_t m) {                   \
    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);           \
  }

#define MSA_BIT(name, opcode)     \
  MSA_BIT_FORMAT(name, opcode, b) \
  MSA_BIT_FORMAT(name, opcode, h) \
  MSA_BIT_FORMAT(name, opcode, w) \
  MSA_BIT_FORMAT(name, opcode, d)

MSA_BIT_LIST(MSA_BIT)
#undef MSA_BIT
#undef MSA_BIT_FORMAT
#undef MSA_BIT_LIST

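// Illustrative note (not in the original source): BIT-format instructions
// take an immediate whose width depends on the element size (3 bits for b,
// 4 for h, 5 for w, 6 for d), packed together with the data-format bits.
// For example, a hypothetical call assm.slli_h(w0, w1, 3) shifts each of
// the eight 16-bit lanes of w1 left by 3 and writes the result to w0.
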
int Assembler::RelocateInternalReference(
    RelocInfo::Mode rmode, Address pc, intptr_t pc_delta,
    WritableJitAllocation* jit_allocation) {
  if (RelocInfo::IsInternalReference(rmode)) {
    intptr_t internal_ref = ReadUnalignedValue<intptr_t>(pc);
    if (internal_ref == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    internal_ref += pc_delta;  // Relocate entry.
    if (jit_allocation) {
      jit_allocation->WriteUnalignedValue<intptr_t>(pc, internal_ref);
    } else {
      WriteUnalignedValue<intptr_t>(pc, internal_ref);
    }
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend the address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK_EQ(imm & 3, 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask),
                 jit_allocation);
    instr_at_put(pc + 1 * kInstrSize, instr_ori | ((imm >> 16) & kImm16Mask),
                 jit_allocation);
    instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask),
                 jit_allocation);
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK_EQ(imm28 & 3, 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask), jit_allocation);
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox the raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend the 28-bit offset to 32 bits.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK_EQ(imm28 & 3, 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check the marking to decide whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask), jit_allocation);
    return 1;  // Number of instructions patched.
  }
}

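// Illustrative note (not in the original source): in the encoded lui/ori
// case above, the 48-bit address is scattered across three 16-bit immediate
// fields. As a worked example, suppose the sequence encodes 0x123456789abc
// and the buffer moves by pc_delta = 0x1000: the immediates 0x1234, 0x5678
// and 0x9abc are reassembled, the delta yields 0x12345678aabc, and 0x1234,
// 0x5678 and 0xaabc are written back into the lui, first ori and second ori
// respectively.
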
void Assembler::GrowBuffer() {
  // Compute the new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);

  // Some internal data structures overflow for very large buffers;
  // kMaximalBufferSize must be kept small enough to prevent that.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up the new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  uint8_t* new_start = new_buffer->start();

  // Copy the data.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  pc_for_safepoint_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references.
  base::Vector<uint8_t> instructions{buffer_start_,
                                     static_cast<size_t>(pc_offset())};
  base::Vector<const uint8_t> reloc_info{reloc_info_writer.pos(), reloc_size};
  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
    }
  }

  DCHECK(!overflow());
}

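// Illustrative note (not in the original source): the growth policy above
// doubles small buffers and grows large ones linearly. For example, a 256KB
// buffer grows to 512KB, since min(512KB, 256KB + 1MB) = 512KB, while a 4MB
// buffer grows to 5MB, since min(8MB, 5MB) = 5MB.
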
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  *reinterpret_cast<uint64_t*>(pc_) = data;
  pc_ += sizeof(uint64_t);
}

void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data);
  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
  reloc_info_writer.Write(&rinfo);
}

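// Illustrative note (not in the original source): dd(Label*) is the
// building block for jump tables. A hypothetical two-entry table:
//
//   Label case0, case1;
//   assm.dd(&case0);  // Emits a 64-bit internal reference.
//   assm.dd(&case1);
//
// For a bound label this stores the absolute buffer address; for an unbound
// one it stores a chain entry that is fixed up later. The recorded
// INTERNAL_REFERENCE reloc lets GrowBuffer() relocate either form.
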
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}

void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK_GE(unbound_labels_count_, 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline
    // pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        if (kArchVariant == kMips64r6) {
          bc(&after_pool);
          nop();
        } else {
          or_(t8, ra, zero_reg);
          nal();       // Read PC into ra register.
          lui(t9, 0);  // Branch delay slot.
          ori(t9, t9, 0);
          daddu(t9, ra, t9);
          or_(ra, t8, zero_reg);
          // The jr instruction will take the or_ from the next trampoline
          // entry in its branch delay slot. This is the expected behavior,
          // and it decreases the size of the trampoline pool.
          jr(t9);
        }
      }
      nop();
      // If unbound_labels_count_ is big enough, label after_pool will
      // need a trampoline too, so we must create the trampoline before
      // the bind operation to make sure function 'bind' can get this
      // information.
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
      bind(&after_pool);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline pool once, we need to
      // prevent any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels is zero at this point, so we
    // can move the next buffer check out to the maximum distance.
    next_buffer_check_ =
        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}

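// Illustrative note (not in the original source): code that must stay
// contiguous (e.g. a patchable instruction sequence) guards itself with the
// same scope used above:
//
//   {
//     BlockTrampolinePoolScope block_trampoline_pool(&assm);
//     // ... emit instructions that must not be split by a pool ...
//   }
//
// While the scope is active, CheckTrampolinePool() takes the early-return
// path and merely reschedules itself one instruction later.
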
Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret the 4 instructions of an address generated by li: see the
  // listing in Assembler::set_target_value_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48-bit value.
    int64_t addr =
        static_cast<int64_t>(((uint64_t)(GetImmediate16(instr0)) << 32) |
                             ((uint64_t)(GetImmediate16(instr1)) << 16) |
                             ((uint64_t)(GetImmediate16(instr3))));

    // Sign extend to get a canonical address.
    addr = (addr << 16) >> 16;
    return static_cast<Address>(addr);
  }
  // We should never get here; force a bad address if we do.
  UNREACHABLE();
}

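// Illustrative note (not in the original source): the (addr << 16) >> 16
// step replicates bit 47 into bits 63..48. For example, immediates 0xffff,
// 0x0123 and 0x4567 assemble to 0x0000ffff01234567; after the arithmetic
// shift pair the result is 0xffffffff01234567, the canonical form expected
// by virtual-address translation.
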
// On Mips64, a target address is stored in a 4-instruction sequence:
//  0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//  1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//  2: dsll(rd, rd, 16);
//  3: ori(rd, rd, j.imm32_ & kImm16Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help; it should be benchmarked
// and possibly removed.
void Assembler::set_target_value_at(Address pc, uint64_t target,
                                    WritableJitAllocation* jit_allocation,
                                    ICacheFlushMode icache_flush_mode) {
  // There is an optimization where only 4 instructions are used to load the
  // address, because on MIPS64 only the low 48 bits of the address are
  // effectively used. It relies on the fact that the upper bits [63:48] are
  // not used for virtual address translation and must be set according to
  // the value of bit 47 to obtain a canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);

#ifdef DEBUG
  // Check that we have the result of a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  DCHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
          GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, middle-16.
  // dsll rt, rt, 16.
  // ori rt, rt, lower-16.
  Instr new_instr0 =
      LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
  Instr new_instr1 = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
                     ((target >> 16) & kImm16Mask);
  Instr new_instr3 = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
                     (target & kImm16Mask);
  instr_at_put(pc, new_instr0, jit_allocation);
  instr_at_put(pc + kInstrSize, new_instr1, jit_allocation);
  instr_at_put(pc + kInstrSize * 3, new_instr3, jit_allocation);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    FlushInstructionCache(pc, 4 * kInstrSize);
  }
}

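// Illustrative note (not in the original source): a hypothetical patch of a
// li-generated sequence at patch_pc to point at new_target could read:
//
//   Assembler::set_target_value_at(patch_pc, new_target,
//                                  /*jit_allocation=*/nullptr);
//
// Only instructions 0, 1 and 3 are rewritten; the dsll at slot 2 carries no
// address bits, which is why instr_at_put() above skips pc + 2 * kInstrSize.
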
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
                                         uint8_t laneidx) {
  switch (rep) {
    case MachineRepresentation::kWord8:
      *this = LoadStoreLaneParams(laneidx, MSA_B, 16);
      break;
    case MachineRepresentation::kWord16:
      *this = LoadStoreLaneParams(laneidx, MSA_H, 8);
      break;
    case MachineRepresentation::kWord32:
      *this = LoadStoreLaneParams(laneidx, MSA_W, 4);
      break;
    case MachineRepresentation::kWord64:
      *this = LoadStoreLaneParams(laneidx, MSA_D, 2);
      break;
    default:
      UNREACHABLE();
  }
}

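// Illustrative note (not in the original source): the third constructor
// argument is the lane count for the chosen MSA data format, so a kWord32
// lane access maps to MSA_W with 4 lanes of a 128-bit vector, and laneidx
// must therefore lie in [0, 3].
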
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64