V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-mips64.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h> // For LONG_MIN, LONG_MAX.
6
7#if V8_TARGET_ARCH_MIPS64
8
9#include "src/base/bits.h"
19#include "src/debug/debug.h"
26#include "src/runtime/runtime.h"
28
29// Satisfy cpplint check, but don't include platform-specific header. It is
30// included recursively via macro-assembler.h.
31#if 0
32#include "src/codegen/mips64/macro-assembler-mips64.h"
33#endif
34
35#define __ ACCESS_MASM(masm)
36
37namespace v8 {
38namespace internal {
39
40static inline bool IsZero(const Operand& rt) {
41 if (rt.is_reg()) {
42 return rt.rm() == zero_reg;
43 } else {
44 return rt.immediate() == 0;
45 }
46}
47
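// The next three helpers manage the JS caller-saved register set around
// calls into C++: one computes the stack space needed, the other two push
// and pop the registers (optionally including the caller-saved FPU set)
// and report how many bytes of stack they touched.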
48int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
49 Register exclusion1,
50 Register exclusion2,
51 Register exclusion3) const {
52 int bytes = 0;
53 RegList exclusions = {exclusion1, exclusion2, exclusion3};
54 RegList list = kJSCallerSaved - exclusions;
55 bytes += list.Count() * kPointerSize;
56
57 if (fp_mode == SaveFPRegsMode::kSave) {
58 bytes += kCallerSavedFPU.Count() * kDoubleSize;
59 }
60
61 return bytes;
62}
63
64int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
65 Register exclusion2, Register exclusion3) {
66 ASM_CODE_COMMENT(this);
67 int bytes = 0;
68 RegList exclusions = {exclusion1, exclusion2, exclusion3};
69 RegList list = kJSCallerSaved - exclusions;
70 MultiPush(list);
71 bytes += list.Count() * kPointerSize;
72
73 if (fp_mode == SaveFPRegsMode::kSave) {
74 MultiPushFPU(kCallerSavedFPU);
75 bytes += kCallerSavedFPU.Count() * kDoubleSize;
76 }
77
78 return bytes;
79}
80
81int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
82 Register exclusion2, Register exclusion3) {
83 ASM_CODE_COMMENT(this);
84 int bytes = 0;
85 if (fp_mode == SaveFPRegsMode::kSave) {
86 MultiPopFPU(kCallerSavedFPU);
87 bytes += kCallerSavedFPU.Count() * kDoubleSize;
88 }
89
90 RegList exclusions = {exclusion1, exclusion2, exclusion3};
91 RegList list = kJSCallerSaved - exclusions;
92 MultiPop(list);
93 bytes += list.Count() * kPointerSize;
94
95 return bytes;
96}
97
100}
101
103 Condition cond, Register src1,
104 const Operand& src2) {
105 Branch(2, NegateCondition(cond), src1, src2);
107}
108
109void MacroAssembler::PushCommonFrame(Register marker_reg) {
110 if (marker_reg.is_valid()) {
111 Push(ra, fp, marker_reg);
112 Daddu(fp, sp, Operand(kPointerSize));
113 } else {
114 Push(ra, fp);
115 mov(fp, sp);
116 }
117}
118
119void MacroAssembler::PushStandardFrame(Register function_reg) {
120 int offset = -StandardFrameConstants::kContextOffset;
121 if (function_reg.is_valid()) {
122 Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
123 offset += 2 * kPointerSize;
124 } else {
125 Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
126 offset += kPointerSize;
127 }
128 Daddu(fp, sp, Operand(offset));
129}
130
131// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
132// The register 'object' contains a heap object pointer. The heap object
133// tag is shifted away.
134void MacroAssembler::RecordWriteField(Register object, int offset,
135 Register value, Register dst,
136 RAStatus ra_status,
137 SaveFPRegsMode save_fp,
138 SmiCheck smi_check) {
139 ASM_CODE_COMMENT(this);
140 DCHECK(!AreAliased(value, dst, t8, object));
141 // First, check if a write barrier is even needed. The tests below
142 // catch stores of Smis.
143 Label done;
144
145 // Skip barrier if writing a smi.
146 if (smi_check == SmiCheck::kInline) {
147 JumpIfSmi(value, &done);
148 }
149
150 // Although the object register is tagged, the offset is relative to the start
151 // of the object, so offset must be a multiple of kPointerSize.
152 DCHECK(IsAligned(offset, kPointerSize));
153
154 Daddu(dst, object, Operand(offset - kHeapObjectTag));
155 if (v8_flags.slow_debug_code) {
156 BlockTrampolinePoolScope block_trampoline_pool(this);
157 Label ok;
158 And(t8, dst, Operand(kPointerSize - 1));
159 Branch(&ok, eq, t8, Operand(zero_reg));
160 stop();
161 bind(&ok);
162 }
163
164 RecordWrite(object, dst, value, ra_status, save_fp, SmiCheck::kOmit);
165
166 bind(&done);
167
168 // Clobber clobbered input registers when running with the debug-code flag
169 // turned on to provoke errors.
170 if (v8_flags.slow_debug_code) {
171 li(value, Operand(base::bit_cast<int64_t>(kZapValue + 4)));
172 li(dst, Operand(base::bit_cast<int64_t>(kZapValue + 8)));
173 }
174}
175
176void MacroAssembler::MaybeSaveRegisters(RegList registers) {
177 if (registers.is_empty()) return;
178 MultiPush(registers);
179}
180
181void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
182 if (registers.is_empty()) return;
183 MultiPop(registers);
184}
185
186void MacroAssembler::CallEphemeronKeyBarrier(Register object,
187 Register slot_address,
188 SaveFPRegsMode fp_mode) {
189 ASM_CODE_COMMENT(this);
190 DCHECK(!AreAliased(object, slot_address));
194
195 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
196 Register slot_address_parameter =
197 WriteBarrierDescriptor::SlotAddressRegister();
198
199 Push(object);
200 Push(slot_address);
201 Pop(slot_address_parameter);
202 Pop(object_parameter);
203
206}
207
209 Register slot_address,
210 SaveFPRegsMode fp_mode,
211 StubCallMode mode) {
212 DCHECK(!AreAliased(object, slot_address));
216
217 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
218 Register slot_address_parameter =
219 WriteBarrierDescriptor::SlotAddressRegister();
220
221 Push(object);
222 Push(slot_address);
223 Pop(slot_address_parameter);
224 Pop(object_parameter);
225
226 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
227
229}
230
231void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
232 SaveFPRegsMode fp_mode,
233 StubCallMode mode) {
234 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
235 // need to be caller saved.
238#if V8_ENABLE_WEBASSEMBLY
239 if (mode == StubCallMode::kCallWasmRuntimeStub) {
240 auto wasm_target =
241 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
242 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
243#else
244 if (false) {
245#endif
246 } else {
247 CallBuiltin(Builtins::RecordWrite(fp_mode));
248 }
249}
250
251// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
252// The register 'object' contains a heap object pointer. The heap object
253// tag is shifted away.
254void MacroAssembler::RecordWrite(Register object, Register address,
255 Register value, RAStatus ra_status,
256 SaveFPRegsMode fp_mode, SmiCheck smi_check) {
257 DCHECK(!AreAliased(object, address, value, t8));
258 DCHECK(!AreAliased(object, address, value, t9));
259
260 if (v8_flags.slow_debug_code) {
261 UseScratchRegisterScope temps(this);
262 Register scratch = temps.Acquire();
263 DCHECK(!AreAliased(object, value, scratch));
264 Ld(scratch, MemOperand(address));
265 Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
266 Operand(value));
267 }
268
269 if (v8_flags.disable_write_barriers) {
270 return;
271 }
272
273 // First, check if a write barrier is even needed. The tests below
274 // catch stores of smis and stores into the young generation.
275 Label done;
276
277 if (smi_check == SmiCheck::kInline) {
278 DCHECK_EQ(0, kSmiTag);
279 JumpIfSmi(value, &done);
280 }
281
282 CheckPageFlag(value,
283 value, // Used as scratch.
284 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
285 CheckPageFlag(object,
286 value, // Used as scratch.
287 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
288
289 // Record the actual write.
290 if (ra_status == kRAHasNotBeenSaved) {
291 push(ra);
292 }
293
294 Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
295 DCHECK(!AreAliased(object, slot_address, value));
296 mov(slot_address, address);
297 CallRecordWriteStub(object, slot_address, fp_mode);
298
299 if (ra_status == kRAHasNotBeenSaved) {
300 pop(ra);
301 }
302
303 bind(&done);
304
305 // Clobber clobbered registers when running with the debug-code flag
306 // turned on to provoke errors.
307 if (v8_flags.slow_debug_code) {
308 li(address, Operand(base::bit_cast<int64_t>(kZapValue + 12)));
309 li(value, Operand(base::bit_cast<int64_t>(kZapValue + 16)));
310 li(slot_address, Operand(base::bit_cast<int64_t>(kZapValue + 20)));
311 }
312}
313
314// ---------------------------------------------------------------------------
315// Instruction macros.
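// Each macro below accepts an Operand that is either a register or an
// immediate. Immediates that fit the instruction's immediate field are
// emitted directly; anything else is first materialized into a scratch
// register with li.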
316
317void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
318 if (rt.is_reg()) {
319 addu(rd, rs, rt.rm());
320 } else {
321 if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
322 addiu(rd, rs, static_cast<int32_t>(rt.immediate()));
323 } else {
324 // li handles the relocation.
325 UseScratchRegisterScope temps(this);
326 Register scratch = temps.Acquire();
327 DCHECK(rs != scratch);
328 li(scratch, rt);
329 addu(rd, rs, scratch);
330 }
331 }
332}
333
334void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
335 if (rt.is_reg()) {
336 daddu(rd, rs, rt.rm());
337 } else {
338 if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
339 daddiu(rd, rs, static_cast<int32_t>(rt.immediate()));
340 } else {
341 // li handles the relocation.
342 UseScratchRegisterScope temps(this);
343 Register scratch = temps.Acquire();
344 DCHECK(rs != scratch);
345 li(scratch, rt);
346 daddu(rd, rs, scratch);
347 }
348 }
349}
350
351void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
352 if (rt.is_reg()) {
353 subu(rd, rs, rt.rm());
354 } else {
355 DCHECK(is_int32(rt.immediate()));
356 if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
357 addiu(rd, rs,
358 static_cast<int32_t>(
359 -rt.immediate())); // No subiu instr, use addiu(x, y, -imm).
360 } else {
361 UseScratchRegisterScope temps(this);
362 Register scratch = temps.Acquire();
363 DCHECK(rs != scratch);
364 if (-rt.immediate() >> 16 == 0 && !MustUseReg(rt.rmode())) {
365 // Use load -imm and addu when loading -imm generates one instruction.
366 li(scratch, -rt.immediate());
367 addu(rd, rs, scratch);
368 } else {
369 // li handles the relocation.
370 li(scratch, rt);
371 subu(rd, rs, scratch);
372 }
373 }
374 }
375}
376
377void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
378 if (rt.is_reg()) {
379 dsubu(rd, rs, rt.rm());
380 } else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
381 daddiu(rd, rs,
382 static_cast<int32_t>(
383 -rt.immediate())); // No dsubiu instr, use daddiu(x, y, -imm).
384 } else {
385 DCHECK(rs != at);
386 int li_count = InstrCountForLi64Bit(rt.immediate());
387 int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
388 if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
389 // Use load -imm and daddu when loading -imm generates one instruction.
390 DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
391 UseScratchRegisterScope temps(this);
392 Register scratch = temps.Acquire();
393 li(scratch, Operand(-rt.immediate()));
394 Daddu(rd, rs, scratch);
395 } else {
396 // li handles the relocation.
397 UseScratchRegisterScope temps(this);
398 Register scratch = temps.Acquire();
399 li(scratch, rt);
400 dsubu(rd, rs, scratch);
401 }
402 }
403}
404
405void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
406 if (rt.is_reg()) {
407 mul(rd, rs, rt.rm());
408 } else {
409 // li handles the relocation.
410 UseScratchRegisterScope temps(this);
411 Register scratch = temps.Acquire();
412 DCHECK(rs != scratch);
413 li(scratch, rt);
414 mul(rd, rs, scratch);
415 }
416}
417
418void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
419 if (rt.is_reg()) {
420 if (kArchVariant != kMips64r6) {
421 mult(rs, rt.rm());
422 mfhi(rd);
423 } else {
424 muh(rd, rs, rt.rm());
425 }
426 } else {
427 // li handles the relocation.
428 UseScratchRegisterScope temps(this);
429 Register scratch = temps.Acquire();
430 DCHECK(rs != scratch);
431 li(scratch, rt);
432 if (kArchVariant != kMips64r6) {
433 mult(rs, scratch);
434 mfhi(rd);
435 } else {
436 muh(rd, rs, scratch);
437 }
438 }
439}
440
441void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
442 if (rt.is_reg()) {
443 if (kArchVariant != kMips64r6) {
444 multu(rs, rt.rm());
445 mfhi(rd);
446 } else {
447 muhu(rd, rs, rt.rm());
448 }
449 } else {
450 // li handles the relocation.
451 UseScratchRegisterScope temps(this);
452 Register scratch = temps.Acquire();
453 DCHECK(rs != scratch);
454 li(scratch, rt);
455 if (kArchVariant != kMips64r6) {
456 multu(rs, scratch);
457 mfhi(rd);
458 } else {
459 muhu(rd, rs, scratch);
460 }
461 }
462}
463
464void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
465 if (rt.is_reg()) {
466 if (kArchVariant == kMips64r6) {
467 dmul(rd, rs, rt.rm());
468 } else {
469 dmult(rs, rt.rm());
470 mflo(rd);
471 }
472 } else {
473 // li handles the relocation.
474 UseScratchRegisterScope temps(this);
475 Register scratch = temps.Acquire();
476 DCHECK(rs != scratch);
477 li(scratch, rt);
478 if (kArchVariant == kMips64r6) {
479 dmul(rd, rs, scratch);
480 } else {
481 dmult(rs, scratch);
482 mflo(rd);
483 }
484 }
485}
486
487void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
488 if (rt.is_reg()) {
489 if (kArchVariant == kMips64r6) {
490 dmuh(rd, rs, rt.rm());
491 } else {
492 dmult(rs, rt.rm());
493 mfhi(rd);
494 }
495 } else {
496 // li handles the relocation.
497 UseScratchRegisterScope temps(this);
498 Register scratch = temps.Acquire();
499 DCHECK(rs != scratch);
500 li(scratch, rt);
501 if (kArchVariant == kMips64r6) {
502 dmuh(rd, rs, scratch);
503 } else {
504 dmult(rs, scratch);
505 mfhi(rd);
506 }
507 }
508}
509
510void MacroAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) {
511 if (rt.is_reg()) {
512 if (kArchVariant == kMips64r6) {
513 dmuhu(rd, rs, rt.rm());
514 } else {
515 dmultu(rs, rt.rm());
516 mfhi(rd);
517 }
518 } else {
519 // li handles the relocation.
520 UseScratchRegisterScope temps(this);
521 Register scratch = temps.Acquire();
522 DCHECK(rs != scratch);
523 li(scratch, rt);
524 if (kArchVariant == kMips64r6) {
525 dmuhu(rd, rs, scratch);
526 } else {
527 dmultu(rs, scratch);
528 mfhi(rd);
529 }
530 }
531}
532
533void MacroAssembler::Mult(Register rs, const Operand& rt) {
534 if (rt.is_reg()) {
535 mult(rs, rt.rm());
536 } else {
537 // li handles the relocation.
538 UseScratchRegisterScope temps(this);
539 Register scratch = temps.Acquire();
540 DCHECK(rs != scratch);
541 li(scratch, rt);
542 mult(rs, scratch);
543 }
544}
545
546void MacroAssembler::Dmult(Register rs, const Operand& rt) {
547 if (rt.is_reg()) {
548 dmult(rs, rt.rm());
549 } else {
550 // li handles the relocation.
551 UseScratchRegisterScope temps(this);
552 Register scratch = temps.Acquire();
553 DCHECK(rs != scratch);
554 li(scratch, rt);
555 dmult(rs, scratch);
556 }
557}
558
559void MacroAssembler::Multu(Register rs, const Operand& rt) {
560 if (rt.is_reg()) {
561 multu(rs, rt.rm());
562 } else {
563 // li handles the relocation.
564 UseScratchRegisterScope temps(this);
565 Register scratch = temps.Acquire();
566 DCHECK(rs != scratch);
567 li(scratch, rt);
568 multu(rs, scratch);
569 }
570}
571
572void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
573 if (rt.is_reg()) {
574 dmultu(rs, rt.rm());
575 } else {
576 // li handles the relocation.
577 UseScratchRegisterScope temps(this);
578 Register scratch = temps.Acquire();
579 DCHECK(rs != scratch);
580 li(scratch, rt);
581 dmultu(rs, scratch);
582 }
583}
584
585void MacroAssembler::Div(Register rs, const Operand& rt) {
586 if (rt.is_reg()) {
587 div(rs, rt.rm());
588 } else {
589 // li handles the relocation.
590 UseScratchRegisterScope temps(this);
591 Register scratch = temps.Acquire();
592 DCHECK(rs != scratch);
593 li(scratch, rt);
594 div(rs, scratch);
595 }
596}
597
598void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
599 if (rt.is_reg()) {
600 if (kArchVariant != kMips64r6) {
601 div(rs, rt.rm());
602 mflo(res);
603 } else {
604 div(res, rs, rt.rm());
605 }
606 } else {
607 // li handles the relocation.
608 UseScratchRegisterScope temps(this);
609 Register scratch = temps.Acquire();
610 DCHECK(rs != scratch);
611 li(scratch, rt);
612 if (kArchVariant != kMips64r6) {
613 div(rs, scratch);
614 mflo(res);
615 } else {
616 div(res, rs, scratch);
617 }
618 }
619}
620
621void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
622 if (rt.is_reg()) {
623 if (kArchVariant != kMips64r6) {
624 div(rs, rt.rm());
625 mfhi(rd);
626 } else {
627 mod(rd, rs, rt.rm());
628 }
629 } else {
630 // li handles the relocation.
631 UseScratchRegisterScope temps(this);
632 Register scratch = temps.Acquire();
633 DCHECK(rs != scratch);
634 li(scratch, rt);
635 if (kArchVariant != kMips64r6) {
636 div(rs, scratch);
637 mfhi(rd);
638 } else {
639 mod(rd, rs, scratch);
640 }
641 }
642}
643
644void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
645 if (rt.is_reg()) {
646 if (kArchVariant != kMips64r6) {
647 divu(rs, rt.rm());
648 mfhi(rd);
649 } else {
650 modu(rd, rs, rt.rm());
651 }
652 } else {
653 // li handles the relocation.
654 UseScratchRegisterScope temps(this);
655 Register scratch = temps.Acquire();
656 DCHECK(rs != scratch);
657 li(scratch, rt);
658 if (kArchVariant != kMips64r6) {
659 divu(rs, scratch);
660 mfhi(rd);
661 } else {
662 modu(rd, rs, scratch);
663 }
664 }
665}
666
667void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
668 if (rt.is_reg()) {
669 ddiv(rs, rt.rm());
670 } else {
671 // li handles the relocation.
672 UseScratchRegisterScope temps(this);
673 Register scratch = temps.Acquire();
674 DCHECK(rs != scratch);
675 li(scratch, rt);
676 ddiv(rs, scratch);
677 }
678}
679
680void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
681 if (kArchVariant != kMips64r6) {
682 if (rt.is_reg()) {
683 ddiv(rs, rt.rm());
684 mflo(rd);
685 } else {
686 // li handles the relocation.
687 UseScratchRegisterScope temps(this);
688 Register scratch = temps.Acquire();
689 DCHECK(rs != scratch);
690 li(scratch, rt);
691 ddiv(rs, scratch);
692 mflo(rd);
693 }
694 } else {
695 if (rt.is_reg()) {
696 ddiv(rd, rs, rt.rm());
697 } else {
698 // li handles the relocation.
699 UseScratchRegisterScope temps(this);
700 Register scratch = temps.Acquire();
701 DCHECK(rs != scratch);
702 li(scratch, rt);
703 ddiv(rd, rs, scratch);
704 }
705 }
706}
707
708void MacroAssembler::Divu(Register rs, const Operand& rt) {
709 if (rt.is_reg()) {
710 divu(rs, rt.rm());
711 } else {
712 // li handles the relocation.
713 UseScratchRegisterScope temps(this);
714 Register scratch = temps.Acquire();
715 DCHECK(rs != scratch);
716 li(scratch, rt);
717 divu(rs, scratch);
718 }
719}
720
721void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
722 if (rt.is_reg()) {
723 if (kArchVariant != kMips64r6) {
724 divu(rs, rt.rm());
725 mflo(res);
726 } else {
727 divu(res, rs, rt.rm());
728 }
729 } else {
730 // li handles the relocation.
731 UseScratchRegisterScope temps(this);
732 Register scratch = temps.Acquire();
733 DCHECK(rs != scratch);
734 li(scratch, rt);
735 if (kArchVariant != kMips64r6) {
736 divu(rs, scratch);
737 mflo(res);
738 } else {
739 divu(res, rs, scratch);
740 }
741 }
742}
743
744void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
745 if (rt.is_reg()) {
746 ddivu(rs, rt.rm());
747 } else {
748 // li handles the relocation.
749 UseScratchRegisterScope temps(this);
750 Register scratch = temps.Acquire();
751 DCHECK(rs != scratch);
752 li(scratch, rt);
753 ddivu(rs, scratch);
754 }
755}
756
757void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
758 if (rt.is_reg()) {
759 if (kArchVariant != kMips64r6) {
760 ddivu(rs, rt.rm());
761 mflo(res);
762 } else {
763 ddivu(res, rs, rt.rm());
764 }
765 } else {
766 // li handles the relocation.
767 UseScratchRegisterScope temps(this);
768 Register scratch = temps.Acquire();
769 DCHECK(rs != scratch);
770 li(scratch, rt);
771 if (kArchVariant != kMips64r6) {
772 ddivu(rs, scratch);
773 mflo(res);
774 } else {
775 ddivu(res, rs, scratch);
776 }
777 }
778}
779
780void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
781 if (kArchVariant != kMips64r6) {
782 if (rt.is_reg()) {
783 ddiv(rs, rt.rm());
784 mfhi(rd);
785 } else {
786 // li handles the relocation.
787 UseScratchRegisterScope temps(this);
788 Register scratch = temps.Acquire();
789 DCHECK(rs != scratch);
790 li(scratch, rt);
791 ddiv(rs, scratch);
792 mfhi(rd);
793 }
794 } else {
795 if (rt.is_reg()) {
796 dmod(rd, rs, rt.rm());
797 } else {
798 // li handles the relocation.
799 UseScratchRegisterScope temps(this);
800 Register scratch = temps.Acquire();
801 DCHECK(rs != scratch);
802 li(scratch, rt);
803 dmod(rd, rs, scratch);
804 }
805 }
806}
807
808void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
809 if (kArchVariant != kMips64r6) {
810 if (rt.is_reg()) {
811 ddivu(rs, rt.rm());
812 mfhi(rd);
813 } else {
814 // li handles the relocation.
815 UseScratchRegisterScope temps(this);
816 Register scratch = temps.Acquire();
817 DCHECK(rs != scratch);
818 li(scratch, rt);
819 ddivu(rs, scratch);
820 mfhi(rd);
821 }
822 } else {
823 if (rt.is_reg()) {
824 dmodu(rd, rs, rt.rm());
825 } else {
826 // li handles the relocation.
827 UseScratchRegisterScope temps(this);
828 Register scratch = temps.Acquire();
829 DCHECK(rs != scratch);
830 li(scratch, rt);
831 dmodu(rd, rs, scratch);
832 }
833 }
834}
835
836void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
837 if (rt.is_reg()) {
838 and_(rd, rs, rt.rm());
839 } else {
840 if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
841 andi(rd, rs, static_cast<int32_t>(rt.immediate()));
842 } else {
843 // li handles the relocation.
844 UseScratchRegisterScope temps(this);
845 Register scratch = temps.Acquire();
846 DCHECK(rs != scratch);
847 li(scratch, rt);
848 and_(rd, rs, scratch);
849 }
850 }
851}
852
853void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
854 if (rt.is_reg()) {
855 or_(rd, rs, rt.rm());
856 } else {
857 if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
858 ori(rd, rs, static_cast<int32_t>(rt.immediate()));
859 } else {
860 // li handles the relocation.
861 UseScratchRegisterScope temps(this);
862 Register scratch = temps.Acquire();
863 DCHECK(rs != scratch);
864 li(scratch, rt);
865 or_(rd, rs, scratch);
866 }
867 }
868}
869
870void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
871 if (rt.is_reg()) {
872 xor_(rd, rs, rt.rm());
873 } else {
874 if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
875 xori(rd, rs, static_cast<int32_t>(rt.immediate()));
876 } else {
877 // li handles the relocation.
878 UseScratchRegisterScope temps(this);
879 Register scratch = temps.Acquire();
880 DCHECK(rs != scratch);
881 li(scratch, rt);
882 xor_(rd, rs, scratch);
883 }
884 }
885}
886
887void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
888 if (rt.is_reg()) {
889 nor(rd, rs, rt.rm());
890 } else {
891 // li handles the relocation.
892 UseScratchRegisterScope temps(this);
893 Register scratch = temps.Acquire();
894 DCHECK(rs != scratch);
895 li(scratch, rt);
896 nor(rd, rs, scratch);
897 }
898}
899
900void MacroAssembler::Neg(Register rs, const Operand& rt) {
901 dsubu(rs, zero_reg, rt.rm());
902}
903
904void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
905 if (rt.is_reg()) {
906 slt(rd, rs, rt.rm());
907 } else {
908 if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
909 slti(rd, rs, static_cast<int32_t>(rt.immediate()));
910 } else {
911 // li handles the relocation.
912 UseScratchRegisterScope temps(this);
913 BlockTrampolinePoolScope block_trampoline_pool(this);
914 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
915 DCHECK(rs != scratch);
916 li(scratch, rt);
917 slt(rd, rs, scratch);
918 }
919 }
920}
921
922void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
923 if (rt.is_reg()) {
924 sltu(rd, rs, rt.rm());
925 } else {
926 const uint64_t int16_min = std::numeric_limits<int16_t>::min();
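 // sltiu sign-extends its 16-bit immediate before doing an unsigned
 // compare, so a negative immediate reaches the very top of the unsigned
 // range; both windows are handled here without a scratch register.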
927 if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
928 // Imm range is: [0, 32767].
929 sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
930 } else if (is_uint15(rt.immediate() - int16_min) &&
931 !MustUseReg(rt.rmode())) {
932 // Imm range is: [max_unsigned-32767,max_unsigned].
933 sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
934 } else {
935 // li handles the relocation.
936 UseScratchRegisterScope temps(this);
937 BlockTrampolinePoolScope block_trampoline_pool(this);
938 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
939 DCHECK(rs != scratch);
940 li(scratch, rt);
941 sltu(rd, rs, scratch);
942 }
943 }
944}
945
946void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) {
947 if (rt.is_reg()) {
948 slt(rd, rt.rm(), rs);
949 } else {
950 if (rt.immediate() == 0 && !MustUseReg(rt.rmode())) {
951 slt(rd, zero_reg, rs);
952 } else {
953 // li handles the relocation.
954 UseScratchRegisterScope temps(this);
955 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
956 BlockTrampolinePoolScope block_trampoline_pool(this);
957 DCHECK(rs != scratch);
958 li(scratch, rt);
959 slt(rd, scratch, rs);
960 }
961 }
962 xori(rd, rd, 1);
963}
964
965void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
966 if (rt.is_reg()) {
967 sltu(rd, rt.rm(), rs);
968 } else {
969 if (rt.immediate() == 0 && !MustUseReg(rt.rmode())) {
970 sltu(rd, zero_reg, rs);
971 } else {
972 // li handles the relocation.
973 UseScratchRegisterScope temps(this);
974 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
975 BlockTrampolinePoolScope block_trampoline_pool(this);
976 DCHECK(rs != scratch);
977 li(scratch, rt);
978 sltu(rd, scratch, rs);
979 }
980 }
981 xori(rd, rd, 1);
982}
983
984void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) {
985 Slt(rd, rs, rt);
986 xori(rd, rd, 1);
987}
988
989void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
990 Sltu(rd, rs, rt);
991 xori(rd, rd, 1);
992}
993
994void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
995 if (rt.is_reg()) {
996 slt(rd, rt.rm(), rs);
997 } else {
998 if (rt.immediate() == 0 && !MustUseReg(rt.rmode())) {
999 slt(rd, zero_reg, rs);
1000 } else {
1001 // li handles the relocation.
1002 UseScratchRegisterScope temps(this);
1003 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
1004 BlockTrampolinePoolScope block_trampoline_pool(this);
1005 DCHECK(rs != scratch);
1006 li(scratch, rt);
1007 slt(rd, scratch, rs);
1008 }
1009 }
1010}
1011
1012void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
1013 if (rt.is_reg()) {
1014 sltu(rd, rt.rm(), rs);
1015 } else {
1016 if (rt.immediate() == 0 && !MustUseReg(rt.rmode())) {
1017 sltu(rd, zero_reg, rs);
1018 } else {
1019 // li handles the relocation.
1020 UseScratchRegisterScope temps(this);
1021 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
1022 BlockTrampolinePoolScope block_trampoline_pool(this);
1023 DCHECK(rs != scratch);
1024 li(scratch, rt);
1025 sltu(rd, scratch, rs);
1026 }
1027 }
1028}
1029
1030void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1031 if (rt.is_reg()) {
1032 rotrv(rd, rs, rt.rm());
1033 } else {
1034 int64_t ror_value = rt.immediate() % 32;
1035 if (ror_value < 0) {
1036 ror_value += 32;
1037 }
1038 rotr(rd, rs, ror_value);
1039 }
1040}
1041
1042void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1043 if (rt.is_reg()) {
1044 drotrv(rd, rs, rt.rm());
1045 } else {
1046 int64_t dror_value = rt.immediate() % 64;
1047 if (dror_value < 0) dror_value += 64;
1048 if (dror_value <= 31) {
1049 drotr(rd, rs, dror_value);
1050 } else {
1051 drotr32(rd, rs, dror_value - 32);
1052 }
1053 }
1054}
1055
1056void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1057 pref(hint, rs);
1058}
1059
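// Lsa/Dlsa compute rd = rt + (rs << sa). On r6 with a small shift amount
// this is a single lsa/dlsa instruction; otherwise it falls back to a
// shift into a temporary followed by an add.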
1060void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1061 Register scratch) {
1062 DCHECK(sa >= 1 && sa <= 31);
1063 if (kArchVariant == kMips64r6 && sa <= 4) {
1064 lsa(rd, rt, rs, sa - 1);
1065 } else {
1066 Register tmp = rd == rt ? scratch : rd;
1067 DCHECK(tmp != rt);
1068 sll(tmp, rs, sa);
1069 Addu(rd, rt, tmp);
1070 }
1071}
1072
1073void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1074 Register scratch) {
1075 DCHECK(sa >= 1 && sa <= 63);
1076 if (kArchVariant == kMips64r6 && sa <= 4) {
1077 dlsa(rd, rt, rs, sa - 1);
1078 } else {
1079 Register tmp = rd == rt ? scratch : rd;
1080 DCHECK(tmp != rt);
1081 if (sa <= 31)
1082 dsll(tmp, rs, sa);
1083 else
1084 dsll32(tmp, rs, sa - 32);
1085 Daddu(rd, rt, tmp);
1086 }
1087}
1088
1089void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
1090 if (is_trampoline_emitted()) {
1091 Label skip;
1092 bnvc(rs, rt, &skip);
1094 bind(&skip);
1095 } else {
1096 bovc(rs, rt, L);
1097 }
1098}
1099
1100void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
1101 if (is_trampoline_emitted()) {
1102 Label skip;
1103 bovc(rs, rt, &skip);
1105 bind(&skip);
1106 } else {
1107 bnvc(rs, rt, L);
1108 }
1109}
1110
1111// ------------Pseudo-instructions-------------
1112
1113// Change endianness
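// wsbh swaps the bytes inside each halfword; a 16-bit rotate (32-bit case)
// or dsbh+dshd (64-bit case) then completes the full byte reversal, with a
// final sign- or zero-extension depending on the signed/unsigned variant.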
1114void MacroAssembler::ByteSwapSigned(Register dest, Register src,
1115 int operand_size) {
1116 DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
1118 if (operand_size == 2) {
1119 wsbh(dest, src);
1120 seh(dest, dest);
1121 } else if (operand_size == 4) {
1122 wsbh(dest, src);
1123 rotr(dest, dest, 16);
1124 } else {
1125 dsbh(dest, src);
1126 dshd(dest, dest);
1127 }
1128}
1129
1130void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
1131 int operand_size) {
1132 DCHECK(operand_size == 2 || operand_size == 4);
1133 if (operand_size == 2) {
1134 wsbh(dest, src);
1135 andi(dest, dest, 0xFFFF);
1136 } else {
1137 wsbh(dest, src);
1138 rotr(dest, dest, 16);
1139 dinsu_(dest, zero_reg, 32, 32);
1140 }
1141}
1142
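// The U-prefixed load/store macros handle potentially unaligned addresses:
// r6 cores do this in hardware, while older cores use the paired left/right
// partial-word instructions (lwl/lwr, swl/swr, ldl/ldr, sdl/sdr) or byte
// accesses for halfwords.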
1143void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1144 DCHECK(rd != at);
1145 DCHECK(rs.rm() != at);
1146 if (kArchVariant == kMips64r6) {
1147 Lw(rd, rs);
1148 } else {
1150 DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
1151 MemOperand source = rs;
1152 // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
1154 if (rd != source.rm()) {
1155 lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
1156 lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
1157 } else {
1158 UseScratchRegisterScope temps(this);
1159 Register scratch = temps.Acquire();
1160 lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1161 lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1162 mov(rd, scratch);
1163 }
1164 }
1165}
1166
1167void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
1168 if (kArchVariant == kMips64r6) {
1169 Lwu(rd, rs);
1170 } else {
1172 Ulw(rd, rs);
1173 Dext(rd, rd, 0, 32);
1174 }
1175}
1176
1177void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1178 DCHECK(rd != at);
1179 DCHECK(rs.rm() != at);
1180 DCHECK(rd != rs.rm());
1181 if (kArchVariant == kMips64r6) {
1182 Sw(rd, rs);
1183 } else {
1185 DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
1186 MemOperand source = rs;
1187 // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
1189 swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
1190 swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
1191 }
1192}
1193
1194void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1195 DCHECK(rd != at);
1196 DCHECK(rs.rm() != at);
1197 if (kArchVariant == kMips64r6) {
1198 Lh(rd, rs);
1199 } else {
1201 MemOperand source = rs;
1202 // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
1204 UseScratchRegisterScope temps(this);
1205 Register scratch = temps.Acquire();
1206 if (source.rm() == scratch) {
1207#if defined(V8_TARGET_LITTLE_ENDIAN)
1208 Lb(rd, MemOperand(source.rm(), source.offset() + 1));
1209 Lbu(scratch, source);
1210#elif defined(V8_TARGET_BIG_ENDIAN)
1211 Lb(rd, source);
1212 Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1213#endif
1214 } else {
1215#if defined(V8_TARGET_LITTLE_ENDIAN)
1216 Lbu(scratch, source);
1217 Lb(rd, MemOperand(source.rm(), source.offset() + 1));
1218#elif defined(V8_TARGET_BIG_ENDIAN)
1219 Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1220 Lb(rd, source);
1221#endif
1222 }
1223 dsll(rd, rd, 8);
1224 or_(rd, rd, scratch);
1225 }
1226}
1227
1228void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1229 DCHECK(rd != at);
1230 DCHECK(rs.rm() != at);
1231 if (kArchVariant == kMips64r6) {
1232 Lhu(rd, rs);
1233 } else {
1235 MemOperand source = rs;
1236 // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
1238 UseScratchRegisterScope temps(this);
1239 Register scratch = temps.Acquire();
1240 if (source.rm() == scratch) {
1241#if defined(V8_TARGET_LITTLE_ENDIAN)
1242 Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
1243 Lbu(scratch, source);
1244#elif defined(V8_TARGET_BIG_ENDIAN)
1245 Lbu(rd, source);
1246 Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1247#endif
1248 } else {
1249#if defined(V8_TARGET_LITTLE_ENDIAN)
1250 Lbu(scratch, source);
1251 Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
1252#elif defined(V8_TARGET_BIG_ENDIAN)
1253 Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
1254 Lbu(rd, source);
1255#endif
1256 }
1257 dsll(rd, rd, 8);
1258 or_(rd, rd, scratch);
1259 }
1260}
1261
1262void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1263 DCHECK(rd != at);
1264 DCHECK(rs.rm() != at);
1265 DCHECK(rs.rm() != scratch);
1266 DCHECK(scratch != at);
1267 if (kArchVariant == kMips64r6) {
1268 Sh(rd, rs);
1269 } else {
1271 MemOperand source = rs;
1272 // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
1274
1275 if (scratch != rd) {
1276 mov(scratch, rd);
1277 }
1278
1279#if defined(V8_TARGET_LITTLE_ENDIAN)
1280 Sb(scratch, source);
1281 srl(scratch, scratch, 8);
1282 Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1283#elif defined(V8_TARGET_BIG_ENDIAN)
1284 Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1285 srl(scratch, scratch, 8);
1286 Sb(scratch, source);
1287#endif
1288 }
1289}
1290
1291void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
1292 DCHECK(rd != at);
1293 DCHECK(rs.rm() != at);
1294 if (kArchVariant == kMips64r6) {
1295 Ld(rd, rs);
1296 } else {
1298 DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
1299 MemOperand source = rs;
1300 // Adjust offset for two accesses and check if offset + 7 fits into int16_t.
1302 if (rd != source.rm()) {
1303 ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
1304 ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
1305 } else {
1306 UseScratchRegisterScope temps(this);
1307 Register scratch = temps.Acquire();
1308 ldr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1309 ldl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1310 mov(rd, scratch);
1311 }
1312 }
1313}
1314
1315// Load a consecutive 32-bit word pair into a 64-bit register,
1316// putting the first word in the low bits
1317// and the second word in the high bits.
1318void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1319 Register scratch) {
1320 Lwu(rd, rs);
1321 Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1322 dsll32(scratch, scratch, 0);
1323 Daddu(rd, rd, scratch);
1324}
1325
1326void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
1327 DCHECK(rd != at);
1328 DCHECK(rs.rm() != at);
1329 if (kArchVariant == kMips64r6) {
1330 Sd(rd, rs);
1331 } else {
1333 DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
1334 MemOperand source = rs;
1335 // Adjust offset for two accesses and check if offset + 7 fits into int16_t.
1337 sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset));
1338 sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset));
1339 }
1340}
1341
1342// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
1343void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1344 Register scratch) {
1345 Sw(rd, rs);
1346 dsrl32(scratch, rd, 0);
1347 Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1348}
1349
1350void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1351 Register scratch) {
1352 if (kArchVariant == kMips64r6) {
1353 Lwc1(fd, rs);
1354 } else {
1356 Ulw(scratch, rs);
1357 mtc1(scratch, fd);
1358 }
1359}
1360
1361void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1362 Register scratch) {
1363 if (kArchVariant == kMips64r6) {
1364 Swc1(fd, rs);
1365 } else {
1367 mfc1(scratch, fd);
1368 Usw(scratch, rs);
1369 }
1370}
1371
1372void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1373 Register scratch) {
1374 DCHECK(scratch != at);
1375 if (kArchVariant == kMips64r6) {
1376 Ldc1(fd, rs);
1377 } else {
1379 Uld(scratch, rs);
1380 dmtc1(scratch, fd);
1381 }
1382}
1383
1384void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1385 Register scratch) {
1386 DCHECK(scratch != at);
1387 if (kArchVariant == kMips64r6) {
1388 Sdc1(fd, rs);
1389 } else {
1391 dmfc1(scratch, fd);
1392 Usd(scratch, rs);
1393 }
1394}
1395
1396void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
1397 MemOperand source = rs;
1398 AdjustBaseAndOffset(&source);
1399 lb(rd, source);
1400}
1401
1402void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
1403 MemOperand source = rs;
1404 AdjustBaseAndOffset(&source);
1405 lbu(rd, source);
1406}
1407
1408void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
1409 MemOperand source = rs;
1410 AdjustBaseAndOffset(&source);
1411 sb(rd, source);
1412}
1413
1414void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
1415 MemOperand source = rs;
1416 AdjustBaseAndOffset(&source);
1417 lh(rd, source);
1418}
1419
1420void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
1421 MemOperand source = rs;
1422 AdjustBaseAndOffset(&source);
1423 lhu(rd, source);
1424}
1425
1426void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
1427 MemOperand source = rs;
1428 AdjustBaseAndOffset(&source);
1429 sh(rd, source);
1430}
1431
1432void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
1433 MemOperand source = rs;
1434 AdjustBaseAndOffset(&source);
1435 lw(rd, source);
1436}
1437
1438void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
1439 MemOperand source = rs;
1440 AdjustBaseAndOffset(&source);
1441 lwu(rd, source);
1442}
1443
1444void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
1445 MemOperand source = rs;
1446 AdjustBaseAndOffset(&source);
1447 sw(rd, source);
1448}
1449
1450void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
1451 MemOperand source = rs;
1452 AdjustBaseAndOffset(&source);
1453 ld(rd, source);
1454}
1455
1456void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
1457 MemOperand source = rs;
1458 AdjustBaseAndOffset(&source);
1459 sd(rd, source);
1460}
1461
1462void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
1463 MemOperand tmp = src;
1464 AdjustBaseAndOffset(&tmp);
1465 lwc1(fd, tmp);
1466}
1467
1468void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) {
1469 MemOperand tmp = src;
1470 AdjustBaseAndOffset(&tmp);
1471 swc1(fs, tmp);
1472}
1473
1474void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
1475 MemOperand tmp = src;
1476 AdjustBaseAndOffset(&tmp);
1477 ldc1(fd, tmp);
1478}
1479
1480void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
1481 MemOperand tmp = src;
1482 AdjustBaseAndOffset(&tmp);
1483 sdc1(fs, tmp);
1484}
1485
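// Ll/Lld and Sc/Scd wrap the load-linked / store-conditional instructions.
// Their immediate field is only 9 bits on r6 (16 bits otherwise), so larger
// offsets are folded into a scratch base register first.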
1486void MacroAssembler::Ll(Register rd, const MemOperand& rs) {
1487 bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
1488 : is_int16(rs.offset());
1489 if (is_one_instruction) {
1490 ll(rd, rs);
1491 } else {
1492 UseScratchRegisterScope temps(this);
1493 Register scratch = temps.Acquire();
1494 li(scratch, rs.offset());
1495 daddu(scratch, scratch, rs.rm());
1496 ll(rd, MemOperand(scratch, 0));
1497 }
1498}
1499
1500void MacroAssembler::Lld(Register rd, const MemOperand& rs) {
1501 bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
1502 : is_int16(rs.offset());
1503 if (is_one_instruction) {
1504 lld(rd, rs);
1505 } else {
1506 UseScratchRegisterScope temps(this);
1507 Register scratch = temps.Acquire();
1508 li(scratch, rs.offset());
1509 daddu(scratch, scratch, rs.rm());
1510 lld(rd, MemOperand(scratch, 0));
1511 }
1512}
1513
1514void MacroAssembler::Sc(Register rd, const MemOperand& rs) {
1515 bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
1516 : is_int16(rs.offset());
1517 if (is_one_instruction) {
1518 sc(rd, rs);
1519 } else {
1520 UseScratchRegisterScope temps(this);
1521 Register scratch = temps.Acquire();
1522 li(scratch, rs.offset());
1523 daddu(scratch, scratch, rs.rm());
1524 sc(rd, MemOperand(scratch, 0));
1525 }
1526}
1527
1528void MacroAssembler::Scd(Register rd, const MemOperand& rs) {
1529 bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
1530 : is_int16(rs.offset());
1531 if (is_one_instruction) {
1532 scd(rd, rs);
1533 } else {
1534 UseScratchRegisterScope temps(this);
1535 Register scratch = temps.Acquire();
1536 li(scratch, rs.offset());
1537 daddu(scratch, scratch, rs.rm());
1538 scd(rd, MemOperand(scratch, 0));
1539 }
1540}
1541
1542void MacroAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
1543 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1544 // non-isolate-independent code. In many cases it might be cheaper than
1545 // embedding the relocatable value.
1546 if (root_array_available_ && options().isolate_independent_code) {
1547 IndirectLoadConstant(dst, value);
1548 return;
1549 }
1550 li(dst, Operand(value), mode);
1551}
1552
1553void MacroAssembler::li(Register dst, ExternalReference reference,
1554 LiFlags mode) {
1555 if (root_array_available()) {
1556 if (reference.IsIsolateFieldId()) {
1557 Daddu(dst, kRootRegister, Operand(reference.offset_from_root_register()));
1558 return;
1559 }
1560 if (options().isolate_independent_code) {
1561 IndirectLoadExternalReference(dst, reference);
1562 return;
1563 }
1564 }
1565
1566 // External references should not get created with IDs if
1567 // `!root_array_available()`.
1568 CHECK(!reference.IsIsolateFieldId());
1569 li(dst, Operand(reference), mode);
1570}
1571
1572static inline int InstrCountForLiLower32Bit(int64_t value) {
1573 if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
1574 (value & kImm16Mask)) {
1575 return 2;
1576 } else {
1577 return 1;
1578 }
1579}
1580
1581void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1582 if (is_int16(static_cast<int32_t>(j.immediate()))) {
1583 daddiu(rd, zero_reg, (j.immediate() & kImm16Mask));
1584 } else if (!(j.immediate() & kUpper16MaskOf64)) {
1585 ori(rd, zero_reg, j.immediate() & kImm16Mask);
1586 } else {
1587 lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
1588 if (j.immediate() & kImm16Mask) {
1589 ori(rd, rd, j.immediate() & kImm16Mask);
1590 }
1591 }
1592}
1593
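// Counts the instructions needed when the value's upper and lower 32 bits
// are identical; such values can be built by loading the low word and then
// duplicating it into the high word with Dins.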
1594static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
1595 uint32_t x = static_cast<uint32_t>(value);
1596 uint32_t y = static_cast<uint32_t>(value >> 32);
1597
1598 if (x == y) {
1599 return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
1600 }
1601
1602 return INT_MAX;
1603}
1604
1605int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
1606 if (is_int32(value)) {
1607 return InstrCountForLiLower32Bit(value);
1608 } else {
1609 int bit31 = value >> 31 & 0x1;
1610 if ((value & kUpper16MaskOf64) == 0 && is_int16(value >> 32) &&
1611 kArchVariant == kMips64r6) {
1612 return 2;
1613 } else if ((value & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
1614 kArchVariant == kMips64r6) {
1615 return 2;
1616 } else if ((value & kImm16Mask) == 0 && is_int16((value >> 32) + bit31) &&
1617 kArchVariant == kMips64r6) {
1618 return 2;
1619 } else if ((value & kImm16Mask) == 0 &&
1620 ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
1621 kArchVariant == kMips64r6) {
1622 return 2;
1623 } else if (is_int16(static_cast<int32_t>(value)) &&
1624 is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
1625 return 2;
1626 } else if (is_int16(static_cast<int32_t>(value)) &&
1627 ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
1628 kArchVariant == kMips64r6) {
1629 return 2;
1630 } else if (base::bits::IsPowerOfTwo(value + 1) ||
1631 value == std::numeric_limits<int64_t>::max()) {
1632 return 2;
1633 } else {
1634 int shift_cnt = base::bits::CountTrailingZeros64(value);
1635 int rep32_count = InstrCountForLoadReplicatedConst32(value);
1636 int64_t tmp = value >> shift_cnt;
1637 if (is_uint16(tmp)) {
1638 return 2;
1639 } else if (is_int16(tmp)) {
1640 return 2;
1641 } else if (rep32_count < 3) {
1642 return 2;
1643 } else if (is_int32(tmp)) {
1644 return 3;
1645 } else {
1646 shift_cnt = 16 + base::bits::CountTrailingZeros64(value >> 16);
1647 tmp = value >> shift_cnt;
1648 if (is_uint16(tmp)) {
1649 return 3;
1650 } else if (is_int16(tmp)) {
1651 return 3;
1652 } else if (rep32_count < 4) {
1653 return 3;
1654 } else if (kArchVariant == kMips64r6) {
1655 int64_t imm = value;
1656 int count = InstrCountForLiLower32Bit(imm);
1657 imm = (imm >> 32) + bit31;
1658 if (imm & kImm16Mask) {
1659 count++;
1660 }
1661 imm = (imm >> 16) + (imm >> 15 & 0x1);
1662 if (imm & kImm16Mask) {
1663 count++;
1664 }
1665 return count;
1666 } else {
1667 if (is_int48(value)) {
1668 int64_t k = value >> 16;
1669 int count = InstrCountForLiLower32Bit(k) + 1;
1670 if (value & kImm16Mask) {
1671 count++;
1672 }
1673 return count;
1674 } else {
1675 int64_t k = value >> 32;
1676 int count = InstrCountForLiLower32Bit(k);
1677 if ((value >> 16) & kImm16Mask) {
1678 count += 3;
1679 if (value & kImm16Mask) {
1680 count++;
1681 }
1682 } else {
1683 count++;
1684 if (value & kImm16Mask) {
1685 count++;
1686 }
1687 }
1688 return count;
1689 }
1690 }
1691 }
1692 }
1693 }
1694 UNREACHABLE();
1695 return INT_MAX;
1696}
1697
1698// All changes to if...else conditions here must be added to
1699// InstrCountForLi64Bit as well.
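// li_optimized picks the shortest instruction sequence that materializes a
// 64-bit immediate; InstrCountForLi64Bit above mirrors this case analysis
// so the cost of a load can be computed without emitting it.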
1700void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
1701 DCHECK(!j.is_reg());
1702 DCHECK(!MustUseReg(j.rmode()));
1703 DCHECK(mode == OPTIMIZE_SIZE);
1704 BlockTrampolinePoolScope block_trampoline_pool(this);
1705 // Normal load of an immediate value which does not need Relocation Info.
1706 if (is_int32(j.immediate())) {
1707 LiLower32BitHelper(rd, j);
1708 } else {
1709 int bit31 = j.immediate() >> 31 & 0x1;
1710 if ((j.immediate() & kUpper16MaskOf64) == 0 &&
1711 is_int16(j.immediate() >> 32) && kArchVariant == kMips64r6) {
1712 // 64-bit value which consists of an unsigned 16-bit value in its
1713 // least significant 32-bits, and a signed 16-bit value in its
1714 // most significant 32-bits.
1715 ori(rd, zero_reg, j.immediate() & kImm16Mask);
1716 dahi(rd, j.immediate() >> 32 & kImm16Mask);
1717 } else if ((j.immediate() & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
1718 kArchVariant == kMips64r6) {
1719 // 64-bit value which consists of an unsigned 16-bit value in its
1720 // least significant 48-bits, and a signed 16-bit value in its
1721 // most significant 16-bits.
1722 ori(rd, zero_reg, j.immediate() & kImm16Mask);
1723 dati(rd, j.immediate() >> 48 & kImm16Mask);
1724 } else if ((j.immediate() & kImm16Mask) == 0 &&
1725 is_int16((j.immediate() >> 32) + bit31) &&
1726 kArchVariant == kMips64r6) {
1727 // 16 LSBs (Least Significant Bits) all set to zero.
1728 // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
1729 lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
1730 dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
1731 } else if ((j.immediate() & kImm16Mask) == 0 &&
1732 ((j.immediate() >> 31) & 0x1FFFF) ==
1733 ((0x20000 - bit31) & 0x1FFFF) &&
1734 kArchVariant == kMips64r6) {
1735 // 16 LSBs all set to zero.
1736 // 48 MSBs hold a signed value which can't be represented by signed
1737 // 32-bit number, and the middle 16 bits are all zero, or all one.
1738 lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
1739 dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
1740 } else if (is_int16(static_cast<int32_t>(j.immediate())) &&
1741 is_int16((j.immediate() >> 32) + bit31) &&
1742 kArchVariant == kMips64r6) {
1743 // 32 LSBs contain a signed 16-bit number.
1744 // 32 MSBs contain a signed 16-bit number.
1745 daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
1746 dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
1747 } else if (is_int16(static_cast<int32_t>(j.immediate())) &&
1748 ((j.immediate() >> 31) & 0x1FFFF) ==
1749 ((0x20000 - bit31) & 0x1FFFF) &&
1750 kArchVariant == kMips64r6) {
1751 // 48 LSBs contain an unsigned 16-bit number.
1752 // 16 MSBs contain a signed 16-bit number.
1753 daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
1754 dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
1755 } else if (base::bits::IsPowerOfTwo(j.immediate() + 1) ||
1756 j.immediate() == std::numeric_limits<int64_t>::max()) {
1757 // 64-bit values which have their "n" LSBs set to one, and their
1758 // "64-n" MSBs set to zero. "n" must meet the restrictions 0 < n < 64.
1759 int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.immediate() + 1);
1760 daddiu(rd, zero_reg, -1);
1761 if (shift_cnt < 32) {
1762 dsrl(rd, rd, shift_cnt);
1763 } else {
1764 dsrl32(rd, rd, shift_cnt & 31);
1765 }
1766 } else {
1767 int shift_cnt = base::bits::CountTrailingZeros64(j.immediate());
1768 int rep32_count = InstrCountForLoadReplicatedConst32(j.immediate());
1769 int64_t tmp = j.immediate() >> shift_cnt;
1770 if (is_uint16(tmp)) {
1771 // Value can be computed by loading a 16-bit unsigned value, and
1772 // then shifting left.
1773 ori(rd, zero_reg, tmp & kImm16Mask);
1774 if (shift_cnt < 32) {
1775 dsll(rd, rd, shift_cnt);
1776 } else {
1777 dsll32(rd, rd, shift_cnt & 31);
1778 }
1779 } else if (is_int16(tmp)) {
1780 // Value can be computed by loading a 16-bit signed value, and
1781 // then shifting left.
1782 daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
1783 if (shift_cnt < 32) {
1784 dsll(rd, rd, shift_cnt);
1785 } else {
1786 dsll32(rd, rd, shift_cnt & 31);
1787 }
1788 } else if (rep32_count < 3) {
1789 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
1790 // value loaded into the 32 LSBs can be loaded with a single
1791 // MIPS instruction.
1792 LiLower32BitHelper(rd, j);
1793 Dins(rd, rd, 32, 32);
1794 } else if (is_int32(tmp)) {
1795 // Loads with 3 instructions.
1796 // Value can be computed by loading a 32-bit signed value, and
1797 // then shifting left.
1798 lui(rd, tmp >> kLuiShift & kImm16Mask);
1799 ori(rd, rd, tmp & kImm16Mask);
1800 if (shift_cnt < 32) {
1801 dsll(rd, rd, shift_cnt);
1802 } else {
1803 dsll32(rd, rd, shift_cnt & 31);
1804 }
1805 } else {
1806 shift_cnt = 16 + base::bits::CountTrailingZeros64(j.immediate() >> 16);
1807 tmp = j.immediate() >> shift_cnt;
1808 if (is_uint16(tmp)) {
1809 // Value can be computed by loading a 16-bit unsigned value,
1810 // shifting left, and "or"ing in another 16-bit unsigned value.
1811 ori(rd, zero_reg, tmp & kImm16Mask);
1812 if (shift_cnt < 32) {
1813 dsll(rd, rd, shift_cnt);
1814 } else {
1815 dsll32(rd, rd, shift_cnt & 31);
1816 }
1817 ori(rd, rd, j.immediate() & kImm16Mask);
1818 } else if (is_int16(tmp)) {
1819 // Value can be computed by loading a 16-bit signed value,
1820 // shifting left, and "or"ing in a 16-bit unsigned value.
1821 daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
1822 if (shift_cnt < 32) {
1823 dsll(rd, rd, shift_cnt);
1824 } else {
1825 dsll32(rd, rd, shift_cnt & 31);
1826 }
1827 ori(rd, rd, j.immediate() & kImm16Mask);
1828 } else if (rep32_count < 4) {
1829 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
1830 // value in the 32 LSBs requires 2 MIPS instructions to load.
1831 LiLower32BitHelper(rd, j);
1832 Dins(rd, rd, 32, 32);
1833 } else if (kArchVariant == kMips64r6) {
1834 // Loads with 3-4 instructions.
1835 // Catch-all case to get any other 64-bit values which aren't
1836 // handled by special cases above.
1837 int64_t imm = j.immediate();
1838 LiLower32BitHelper(rd, j);
1839 imm = (imm >> 32) + bit31;
1840 if (imm & kImm16Mask) {
1841 dahi(rd, imm & kImm16Mask);
1842 }
1843 imm = (imm >> 16) + (imm >> 15 & 0x1);
1844 if (imm & kImm16Mask) {
1845 dati(rd, imm & kImm16Mask);
1846 }
1847 } else {
1848 if (is_int48(j.immediate())) {
1849 Operand k = Operand(j.immediate() >> 16);
1850 LiLower32BitHelper(rd, k);
1851 dsll(rd, rd, 16);
1852 if (j.immediate() & kImm16Mask) {
1853 ori(rd, rd, j.immediate() & kImm16Mask);
1854 }
1855 } else {
1856 Operand k = Operand(j.immediate() >> 32);
1857 LiLower32BitHelper(rd, k);
1858 if ((j.immediate() >> 16) & kImm16Mask) {
1859 dsll(rd, rd, 16);
1860 ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
1861 dsll(rd, rd, 16);
1862 if (j.immediate() & kImm16Mask) {
1863 ori(rd, rd, j.immediate() & kImm16Mask);
1864 }
1865 } else {
1866 dsll32(rd, rd, 0);
1867 if (j.immediate() & kImm16Mask) {
1868 ori(rd, rd, j.immediate() & kImm16Mask);
1869 }
1870 }
1871 }
1872 }
1873 }
1874 }
1875 }
1876}
1877
1878void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1879 DCHECK(!j.is_reg());
1880 BlockTrampolinePoolScope block_trampoline_pool(this);
1881 if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
1882 int li_count = InstrCountForLi64Bit(j.immediate());
1883 int li_neg_count = InstrCountForLi64Bit(-j.immediate());
1884 int li_not_count = InstrCountForLi64Bit(~j.immediate());
1885 // Loading -MIN_INT64 could cause problems, but loading MIN_INT64 takes only
1886 // two instructions so no need to check for this.
1887 if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) {
1888 DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
1889 li_optimized(rd, Operand(-j.immediate()), mode);
1890 Dsubu(rd, zero_reg, rd);
1891 } else if (li_neg_count > li_not_count && li_not_count < li_count - 1) {
1892 DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
1893 li_optimized(rd, Operand(~j.immediate()), mode);
1894 nor(rd, rd, rd);
1895 } else {
1896 li_optimized(rd, j, mode);
1897 }
1898 } else if (MustUseReg(j.rmode())) {
1899 int64_t immediate;
1900 if (j.IsHeapNumberRequest()) {
1901 RequestHeapNumber(j.heap_number_request());
1902 immediate = 0;
1903 } else {
1904 immediate = j.immediate();
1905 }
1906
1907 RecordRelocInfo(j.rmode(), immediate);
1908 if (RelocInfo::IsWasmCanonicalSigId(j.rmode()) ||
1909 RelocInfo::IsWasmCodePointerTableEntry(j.rmode())) {
1910 // wasm_canonical_sig_id and wasm_code_pointer_table_entry are 32-bit
1911 // values.
1912 DCHECK(is_int32(immediate));
1913 lui(rd, (immediate >> 16) & kImm16Mask);
1914 ori(rd, rd, immediate & kImm16Mask);
1915 return;
1916 }
1917 lui(rd, (immediate >> 32) & kImm16Mask);
1918 ori(rd, rd, (immediate >> 16) & kImm16Mask);
1919 dsll(rd, rd, 16);
1920 ori(rd, rd, immediate & kImm16Mask);
1921 } else if (mode == ADDRESS_LOAD) {
1922 // We always need the same number of instructions as we may need to patch
1923 // this code to load another value which may need all 4 instructions.
1924 lui(rd, (j.immediate() >> 32) & kImm16Mask);
1925 ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
1926 dsll(rd, rd, 16);
1927 ori(rd, rd, j.immediate() & kImm16Mask);
1928 } else { // mode == CONSTANT_SIZE - always emit the same instruction
1929 // sequence.
1930 if (kArchVariant == kMips64r6) {
1931 int64_t imm = j.immediate();
1932 lui(rd, imm >> kLuiShift & kImm16Mask);
1933 ori(rd, rd, (imm & kImm16Mask));
1934 imm = (imm >> 32) + ((imm >> 31) & 0x1);
1935 dahi(rd, imm & kImm16Mask & kImm16Mask);
1936 imm = (imm >> 16) + ((imm >> 15) & 0x1);
1937 dati(rd, imm & kImm16Mask & kImm16Mask);
1938 } else {
1939 lui(rd, (j.immediate() >> 48) & kImm16Mask);
1940 ori(rd, rd, (j.immediate() >> 32) & kImm16Mask);
1941 dsll(rd, rd, 16);
1942 ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
1943 dsll(rd, rd, 16);
1944 ori(rd, rd, j.immediate() & kImm16Mask);
1945 }
1946 }
1947}
1948
1949void MacroAssembler::LoadIsolateField(Register dst, IsolateFieldId id) {
1950 li(dst, ExternalReference::Create(id));
1951}
1952
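// MultiPush/MultiPop (and their FPU/MSA variants) walk the register list in
// opposite directions, so a push and a pop with the same list restore every
// register from the slot it was stored to.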
1953void MacroAssembler::MultiPush(RegList regs) {
1954 int16_t num_to_push = regs.Count();
1955 int16_t stack_offset = num_to_push * kPointerSize;
1956
1957 Dsubu(sp, sp, Operand(stack_offset));
1958 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1959 if ((regs.bits() & (1 << i)) != 0) {
1960 stack_offset -= kPointerSize;
1961 Sd(ToRegister(i), MemOperand(sp, stack_offset));
1962 }
1963 }
1964}
1965
1966void MacroAssembler::MultiPop(RegList regs) {
1967 int16_t stack_offset = 0;
1968
1969 for (int16_t i = 0; i < kNumRegisters; i++) {
1970 if ((regs.bits() & (1 << i)) != 0) {
1971 Ld(ToRegister(i), MemOperand(sp, stack_offset));
1972 stack_offset += kPointerSize;
1973 }
1974 }
1975 daddiu(sp, sp, stack_offset);
1976}
1977
1978void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
1979 int16_t num_to_push = regs.Count();
1980 int16_t stack_offset = num_to_push * kDoubleSize;
1981
1982 Dsubu(sp, sp, Operand(stack_offset));
1983 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1984 if ((regs.bits() & (1 << i)) != 0) {
1985 stack_offset -= kDoubleSize;
1986 Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1987 }
1988 }
1989}
1990
1991void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
1992 int16_t stack_offset = 0;
1993
1994 for (int16_t i = 0; i < kNumRegisters; i++) {
1995 if ((regs.bits() & (1 << i)) != 0) {
1996 Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1997 stack_offset += kDoubleSize;
1998 }
1999 }
2000 daddiu(sp, sp, stack_offset);
2001}
2002
2003void MacroAssembler::MultiPushMSA(DoubleRegList regs) {
2004 int16_t num_to_push = regs.Count();
2005 int16_t stack_offset = num_to_push * kSimd128Size;
2006
2007 Dsubu(sp, sp, Operand(stack_offset));
2008 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
2009 if ((regs.bits() & (1 << i)) != 0) {
2010 stack_offset -= kSimd128Size;
2011 st_d(MSARegister::from_code(i), MemOperand(sp, stack_offset));
2012 }
2013 }
2014}
2015
2016void MacroAssembler::MultiPopMSA(DoubleRegList regs) {
2017 int16_t stack_offset = 0;
2018
2019 for (int16_t i = 0; i < kNumRegisters; i++) {
2020 if ((regs.bits() & (1 << i)) != 0) {
2021 ld_d(MSARegister::from_code(i), MemOperand(sp, stack_offset));
2022 stack_offset += kSimd128Size;
2023 }
2024 }
2025 daddiu(sp, sp, stack_offset);
2026}
2027
2028void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos,
2029 uint16_t size) {
2030 DCHECK_LT(pos, 32);
2031 DCHECK_LT(pos + size, 33);
2032 ext_(rt, rs, pos, size);
2033}
2034
2035void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
2036 uint16_t size) {
2037 DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
2038 pos + size <= 64);
2039 if (size > 32) {
2040 dextm_(rt, rs, pos, size);
2041 } else if (pos >= 32) {
2042 dextu_(rt, rs, pos, size);
2043 } else {
2044 dext_(rt, rs, pos, size);
2045 }
2046}
2047
2048void MacroAssembler::Ins(Register rt, Register rs, uint16_t pos,
2049 uint16_t size) {
2050 DCHECK_LT(pos, 32);
2051 DCHECK_LE(pos + size, 32);
2052 DCHECK_NE(size, 0);
2053 ins_(rt, rs, pos, size);
2054}
2055
2056void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
2057 uint16_t size) {
2058 DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
2059 pos + size <= 64);
2060 if (pos + size <= 32) {
2061 dins_(rt, rs, pos, size);
2062 } else if (pos < 32) {
2063 dinsm_(rt, rs, pos, size);
2064 } else {
2065 dinsu_(rt, rs, pos, size);
2066 }
2067}
2068
2069void MacroAssembler::ExtractBits(Register dest, Register source, Register pos,
2070 int size, bool sign_extend) {
2071 dsrav(dest, source, pos);
2072 Dext(dest, dest, 0, size);
2073 if (sign_extend) {
2074 switch (size) {
2075 case 8:
2076 seb(dest, dest);
2077 break;
2078 case 16:
2079 seh(dest, dest);
2080 break;
2081 case 32:
2082 // sign-extend word
2083 sll(dest, dest, 0);
2084 break;
2085 default:
2086 UNREACHABLE();
2087 }
2088 }
2089}
2090
2091void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
2092 int size) {
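 // Rotate the destination so the target field sits at bit 0, overwrite it
 // with Dins, then rotate back. Rotate amounts are taken modulo 64, so
 // rotating by (0 - pos) undoes the initial rotation by pos.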
2093 Dror(dest, dest, pos);
2094 Dins(dest, source, 0, size);
2095 {
2096 UseScratchRegisterScope temps(this);
2097 Register scratch = temps.Acquire();
2098 Dsubu(scratch, zero_reg, pos);
2099 Dror(dest, dest, scratch);
2100 }
2101}
2102
2103void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
2104 if (kArchVariant == kMips64r6) {
2105 // r6 neg_s changes the sign for NaN-like operands as well.
2106 neg_s(fd, fs);
2107 } else {
2108 DCHECK_EQ(kArchVariant, kMips64r2);
2109 BlockTrampolinePoolScope block_trampoline_pool(this);
2110 Label is_nan, done;
2111 Register scratch1 = t8;
2112 Register scratch2 = t9;
2113 CompareIsNanF32(fs, fs);
2114 BranchTrueShortF(&is_nan);
2115 Branch(USE_DELAY_SLOT, &done);
2116 // For NaN input, neg_s will return the same NaN value,
2117 // while the sign has to be changed separately.
2118 neg_s(fd, fs); // In delay slot.
2119 bind(&is_nan);
2120 mfc1(scratch1, fs);
2121 li(scratch2, kBinary32SignMask);
2122 Xor(scratch1, scratch1, scratch2);
2123 mtc1(scratch1, fd);
2124 bind(&done);
2125 }
2126}
2127
2128void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
2129 if (kArchVariant == kMips64r6) {
2130 // r6 neg_d changes the sign for NaN-like operands as well.
2131 neg_d(fd, fs);
2132 } else {
2133 DCHECK_EQ(kArchVariant, kMips64r2);
2134 BlockTrampolinePoolScope block_trampoline_pool(this);
2135 Label is_nan, done;
2136 Register scratch1 = t8;
2137 Register scratch2 = t9;
2138 CompareIsNanF64(fs, fs);
2139 BranchTrueShortF(&is_nan);
2140 Branch(USE_DELAY_SLOT, &done);
2141 // For NaN input, neg_d will return the same NaN value,
2142 // while the sign has to be changed separately.
2143 neg_d(fd, fs); // In delay slot.
2144 bind(&is_nan);
2145 dmfc1(scratch1, fs);
2146 li(scratch2, base::Double::kSignMask);
2147 Xor(scratch1, scratch1, scratch2);
2148 dmtc1(scratch1, fd);
2149 bind(&done);
2150 }
2151}
2152
2153void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
2154 // Move the data from fs to t8.
2155 BlockTrampolinePoolScope block_trampoline_pool(this);
2156 mfc1(t8, fs);
2157 Cvt_d_uw(fd, t8);
2158}
2159
2160void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
2161 BlockTrampolinePoolScope block_trampoline_pool(this);
2162
2163 // Convert rs to a FP value in fd.
2164 DCHECK(rs != t9);
2165 DCHECK(rs != at);
2166
2167 // Zero extend int32 in rs.
2168 Dext(t9, rs, 0, 32);
2169 dmtc1(t9, fd);
2170 cvt_d_l(fd, fd);
2171}
2172
2173void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
2174 BlockTrampolinePoolScope block_trampoline_pool(this);
2175 // Move the data from fs to t8.
2176 dmfc1(t8, fs);
2177 Cvt_d_ul(fd, t8);
2178}
2179
2180void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
2181 BlockTrampolinePoolScope block_trampoline_pool(this);
2182 // Convert rs to a FP value in fd.
2183
2184 DCHECK(rs != t9);
2185 DCHECK(rs != at);
2186
2187 Label msb_clear, conversion_done;
2188
2189 Branch(&msb_clear, ge, rs, Operand(zero_reg));
2190
2191 // Rs >= 2^63
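 // The value does not fit in a signed 64-bit integer, so halve it first,
 // OR-ing the dropped low bit back in as a sticky bit so the final rounding
 // stays correct, convert, and then double the result in the delay slot.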
2192 andi(t9, rs, 1);
2193 dsrl(rs, rs, 1);
2194 or_(t9, t9, rs);
2195 dmtc1(t9, fd);
2196 cvt_d_l(fd, fd);
2197 Branch(USE_DELAY_SLOT, &conversion_done);
2198 add_d(fd, fd, fd); // In delay slot.
2199
2200 bind(&msb_clear);
2201 // Rs < 2^63, we can do simple conversion.
2202 dmtc1(rs, fd);
2203 cvt_d_l(fd, fd);
2204
2205 bind(&conversion_done);
2206}
2207
2208void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
2209 BlockTrampolinePoolScope block_trampoline_pool(this);
2210 // Move the data from fs to t8.
2211 mfc1(t8, fs);
2212 Cvt_s_uw(fd, t8);
2213}
2214
2215void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
2216 BlockTrampolinePoolScope block_trampoline_pool(this);
2217 // Convert rs to a FP value in fd.
2218 DCHECK(rs != t9);
2219 DCHECK(rs != at);
2220
2221 // Zero extend int32 in rs.
2222 Dext(t9, rs, 0, 32);
2223 dmtc1(t9, fd);
2224 cvt_s_l(fd, fd);
2225}
2226
2227void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
2228 BlockTrampolinePoolScope block_trampoline_pool(this);
2229 // Move the data from fs to t8.
2230 dmfc1(t8, fs);
2231 Cvt_s_ul(fd, t8);
2232}
2233
2234void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
2235 BlockTrampolinePoolScope block_trampoline_pool(this);
2236 // Convert rs to a FP value in fd.
2237
2238 DCHECK(rs != t9);
2239 DCHECK(rs != at);
2240
2241 Label positive, conversion_done;
2242
2243 Branch(&positive, ge, rs, Operand(zero_reg));
2244
2245 // Rs >= 2^63.
2246 andi(t9, rs, 1);
2247 dsrl(rs, rs, 1);
2248 or_(t9, t9, rs);
2249 dmtc1(t9, fd);
2250 cvt_s_l(fd, fd);
2251 Branch(USE_DELAY_SLOT, &conversion_done);
2252 add_s(fd, fd, fd); // In delay slot.
2253
2254 bind(&positive);
2255 // Rs < 2^63, we can do a simple conversion.
2256 dmtc1(rs, fd);
2257 cvt_s_l(fd, fd);
2258
2259 bind(&conversion_done);
2260}
2261
2262void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
2263 round_l_d(fd, fs);
2264}
2265
2266void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
2267 floor_l_d(fd, fs);
2268}
2269
2270void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
2271 ceil_l_d(fd, fs);
2272}
2273
2274void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
2275 trunc_l_d(fd, fs);
2276}
2277
2278void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs,
2279 FPURegister scratch) {
2280 BlockTrampolinePoolScope block_trampoline_pool(this);
2281 // Load to GPR.
2282 dmfc1(t8, fs);
2283 // Reset sign bit.
2284 {
2285 UseScratchRegisterScope temps(this);
2286 Register scratch1 = temps.Acquire();
2287 li(scratch1, 0x7FFFFFFFFFFFFFFF);
2288 and_(t8, t8, scratch1);
2289 }
2290 dmtc1(t8, fs);
2291 trunc_l_d(fd, fs);
2292}
2293
2294void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
2295 FPURegister scratch) {
2296 BlockTrampolinePoolScope block_trampoline_pool(this);
2297 Trunc_uw_d(t8, fs, scratch);
2298 mtc1(t8, fd);
2299}
2300
2301void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
2302 FPURegister scratch) {
2303 BlockTrampolinePoolScope block_trampoline_pool(this);
2304 Trunc_uw_s(t8, fs, scratch);
2305 mtc1(t8, fd);
2306}
2307
2308void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
2309 FPURegister scratch, Register result) {
2310 BlockTrampolinePoolScope block_trampoline_pool(this);
2311 Trunc_ul_d(t8, fs, scratch, result);
2312 dmtc1(t8, fd);
2313}
2314
2315void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
2316 FPURegister scratch, Register result) {
2317 BlockTrampolinePoolScope block_trampoline_pool(this);
2318 Trunc_ul_s(t8, fs, scratch, result);
2319 dmtc1(t8, fd);
2320}
2321
2322void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
2323 trunc_w_d(fd, fs);
2324}
2325
2326void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
2327 round_w_d(fd, fs);
2328}
2329
2330void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
2331 floor_w_d(fd, fs);
2332}
2333
2334void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
2335 ceil_w_d(fd, fs);
2336}
2337
2338void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs,
2339 FPURegister scratch) {
2340 DCHECK(fs != scratch);
2341 DCHECK(rd != at);
2342
2343 {
2344 // Load 2^32 into scratch as its double representation.
2345 UseScratchRegisterScope temps(this);
2346 Register scratch1 = temps.Acquire();
2347 li(scratch1, 0x41F00000);
2348 mtc1(zero_reg, scratch);
2349 mthc1(scratch1, scratch);
2350 }
2351 // Test if scratch > fs.
2352 // If fs < 2^32 we can convert it normally.
2353 Label simple_convert;
2354 CompareF64(ULT, fs, scratch);
2355 BranchTrueShortF(&simple_convert);
2356
2357 // If fs >= 2^32, the result should be UINT32_MAX.
2358 Addu(rd, zero_reg, -1);
2359
2360 Label done;
2361 Branch(&done);
2362 // Simple conversion.
2363 bind(&simple_convert);
2364 // Double -> Int64 -> Uint32;
2365 trunc_l_d(scratch, fs);
2366 mfc1(rd, scratch);
2367
2368 bind(&done);
2369}
2370
2371void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs,
2372 FPURegister scratch) {
2373 DCHECK(fs != scratch);
2374 DCHECK(rd != at);
2375
2376 {
2377 // Load 2^32 into scratch as its float representation.
2378 UseScratchRegisterScope temps(this);
2379 Register scratch1 = temps.Acquire();
2380 li(scratch1, 0x4F800000);
2381 mtc1(scratch1, scratch);
2382 }
2383 // Test if scratch > fs.
2384 // If fs < 2^32 we can convert it normally.
2385 Label simple_convert;
2386 CompareF32(ULT, fs, scratch);
2387 BranchTrueShortF(&simple_convert);
2388
2389 // If fs >= 2^32, the result should be UINT32_MAX.
2390 Addu(rd, zero_reg, -1);
2391
2392 Label done;
2393 Branch(&done);
2394 // Simple conversion.
2395 bind(&simple_convert);
2396 // Float -> Int64 -> Uint32;
2397 trunc_l_s(scratch, fs);
2398 mfc1(rd, scratch);
2399
2400 bind(&done);
2401}
2402
2403void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs,
2404 FPURegister scratch, Register result) {
2405 DCHECK(fs != scratch);
2406 DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));
2407
2408 Label simple_convert, done, fail;
2409 if (result.is_valid()) {
2410 mov(result, zero_reg);
2411 Move(scratch, -1.0);
2412 // If fs <= -1 or unordered, the conversion fails.
2413 CompareF64(ULE, fs, scratch);
2414 BranchTrueShortF(&fail);
2415 }
2416
2417 // Load 2^63 into scratch as its double representation.
2418 li(at, 0x43E0000000000000);
2419 dmtc1(at, scratch);
2420
2421 // Test if scratch > fs.
2422 // If fs < 2^63 or unordered, we can convert it normally.
2423 CompareF64(ULT, fs, scratch);
2424 BranchTrueShortF(&simple_convert);
2425
2426 // First we subtract 2^63 from fs, then trunc it to rd
2427 // and add 2^63 to rd.
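 // The truncated value is below 2^63 here, so OR-ing in bit 63 is
 // equivalent to adding 2^63.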
2428 sub_d(scratch, fs, scratch);
2429 trunc_l_d(scratch, scratch);
2430 dmfc1(rd, scratch);
2431 Or(rd, rd, Operand(1UL << 63));
2432 Branch(&done);
2433
2434 // Simple conversion.
2435 bind(&simple_convert);
2436 trunc_l_d(scratch, fs);
2437 dmfc1(rd, scratch);
2438
2439 bind(&done);
2440 if (result.is_valid()) {
2441 // The conversion failed if the raw truncated value is negative or 2^63 - 1.
2442 {
2443 UseScratchRegisterScope temps(this);
2444 Register scratch1 = temps.Acquire();
2445 addiu(scratch1, zero_reg, -1);
2446 dsrl(scratch1, scratch1, 1); // Load 2^63 - 1.
2447 dmfc1(result, scratch);
2448 xor_(result, result, scratch1);
2449 }
2450 Slt(result, zero_reg, result);
2451 }
2452
2453 bind(&fail);
2454}
2455
2456void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs,
2457 FPURegister scratch, Register result) {
2458 DCHECK(fs != scratch);
2459 DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));
2460
2461 Label simple_convert, done, fail;
2462 if (result.is_valid()) {
2463 mov(result, zero_reg);
2464 Move(scratch, -1.0f);
2465 // If fs <= -1 or unordered, the conversion fails.
2466 CompareF32(ULE, fs, scratch);
2467 BranchTrueShortF(&fail);
2468 }
2469
2470 {
2471 // Load 2^63 into scratch as its float representation.
2472 UseScratchRegisterScope temps(this);
2473 Register scratch1 = temps.Acquire();
2474 li(scratch1, 0x5F000000);
2475 mtc1(scratch1, scratch);
2476 }
2477
2478 // Test if scratch > fs.
2479 // If fs < 2^63 or unordered, we can convert it normally.
2480 CompareF32(ULT, fs, scratch);
2481 BranchTrueShortF(&simple_convert);
2482
2483 // First we subtract 2^63 from fs, then trunc it to rd
2484 // and add 2^63 to rd.
2485 sub_s(scratch, fs, scratch);
2486 trunc_l_s(scratch, scratch);
2487 dmfc1(rd, scratch);
2488 Or(rd, rd, Operand(1UL << 63));
2489 Branch(&done);
2490
2491 // Simple conversion.
2492 bind(&simple_convert);
2493 trunc_l_s(scratch, fs);
2494 dmfc1(rd, scratch);
2495
2496 bind(&done);
2497 if (result.is_valid()) {
2498 // The conversion failed if the raw truncated value is negative or 2^63 - 1.
2499 {
2500 UseScratchRegisterScope temps(this);
2501 Register scratch1 = temps.Acquire();
2502 addiu(scratch1, zero_reg, -1);
2503 dsrl(scratch1, scratch1, 1); // Load 2^63 - 1.
2504 dmfc1(result, scratch);
2505 xor_(result, result, scratch1);
2506 }
2507 Slt(result, zero_reg, result);
2508 }
2509
2510 bind(&fail);
2511}
2512
2513template <typename RoundFunc>
2514void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src,
2515 FPURoundingMode mode, RoundFunc round) {
2516 BlockTrampolinePoolScope block_trampoline_pool(this);
2517 Register scratch = t8;
2518 if (kArchVariant == kMips64r6) {
2519 cfc1(scratch, FCSR);
2520 li(at, Operand(mode));
2521 ctc1(at, FCSR);
2522 rint_d(dst, src);
2523 ctc1(scratch, FCSR);
2524 } else {
2525 Label done;
2526 if (!IsDoubleZeroRegSet()) {
2527 Move(kDoubleRegZero, 0.0);
2528 }
2529 mfhc1(scratch, src);
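 // Early out: if the biased exponent is at least kExponentBias +
 // kMantissaBits, |src| >= 2^52 (or src is NaN/Infinity), so it is already
 // integral and is returned unchanged via the delay-slot mov_d.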
2530 Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2531 Branch(USE_DELAY_SLOT, &done, hs, at,
2532 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
2533 mov_d(dst, src);
2534
2535 round(this, dst, src);
2536 dmfc1(at, dst);
2537 Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
2538 cvt_d_l(dst, dst);
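 // The rounded integer is zero: copy the sign bit of the input so that
 // negative inputs that round to zero still produce -0.0.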
2539 srl(at, scratch, 31);
2540 sll(at, at, 31);
2541 mthc1(at, dst);
2542 bind(&done);
2543 }
2544}
2545
2546void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
2547 RoundDouble(dst, src, mode_floor,
2548 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2549 masm->floor_l_d(dst, src);
2550 });
2551}
2552
2553void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
2554 RoundDouble(dst, src, mode_ceil,
2555 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2556 masm->ceil_l_d(dst, src);
2557 });
2558}
2559
2560void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
2561 RoundDouble(dst, src, mode_trunc,
2562 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2563 masm->trunc_l_d(dst, src);
2564 });
2565}
2566
2567void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src) {
2568 RoundDouble(dst, src, mode_round,
2569 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2570 masm->round_l_d(dst, src);
2571 });
2572}
2573
2574template <typename RoundFunc>
2575void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
2576 FPURoundingMode mode, RoundFunc round) {
2577 BlockTrampolinePoolScope block_trampoline_pool(this);
2578 Register scratch = t8;
2579 if (kArchVariant == kMips64r6) {
2580 cfc1(scratch, FCSR);
2581 li(at, Operand(mode));
2582 ctc1(at, FCSR);
2583 rint_s(dst, src);
2584 ctc1(scratch, FCSR);
2585 } else {
2586 int32_t kFloat32ExponentBias = 127;
2587 int32_t kFloat32MantissaBits = 23;
2588 int32_t kFloat32ExponentBits = 8;
2589 Label done;
2590 if (!IsDoubleZeroRegSet()) {
2591 Move(kDoubleRegZero, 0.0);
2592 }
2593 mfc1(scratch, src);
2594 Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
2595 Branch(USE_DELAY_SLOT, &done, hs, at,
2596 Operand(kFloat32ExponentBias + kFloat32MantissaBits));
2597 mov_s(dst, src);
2598
2599 round(this, dst, src);
2600 mfc1(at, dst);
2601 Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
2602 cvt_s_w(dst, dst);
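 // As in RoundDouble above: a zero result takes the sign of the input so
 // that negative inputs that round to zero produce -0.0f.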
2603 srl(at, scratch, 31);
2604 sll(at, at, 31);
2605 mtc1(at, dst);
2606 bind(&done);
2607 }
2608}
2609
2610void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
2611 RoundFloat(dst, src, mode_floor,
2612 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2613 masm->floor_w_s(dst, src);
2614 });
2615}
2616
2617void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
2618 RoundFloat(dst, src, mode_ceil,
2619 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2620 masm->ceil_w_s(dst, src);
2621 });
2622}
2623
2624void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
2625 RoundFloat(dst, src, mode_trunc,
2626 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2627 masm->trunc_w_s(dst, src);
2628 });
2629}
2630
2631void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src) {
2632 RoundFloat(dst, src, mode_round,
2633 [](MacroAssembler* masm, FPURegister dst, FPURegister src) {
2634 masm->round_w_s(dst, src);
2635 });
2636}
2637
2638void MacroAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx,
2639 MemOperand src) {
2640 UseScratchRegisterScope temps(this);
2641 Register scratch = temps.Acquire();
2642 switch (sz) {
2643 case MSA_B:
2644 Lbu(scratch, src);
2645 insert_b(dst, laneidx, scratch);
2646 break;
2647 case MSA_H:
2648 Lhu(scratch, src);
2649 insert_h(dst, laneidx, scratch);
2650 break;
2651 case MSA_W:
2652 Lwu(scratch, src);
2653 insert_w(dst, laneidx, scratch);
2654 break;
2655 case MSA_D:
2656 Ld(scratch, src);
2657 insert_d(dst, laneidx, scratch);
2658 break;
2659 default:
2660 UNREACHABLE();
2661 }
2662}
2663
2664void MacroAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx,
2665 MemOperand dst) {
2666 UseScratchRegisterScope temps(this);
2667 Register scratch = temps.Acquire();
2668 switch (sz) {
2669 case MSA_B:
2670 copy_u_b(scratch, src, laneidx);
2671 Sb(scratch, dst);
2672 break;
2673 case MSA_H:
2674 copy_u_h(scratch, src, laneidx);
2675 Sh(scratch, dst);
2676 break;
2677 case MSA_W:
2678 if (laneidx == 0) {
2679 FPURegister src_reg = FPURegister::from_code(src.code());
2680 Swc1(src_reg, dst);
2681 } else {
2682 copy_u_w(scratch, src, laneidx);
2683 Sw(scratch, dst);
2684 }
2685 break;
2686 case MSA_D:
2687 if (laneidx == 0) {
2688 FPURegister src_reg = FPURegister::from_code(src.code());
2689 Sdc1(src_reg, dst);
2690 } else {
2691 copy_s_d(scratch, src, laneidx);
2692 Sd(scratch, dst);
2693 }
2694 break;
2695 default:
2696 UNREACHABLE();
2697 }
2698}
2699
2700#define EXT_MUL_BINOP(type, ilv_instr, dotp_instr) \
2701 case type: \
2702 xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); \
2703 ilv_instr(kSimd128ScratchReg, kSimd128RegZero, src1); \
2704 ilv_instr(kSimd128RegZero, kSimd128RegZero, src2); \
2705 dotp_instr(dst, kSimd128ScratchReg, kSimd128RegZero); \
2706 break;
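// Each EXT_MUL_BINOP case interleaves src1 and src2 with a zero vector to
// widen the elements, then takes a dot product: every widened product is
// paired with a zero term, so the result is just the element-wise extended
// multiplication.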
2707
2708void MacroAssembler::ExtMulLow(MSADataType type, MSARegister dst,
2709 MSARegister src1, MSARegister src2) {
2710 switch (type) {
2711 EXT_MUL_BINOP(MSAS8, ilvr_b, dotp_s_h)
2712 EXT_MUL_BINOP(MSAS16, ilvr_h, dotp_s_w)
2713 EXT_MUL_BINOP(MSAS32, ilvr_w, dotp_s_d)
2714 EXT_MUL_BINOP(MSAU8, ilvr_b, dotp_u_h)
2715 EXT_MUL_BINOP(MSAU16, ilvr_h, dotp_u_w)
2716 EXT_MUL_BINOP(MSAU32, ilvr_w, dotp_u_d)
2717 default:
2718 UNREACHABLE();
2719 }
2720}
2721
2722void MacroAssembler::ExtMulHigh(MSADataType type, MSARegister dst,
2723 MSARegister src1, MSARegister src2) {
2724 switch (type) {
2725 EXT_MUL_BINOP(MSAS8, ilvl_b, dotp_s_h)
2726 EXT_MUL_BINOP(MSAS16, ilvl_h, dotp_s_w)
2727 EXT_MUL_BINOP(MSAS32, ilvl_w, dotp_s_d)
2728 EXT_MUL_BINOP(MSAU8, ilvl_b, dotp_u_h)
2729 EXT_MUL_BINOP(MSAU16, ilvl_h, dotp_u_w)
2730 EXT_MUL_BINOP(MSAU32, ilvl_w, dotp_u_d)
2731 default:
2732 UNREACHABLE();
2733 }
2734}
2735#undef EXT_MUL_BINOP
2736
2737void MacroAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) {
2738 UseScratchRegisterScope temps(this);
2739 Register scratch = temps.Acquire();
2740 switch (sz) {
2741 case MSA_B:
2742 Lb(scratch, src);
2743 fill_b(dst, scratch);
2744 break;
2745 case MSA_H:
2746 Lh(scratch, src);
2747 fill_h(dst, scratch);
2748 break;
2749 case MSA_W:
2750 Lw(scratch, src);
2751 fill_w(dst, scratch);
2752 break;
2753 case MSA_D:
2754 Ld(scratch, src);
2755 fill_d(dst, scratch);
2756 break;
2757 default:
2758 UNREACHABLE();
2759 }
2760}
2761
2762void MacroAssembler::ExtAddPairwise(MSADataType type, MSARegister dst,
2763 MSARegister src) {
2764 switch (type) {
2765 case MSAS8:
2766 hadd_s_h(dst, src, src);
2767 break;
2768 case MSAU8:
2769 hadd_u_h(dst, src, src);
2770 break;
2771 case MSAS16:
2772 hadd_s_w(dst, src, src);
2773 break;
2774 case MSAU16:
2775 hadd_u_w(dst, src, src);
2776 break;
2777 default:
2778 UNREACHABLE();
2779 }
2780}
2781
2782void MacroAssembler::MSARoundW(MSARegister dst, MSARegister src,
2783 FPURoundingMode mode) {
2784 BlockTrampolinePoolScope block_trampoline_pool(this);
2785 Register scratch = t8;
2786 Register scratch2 = at;
2787 cfcmsa(scratch, MSACSR);
2788 if (mode == kRoundToNearest) {
2789 scratch2 = zero_reg;
2790 } else {
2791 li(scratch2, Operand(mode));
2792 }
2793 ctcmsa(MSACSR, scratch2);
2794 frint_w(dst, src);
2795 ctcmsa(MSACSR, scratch);
2796}
2797
2798void MacroAssembler::MSARoundD(MSARegister dst, MSARegister src,
2799 FPURoundingMode mode) {
2800 BlockTrampolinePoolScope block_trampoline_pool(this);
2801 Register scratch = t8;
2802 Register scratch2 = at;
2803 cfcmsa(scratch, MSACSR);
2804 if (mode == kRoundToNearest) {
2805 scratch2 = zero_reg;
2806 } else {
2807 li(scratch2, Operand(mode));
2808 }
2809 ctcmsa(MSACSR, scratch2);
2810 frint_d(dst, src);
2811 ctcmsa(MSACSR, scratch);
2812}
2813
2814void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2815 FPURegister ft, FPURegister scratch) {
2816 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2817 mul_s(scratch, fs, ft);
2818 add_s(fd, fr, scratch);
2819}
2820
2821void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2822 FPURegister ft, FPURegister scratch) {
2823 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2824 mul_d(scratch, fs, ft);
2825 add_d(fd, fr, scratch);
2826}
2827
2828void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2829 FPURegister ft, FPURegister scratch) {
2830 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2831 mul_s(scratch, fs, ft);
2832 sub_s(fd, scratch, fr);
2833}
2834
2835void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2836 FPURegister ft, FPURegister scratch) {
2837 DCHECK(fr != scratch && fs != scratch && ft != scratch);
2838 mul_d(scratch, fs, ft);
2839 sub_d(fd, scratch, fr);
2840}
2841
2842void MacroAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
2843 FPURegister cmp1, FPURegister cmp2) {
2844 if (kArchVariant == kMips64r6) {
2845 sizeField = sizeField == D ? L : W;
2846 DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
2847 cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
2848 } else {
2849 c(cc, sizeField, cmp1, cmp2);
2850 }
2851}
2852
2853void MacroAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
2854 FPURegister cmp2) {
2855 CompareF(sizeField, UN, cmp1, cmp2);
2856}
2857
2858void MacroAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
2859 if (kArchVariant == kMips64r6) {
2860 bc1nez(target, kDoubleCompareReg);
2861 } else {
2862 bc1t(target);
2863 }
2864 if (bd == PROTECT) {
2865 nop();
2866 }
2867}
2868
2869void MacroAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
2870 if (kArchVariant == kMips64r6) {
2871 bc1eqz(target, kDoubleCompareReg);
2872 } else {
2873 bc1f(target);
2874 }
2875 if (bd == PROTECT) {
2876 nop();
2877 }
2878}
2879
2880void MacroAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
2881 bool long_branch =
2882 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2883 if (long_branch) {
2884 Label skip;
2885 BranchFalseShortF(&skip);
2886 BranchLong(target, bd);
2887 bind(&skip);
2888 } else {
2889 BranchTrueShortF(target, bd);
2890 }
2891}
2892
2893void MacroAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
2894 bool long_branch =
2895 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2896 if (long_branch) {
2897 Label skip;
2898 BranchTrueShortF(&skip);
2899 BranchLong(target, bd);
2900 bind(&skip);
2901 } else {
2902 BranchFalseShortF(target, bd);
2903 }
2904}
2905
2906void MacroAssembler::BranchMSA(Label* target, MSABranchDF df,
2907 MSABranchCondition cond, MSARegister wt,
2908 BranchDelaySlot bd) {
2909 {
2910 BlockTrampolinePoolScope block_trampoline_pool(this);
2911
2912 if (target) {
2913 bool long_branch =
2914 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2915 if (long_branch) {
2916 Label skip;
2917 MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
2918 BranchShortMSA(df, &skip, neg_cond, wt, bd);
2919 BranchLong(target, bd);
2920 bind(&skip);
2921 } else {
2922 BranchShortMSA(df, target, cond, wt, bd);
2923 }
2924 }
2925 }
2926}
2927
2928void MacroAssembler::BranchShortMSA(MSABranchDF df, Label* target,
2929 MSABranchCondition cond, MSARegister wt,
2930 BranchDelaySlot bd) {
2931 if (IsEnabled(MIPS_SIMD)) {
2932 BlockTrampolinePoolScope block_trampoline_pool(this);
2933 if (target) {
2934 switch (cond) {
2935 case all_not_zero:
2936 switch (df) {
2937 case MSA_BRANCH_D:
2938 bnz_d(wt, target);
2939 break;
2940 case MSA_BRANCH_W:
2941 bnz_w(wt, target);
2942 break;
2943 case MSA_BRANCH_H:
2944 bnz_h(wt, target);
2945 break;
2946 case MSA_BRANCH_B:
2947 default:
2948 bnz_b(wt, target);
2949 }
2950 break;
2951 case one_elem_not_zero:
2952 bnz_v(wt, target);
2953 break;
2954 case one_elem_zero:
2955 switch (df) {
2956 case MSA_BRANCH_D:
2957 bz_d(wt, target);
2958 break;
2959 case MSA_BRANCH_W:
2960 bz_w(wt, target);
2961 break;
2962 case MSA_BRANCH_H:
2963 bz_h(wt, target);
2964 break;
2965 case MSA_BRANCH_B:
2966 default:
2967 bz_b(wt, target);
2968 }
2969 break;
2970 case all_zero:
2971 bz_v(wt, target);
2972 break;
2973 default:
2974 UNREACHABLE();
2975 }
2976 }
2977 } else {
2978 UNREACHABLE();
2979 }
2980 if (bd == PROTECT) {
2981 nop();
2982 }
2983}
2984
2985void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2986 UseScratchRegisterScope temps(this);
2987 Register scratch = temps.Acquire();
2988 DCHECK(src_low != scratch);
2989 mfhc1(scratch, dst);
2990 mtc1(src_low, dst);
2991 mthc1(scratch, dst);
2992}
2993
2994void MacroAssembler::Move(FPURegister dst, uint32_t src) {
2995 UseScratchRegisterScope temps(this);
2996 Register scratch = temps.Acquire();
2997 li(scratch, Operand(static_cast<int32_t>(src)));
2998 mtc1(scratch, dst);
2999}
3000
3001void MacroAssembler::Move(FPURegister dst, uint64_t src) {
3002 // Handle special values first.
3003 if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
3004 mov_d(dst, kDoubleRegZero);
3005 } else if (src == base::bit_cast<uint64_t>(-0.0) &&
3006 has_double_zero_reg_set_) {
3007 Neg_d(dst, kDoubleRegZero);
3008 } else {
3009 uint32_t lo = src & 0xFFFFFFFF;
3010 uint32_t hi = src >> 32;
3011 // Move the low part of the double into the lower half of the
3012 // corresponding FPU register.
3013 if (lo != 0) {
3014 UseScratchRegisterScope temps(this);
3015 Register scratch = temps.Acquire();
3016 li(scratch, Operand(lo));
3017 mtc1(scratch, dst);
3018 } else {
3019 mtc1(zero_reg, dst);
3020 }
3021 // Move the high part of the double into the upper half of the
3022 // corresponding FPU register.
3023 if (hi != 0) {
3024 UseScratchRegisterScope temps(this);
3025 Register scratch = temps.Acquire();
3026 li(scratch, Operand(hi));
3027 mthc1(scratch, dst);
3028 } else {
3029 mthc1(zero_reg, dst);
3030 }
3031 if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
3032 }
3033}
3034
3035void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
3036 if (kArchVariant == kMips64r6) {
3037 Label done;
3038 Branch(&done, ne, rt, Operand(zero_reg));
3039 mov(rd, rs);
3040 bind(&done);
3041 } else {
3042 movz(rd, rs, rt);
3043 }
3044}
3045
3046void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
3047 if (kArchVariant == kMips64r6) {
3048 Label done;
3049 Branch(&done, eq, rt, Operand(zero_reg));
3050 mov(rd, rs);
3051 bind(&done);
3052 } else {
3053 movn(rd, rs, rt);
3054 }
3055}
3056
3057void MacroAssembler::LoadZeroIfConditionNotZero(Register dest,
3058 Register condition) {
3059 if (kArchVariant == kMips64r6) {
3060 seleqz(dest, dest, condition);
3061 } else {
3062 Movn(dest, zero_reg, condition);
3063 }
3064}
3065
3066void MacroAssembler::LoadZeroIfConditionZero(Register dest,
3067 Register condition) {
3068 if (kArchVariant == kMips64r6) {
3069 selnez(dest, dest, condition);
3070 } else {
3071 Movz(dest, zero_reg, condition);
3072 }
3073}
3074
3075void MacroAssembler::LoadZeroIfFPUCondition(Register dest) {
3076 if (kArchVariant == kMips64r6) {
3077 dmfc1(kScratchReg, kDoubleCompareReg);
3078 LoadZeroIfConditionNotZero(dest, kScratchReg);
3079 } else {
3080 Movt(dest, zero_reg);
3081 }
3082}
3083
3084void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest) {
3085 if (kArchVariant == kMips64r6) {
3086 dmfc1(kScratchReg, kDoubleCompareReg);
3087 LoadZeroIfConditionZero(dest, kScratchReg);
3088 } else {
3089 Movf(dest, zero_reg);
3090 }
3091}
3092
3093void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
3094 movt(rd, rs, cc);
3095}
3096
3097void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
3098 movf(rd, rs, cc);
3099}
3100
3101void MacroAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
3102
3103void MacroAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); }
3104
3105void MacroAssembler::Ctz(Register rd, Register rs) {
3106 if (kArchVariant == kMips64r6) {
3107 // We don't have an instruction to count the number of trailing zeroes.
3108 // Start by flipping the bits end-for-end so we can count the number of
3109 // leading zeroes instead.
3110 rotr(rd, rs, 16);
3111 wsbh(rd, rd);
3112 bitswap(rd, rd);
3113 Clz(rd, rd);
3114 } else {
3115 // Convert trailing zeroes to trailing ones, and bits to their left
3116 // to zeroes.
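 // Example: rs = ...01000 -> scratch = ...00111, rs ^ scratch = ...01111,
 // & scratch = ...00111; clz = 29, so 32 - 29 = 3 trailing zeroes.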
3117 UseScratchRegisterScope temps(this);
3118 Register scratch = temps.Acquire();
3119 Daddu(scratch, rs, -1);
3120 Xor(rd, scratch, rs);
3121 And(rd, rd, scratch);
3122 // Count number of leading zeroes.
3123 Clz(rd, rd);
3124 // Subtract number of leading zeroes from 32 to get number of trailing
3125 // ones. Remember that the trailing ones were formerly trailing zeroes.
3126 li(scratch, 32);
3127 Subu(rd, scratch, rd);
3128 }
3129}
3130
3131void MacroAssembler::Dctz(Register rd, Register rs) {
3132 if (kArchVariant == kMips64r6) {
3133 // We don't have an instruction to count the number of trailing zeroes.
3134 // Start by flipping the bits end-for-end so we can count the number of
3135 // leading zeroes instead.
3136 dsbh(rd, rs);
3137 dshd(rd, rd);
3138 dbitswap(rd, rd);
3139 dclz(rd, rd);
3140 } else {
3141 // Convert trailing zeroes to trailing ones, and bits to their left
3142 // to zeroes.
3143 UseScratchRegisterScope temps(this);
3144 Register scratch = temps.Acquire();
3145 Daddu(scratch, rs, -1);
3146 Xor(rd, scratch, rs);
3147 And(rd, rd, scratch);
3148 // Count number of leading zeroes.
3149 dclz(rd, rd);
3150 // Subtract number of leading zeroes from 64 to get number of trailing
3151 // ones. Remember that the trailing ones were formerly trailing zeroes.
3152 li(scratch, 64);
3153 Dsubu(rd, scratch, rd);
3154 }
3155}
3156
3157void MacroAssembler::Popcnt(Register rd, Register rs) {
3158 ASM_CODE_COMMENT(this);
3159 // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
3160 //
3161 // A generalization of the best bit counting method to integers of
3162 // bit-widths up to 128 (parameterized by type T) is this:
3163 //
3164 // v = v - ((v >> 1) & (T)~(T)0/3); // temp
3165 // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
3166 // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
3167 // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
3168 //
3169 // For comparison, for 32-bit quantities, this algorithm can be executed
3170 // using 20 MIPS instructions (the calls to LoadConst32() generate two
3171 // machine instructions each for the values being used in this algorithm).
3172 // A(n unrolled) loop-based algorithm requires 25 instructions.
3173 //
3174 // For a 64-bit operand this can be performed in 24 instructions compared
3175 // to a(n unrolled) loop based algorithm which requires 38 instructions.
3176 //
3177 // There are algorithms which are faster in the cases where very few
3178 // bits are set but the algorithm here attempts to minimize the total
3179 // number of instructions executed even when a large number of bits
3180 // are set.
3181 uint32_t B0 = 0x55555555; // (T)~(T)0/3
3182 uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
3183 uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
3184 uint32_t value = 0x01010101; // (T)~(T)0/255
3185 uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
3186
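 // The masks B0/B1/B2 are materialized into scratch2 one at a time; the
 // final multiply by 0x01010101 sums the four byte counts into the top
 // byte, which the shift right by 24 extracts.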
3187 UseScratchRegisterScope temps(this);
3188 BlockTrampolinePoolScope block_trampoline_pool(this);
3189 Register scratch = temps.Acquire();
3190 Register scratch2 = t8;
3191 srl(scratch, rs, 1);
3192 li(scratch2, B0);
3193 And(scratch, scratch, scratch2);
3194 Subu(scratch, rs, scratch);
3195 li(scratch2, B1);
3196 And(rd, scratch, scratch2);
3197 srl(scratch, scratch, 2);
3198 And(scratch, scratch, scratch2);
3199 Addu(scratch, rd, scratch);
3200 srl(rd, scratch, 4);
3201 Addu(rd, rd, scratch);
3202 li(scratch2, B2);
3203 And(rd, rd, scratch2);
3204 li(scratch, value);
3205 Mul(rd, rd, scratch);
3206 srl(rd, rd, shift);
3207}
3208
3209void MacroAssembler::Dpopcnt(Register rd, Register rs) {
3210 ASM_CODE_COMMENT(this);
3211 uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
3212 uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
3213 uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
3214 uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
3215 uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE - 32 (dsrl32 adds 32)
3216
3217 UseScratchRegisterScope temps(this);
3218 BlockTrampolinePoolScope block_trampoline_pool(this);
3219 Register scratch = temps.Acquire();
3220 Register scratch2 = t8;
3221 dsrl(scratch, rs, 1);
3222 li(scratch2, B0);
3223 And(scratch, scratch, scratch2);
3224 Dsubu(scratch, rs, scratch);
3225 li(scratch2, B1);
3226 And(rd, scratch, scratch2);
3227 dsrl(scratch, scratch, 2);
3228 And(scratch, scratch, scratch2);
3229 Daddu(scratch, rd, scratch);
3230 dsrl(rd, scratch, 4);
3231 Daddu(rd, rd, scratch);
3232 li(scratch2, B2);
3233 And(rd, rd, scratch2);
3234 li(scratch, value);
3235 Dmul(rd, rd, scratch);
3236 dsrl32(rd, rd, shift);
3237}
3238
3239void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
3240 DoubleRegister double_input,
3241 Label* done) {
3242 DoubleRegister single_scratch = kScratchDoubleReg.low();
3243 BlockTrampolinePoolScope block_trampoline_pool(this);
3244 Register scratch = t9;
3245
3246 // Try a conversion to a signed integer.
3247 trunc_w_d(single_scratch, double_input);
3248 mfc1(result, single_scratch);
3249 // Retrieve the FCSR.
3250 cfc1(scratch, FCSR);
3251 // Check for overflow and NaNs.
3252 And(scratch, scratch,
3253 Operand(kFCSROverflowCauseMask | kFCSRUnderflowCauseMask |
3254 kFCSRInvalidOpCauseMask));
3255 // If we had no exceptions we are done.
3256 Branch(done, eq, scratch, Operand(zero_reg));
3257}
3258
3259void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
3260 Register result,
3261 DoubleRegister double_input,
3262 StubCallMode stub_mode) {
3263 Label done;
3264
3265 TryInlineTruncateDoubleToI(result, double_input, &done);
3266
3267 // If we fell through, the inline version didn't succeed - call the stub instead.
3268 push(ra);
3269 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
3270 Sdc1(double_input, MemOperand(sp, 0));
3271
3272#if V8_ENABLE_WEBASSEMBLY
3273 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
3274 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
3275#else
3276 // For balance.
3277 if (false) {
3278#endif // V8_ENABLE_WEBASSEMBLY
3279 } else {
3280 CallBuiltin(Builtin::kDoubleToI);
3281 }
3282 Ld(result, MemOperand(sp, 0));
3283
3284 Daddu(sp, sp, Operand(kDoubleSize));
3285 pop(ra);
3286
3287 bind(&done);
3288}
3289
3290void MacroAssembler::CompareWord(Condition cond, Register dst, Register lhs,
3291 const Operand& rhs) {
3292 switch (cond) {
3293 case eq:
3294 case ne: {
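 // For eq/ne, reduce lhs ? rhs to a comparison against zero: subtract (or
 // XOR with) rhs, then Sltu(dst, x, 1) yields 1 iff x == 0 and
 // Sltu(dst, zero_reg, x) yields 1 iff x != 0.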
3295 if (rhs.IsImmediate()) {
3296 if (rhs.immediate() == 0) {
3297 if (cond == eq) {
3298 Sltu(dst, lhs, 1);
3299 } else {
3300 Sltu(dst, zero_reg, lhs);
3301 }
3302 } else if (is_int16(-rhs.immediate())) {
3303 Daddu(dst, lhs, Operand(-rhs.immediate()));
3304 if (cond == eq) {
3305 Sltu(dst, dst, 1);
3306 } else {
3307 Sltu(dst, zero_reg, dst);
3308 }
3309 } else {
3310 Xor(dst, lhs, rhs);
3311 if (cond == eq) {
3312 Sltu(dst, dst, 1);
3313 } else {
3314 Sltu(dst, zero_reg, dst);
3315 }
3316 }
3317 } else {
3318 Xor(dst, lhs, rhs);
3319 if (cond == eq) {
3320 Sltu(dst, dst, 1);
3321 } else {
3322 Sltu(dst, zero_reg, dst);
3323 }
3324 }
3325 break;
3326 }
3327 case lt:
3328 Slt(dst, lhs, rhs);
3329 break;
3330 case gt:
3331 Sgt(dst, lhs, rhs);
3332 break;
3333 case le:
3334 Sle(dst, lhs, rhs);
3335 break;
3336 case ge:
3337 Sge(dst, lhs, rhs);
3338 break;
3339 case lo:
3340 Sltu(dst, lhs, rhs);
3341 break;
3342 case hs:
3343 Sgeu(dst, lhs, rhs);
3344 break;
3345 case hi:
3346 Sgtu(dst, lhs, rhs);
3347 break;
3348 case ls:
3349 Sleu(dst, lhs, rhs);
3350 break;
3351 default:
3352 UNREACHABLE();
3353 }
3354}
3355
3356// Emulated conditional branches do not emit a nop in the branch delay slot.
3357//
3358// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
3359#define BRANCH_ARGS_CHECK(cond, rs, rt) \
3360 DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
3361 (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
3362
3363void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
3364 DCHECK_EQ(kArchVariant, kMips64r6 ? is_int26(offset) : is_int16(offset));
3365 BranchShort(offset, bdslot);
3366}
3367
3368void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
3369 const Operand& rt, BranchDelaySlot bdslot) {
3370 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3371 DCHECK(is_near);
3372 USE(is_near);
3373}
3374
3375void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
3376 if (L->is_bound()) {
3377 if (is_near_branch(L)) {
3378 BranchShort(L, bdslot);
3379 } else {
3380 BranchLong(L, bdslot);
3381 }
3382 } else {
3383 if (is_trampoline_emitted()) {
3384 BranchLong(L, bdslot);
3385 } else {
3386 BranchShort(L, bdslot);
3387 }
3388 }
3389}
3390
3391void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
3392 const Operand& rt, BranchDelaySlot bdslot) {
3393 if (L->is_bound()) {
3394 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
3395 if (cond != cc_always) {
3396 Label skip;
3397 Condition neg_cond = NegateCondition(cond);
3398 BranchShort(&skip, neg_cond, rs, rt);
3399 BranchLong(L, bdslot);
3400 bind(&skip);
3401 } else {
3402 BranchLong(L, bdslot);
3403 }
3404 }
3405 } else {
3406 if (is_trampoline_emitted()) {
3407 if (cond != cc_always) {
3408 Label skip;
3409 Condition neg_cond = NegateCondition(cond);
3410 BranchShort(&skip, neg_cond, rs, rt);
3411 BranchLong(L, bdslot);
3412 bind(&skip);
3413 } else {
3414 BranchLong(L, bdslot);
3415 }
3416 } else {
3417 BranchShort(L, cond, rs, rt, bdslot);
3418 }
3419 }
3420}
3421
3422void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
3423 RootIndex index, BranchDelaySlot bdslot) {
3424 UseScratchRegisterScope temps(this);
3425 Register scratch = temps.Acquire();
3426 LoadRoot(scratch, index);
3427 Branch(L, cond, rs, Operand(scratch), bdslot);
3428}
3429
3430void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
3431 BranchDelaySlot bdslot) {
3432 DCHECK(L == nullptr || offset == 0);
3433 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3434 b(offset);
3435
3436 // Emit a nop in the branch delay slot if required.
3437 if (bdslot == PROTECT) nop();
3438}
3439
3440void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
3441 DCHECK(L == nullptr || offset == 0);
3442 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3443 bc(offset);
3444}
3445
3446void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
3447 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3448 DCHECK(is_int26(offset));
3449 BranchShortHelperR6(offset, nullptr);
3450 } else {
3451 DCHECK(is_int16(offset));
3452 BranchShortHelper(offset, nullptr, bdslot);
3453 }
3454}
3455
3456void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
3457 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3458 BranchShortHelperR6(0, L);
3459 } else {
3460 BranchShortHelper(0, L, bdslot);
3461 }
3462}
3463
3464int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
3465 if (L) {
3466 offset = branch_offset_helper(L, bits) >> 2;
3467 } else {
3468 DCHECK(is_intn(offset, bits));
3469 }
3470 return offset;
3471}
3472
3473Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
3474 Register scratch) {
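 // Returns rt as a register: rt.rm() when rt already wraps one, otherwise
 // the immediate is materialized into |scratch| and scratch is returned.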
3475 Register r2 = no_reg;
3476 if (rt.is_reg()) {
3477 r2 = rt.rm();
3478 } else {
3479 r2 = scratch;
3480 li(r2, rt);
3481 }
3482
3483 return r2;
3484}
3485
3486bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset,
3487 OffsetSize bits) {
3488 if (!is_near(L, bits)) return false;
3489 *offset = GetOffset(*offset, L, bits);
3490 return true;
3491}
3492
3493bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
3494 Register* scratch, const Operand& rt) {
3495 if (!is_near(L, bits)) return false;
3496 *scratch = GetRtAsRegisterHelper(rt, *scratch);
3497 *offset = GetOffset(*offset, L, bits);
3498 return true;
3499}
3500
3501bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
3502 Condition cond, Register rs,
3503 const Operand& rt) {
3504 DCHECK(L == nullptr || offset == 0);
3505 UseScratchRegisterScope temps(this);
3506 BlockTrampolinePoolScope block_trampoline_pool(this);
3507 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3508
3509 // Be careful to always use shifted_branch_offset only just before the
3510 // branch instruction, as the location will be remembered for patching the
3511 // target.
3512 {
3513 BlockTrampolinePoolScope block_trampoline_pool(this);
3514 switch (cond) {
3515 case cc_always:
3516 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3517 bc(offset);
3518 break;
3519 case eq:
3520 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3521 // Pre R6 beq is used here to make the code patchable. Otherwise bc
3522 // should be used which has no condition field so is not patchable.
3523 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3524 return false;
3525 beq(rs, scratch, offset);
3526 nop();
3527 } else if (IsZero(rt)) {
3528 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
3529 beqzc(rs, offset);
3530 } else {
3531 // We don't want any other register but scratch clobbered.
3532 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3533 return false;
3534 beqc(rs, scratch, offset);
3535 }
3536 break;
3537 case ne:
3538 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3539 // Pre R6 bne is used here to make the code patchable. Otherwise we
3540 // should not generate any instruction.
3541 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3542 return false;
3543 bne(rs, scratch, offset);
3544 nop();
3545 } else if (IsZero(rt)) {
3546 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
3547 bnezc(rs, offset);
3548 } else {
3549 // We don't want any other register but scratch clobbered.
3550 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3551 return false;
3552 bnec(rs, scratch, offset);
3553 }
3554 break;
3555
3556 // Signed comparison.
3557 case greater:
3558 // rs > rt
3559 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3560 break; // No code needs to be emitted.
3561 } else if (rs == zero_reg) {
3562 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3563 return false;
3564 bltzc(scratch, offset);
3565 } else if (IsZero(rt)) {
3566 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3567 bgtzc(rs, offset);
3568 } else {
3569 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3570 return false;
3571 DCHECK(rs != scratch);
3572 bltc(scratch, rs, offset);
3573 }
3574 break;
3575 case greater_equal:
3576 // rs >= rt
3577 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3578 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3579 bc(offset);
3580 } else if (rs == zero_reg) {
3581 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3582 return false;
3583 blezc(scratch, offset);
3584 } else if (IsZero(rt)) {
3585 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3586 bgezc(rs, offset);
3587 } else {
3588 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3589 return false;
3590 DCHECK(rs != scratch);
3591 bgec(rs, scratch, offset);
3592 }
3593 break;
3594 case less:
3595 // rs < rt
3596 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3597 break; // No code needs to be emitted.
3598 } else if (rs == zero_reg) {
3599 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3600 return false;
3601 bgtzc(scratch, offset);
3602 } else if (IsZero(rt)) {
3603 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3604 bltzc(rs, offset);
3605 } else {
3606 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3607 return false;
3608 DCHECK(rs != scratch);
3609 bltc(rs, scratch, offset);
3610 }
3611 break;
3612 case less_equal:
3613 // rs <= rt
3614 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3615 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3616 bc(offset);
3617 } else if (rs == zero_reg) {
3618 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3619 return false;
3620 bgezc(scratch, offset);
3621 } else if (IsZero(rt)) {
3622 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
3623 blezc(rs, offset);
3624 } else {
3625 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3626 return false;
3627 DCHECK(rs != scratch);
3628 bgec(scratch, rs, offset);
3629 }
3630 break;
3631
3632 // Unsigned comparison.
3633 case Ugreater:
3634 // rs > rt
3635 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3636 break; // No code needs to be emitted.
3637 } else if (rs == zero_reg) {
3638 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
3639 return false;
3640 bnezc(scratch, offset);
3641 } else if (IsZero(rt)) {
3642 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
3643 bnezc(rs, offset);
3644 } else {
3645 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3646 return false;
3647 DCHECK(rs != scratch);
3648 bltuc(scratch, rs, offset);
3649 }
3650 break;
3651 case Ugreater_equal:
3652 // rs >= rt
3653 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3654 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3655 bc(offset);
3656 } else if (rs == zero_reg) {
3657 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
3658 return false;
3659 beqzc(scratch, offset);
3660 } else if (IsZero(rt)) {
3661 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3662 bc(offset);
3663 } else {
3664 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3665 return false;
3666 DCHECK(rs != scratch);
3667 bgeuc(rs, scratch, offset);
3668 }
3669 break;
3670 case Uless:
3671 // rs < rt
3672 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3673 break; // No code needs to be emitted.
3674 } else if (rs == zero_reg) {
3675 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt))
3676 return false;
3677 bnezc(scratch, offset);
3678 } else if (IsZero(rt)) {
3679 break; // No code needs to be emitted.
3680 } else {
3681 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3682 return false;
3683 DCHECK(rs != scratch);
3684 bltuc(rs, scratch, offset);
3685 }
3686 break;
3687 case Uless_equal:
3688 // rs <= rt
3689 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3690 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3691 bc(offset);
3692 } else if (rs == zero_reg) {
3693 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt))
3694 return false;
3695 bc(offset);
3696 } else if (IsZero(rt)) {
3697 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
3698 beqzc(rs, offset);
3699 } else {
3700 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
3701 return false;
3702 DCHECK(rs != scratch);
3703 bgeuc(scratch, rs, offset);
3704 }
3705 break;
3706 default:
3707 UNREACHABLE();
3708 }
3709 }
3710 CheckTrampolinePoolQuick(1);
3711 return true;
3712}
3713
3714bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3715 Register rs, const Operand& rt,
3716 BranchDelaySlot bdslot) {
3717 DCHECK(L == nullptr || offset == 0);
3718 if (!is_near(L, OffsetSize::kOffset16)) return false;
3719
3720 UseScratchRegisterScope temps(this);
3721 BlockTrampolinePoolScope block_trampoline_pool(this);
3722 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3723 int32_t offset32;
3724
3725 // Be careful to always use shifted_branch_offset only just before the
3726 // branch instruction, as the location will be remembered for patching the
3727 // target.
3728 {
3729 BlockTrampolinePoolScope block_trampoline_pool(this);
3730 switch (cond) {
3731 case cc_always:
3732 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3733 b(offset32);
3734 break;
3735 case eq:
3736 if (IsZero(rt)) {
3737 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3738 beq(rs, zero_reg, offset32);
3739 } else {
3740 // We don't want any other register but scratch clobbered.
3741 scratch = GetRtAsRegisterHelper(rt, scratch);
3742 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3743 beq(rs, scratch, offset32);
3744 }
3745 break;
3746 case ne:
3747 if (IsZero(rt)) {
3748 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3749 bne(rs, zero_reg, offset32);
3750 } else {
3751 // We don't want any other register but scratch clobbered.
3752 scratch = GetRtAsRegisterHelper(rt, scratch);
3753 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3754 bne(rs, scratch, offset32);
3755 }
3756 break;
3757
3758 // Signed comparison.
3759 case greater:
3760 if (IsZero(rt)) {
3761 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3762 bgtz(rs, offset32);
3763 } else {
3764 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3765 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3766 bne(scratch, zero_reg, offset32);
3767 }
3768 break;
3769 case greater_equal:
3770 if (IsZero(rt)) {
3771 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3772 bgez(rs, offset32);
3773 } else {
3774 Slt(scratch, rs, rt);
3775 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3776 beq(scratch, zero_reg, offset32);
3777 }
3778 break;
3779 case less:
3780 if (IsZero(rt)) {
3781 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3782 bltz(rs, offset32);
3783 } else {
3784 Slt(scratch, rs, rt);
3785 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3786 bne(scratch, zero_reg, offset32);
3787 }
3788 break;
3789 case less_equal:
3790 if (IsZero(rt)) {
3791 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3792 blez(rs, offset32);
3793 } else {
3794 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3795 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3796 beq(scratch, zero_reg, offset32);
3797 }
3798 break;
3799
3800 // Unsigned comparison.
3801 case Ugreater:
3802 if (IsZero(rt)) {
3803 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3804 bne(rs, zero_reg, offset32);
3805 } else {
3806 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3807 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3808 bne(scratch, zero_reg, offset32);
3809 }
3810 break;
3811 case Ugreater_equal:
3812 if (IsZero(rt)) {
3813 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3814 b(offset32);
3815 } else {
3816 Sltu(scratch, rs, rt);
3817 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3818 beq(scratch, zero_reg, offset32);
3819 }
3820 break;
3821 case Uless:
3822 if (IsZero(rt)) {
3823 return true; // No code needs to be emitted.
3824 } else {
3825 Sltu(scratch, rs, rt);
3826 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3827 bne(scratch, zero_reg, offset32);
3828 }
3829 break;
3830 case Uless_equal:
3831 if (IsZero(rt)) {
3832 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3833 beq(rs, zero_reg, offset32);
3834 } else {
3835 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3836 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3837 beq(scratch, zero_reg, offset32);
3838 }
3839 break;
3840 default:
3841 UNREACHABLE();
3842 }
3843 }
3844
3845 // Emit a nop in the branch delay slot if required.
3846 if (bdslot == PROTECT) nop();
3847
3848 return true;
3849}
3850
3851bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3852 Register rs, const Operand& rt,
3853 BranchDelaySlot bdslot) {
3854 BRANCH_ARGS_CHECK(cond, rs, rt);
3855
3856 if (!L) {
3857 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3858 DCHECK(is_int26(offset));
3859 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3860 } else {
3861 DCHECK(is_int16(offset));
3862 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3863 }
3864 } else {
3865 DCHECK_EQ(offset, 0);
3866 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3867 return BranchShortHelperR6(0, L, cond, rs, rt);
3868 } else {
3869 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3870 }
3871 }
3872}
3873
3874void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3875 const Operand& rt, BranchDelaySlot bdslot) {
3876 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3877}
3878
3879void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3880 const Operand& rt, BranchDelaySlot bdslot) {
3881 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3882}
3883
3884void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3885 BranchAndLinkShort(offset, bdslot);
3886}
3887
3888void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3889 const Operand& rt, BranchDelaySlot bdslot) {
3890 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3891 DCHECK(is_near);
3892 USE(is_near);
3893}
3894
3895void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3896 if (L->is_bound()) {
3897 if (is_near_branch(L)) {
3898 BranchAndLinkShort(L, bdslot);
3899 } else {
3900 BranchAndLinkLong(L, bdslot);
3901 }
3902 } else {
3903 if (is_trampoline_emitted()) {
3904 BranchAndLinkLong(L, bdslot);
3905 } else {
3906 BranchAndLinkShort(L, bdslot);
3907 }
3908 }
3909}
3910
3911void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3912 const Operand& rt, BranchDelaySlot bdslot) {
3913 if (L->is_bound()) {
3914 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3915 Label skip;
3916 Condition neg_cond = NegateCondition(cond);
3917 BranchShort(&skip, neg_cond, rs, rt);
3918 BranchAndLinkLong(L, bdslot);
3919 bind(&skip);
3920 }
3921 } else {
3922 if (is_trampoline_emitted()) {
3923 Label skip;
3924 Condition neg_cond = NegateCondition(cond);
3925 BranchShort(&skip, neg_cond, rs, rt);
3926 BranchAndLinkLong(L, bdslot);
3927 bind(&skip);
3928 } else {
3929 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3930 }
3931 }
3932}
3933
3934void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3935 BranchDelaySlot bdslot) {
3936 DCHECK(L == nullptr || offset == 0);
3937 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3938 bal(offset);
3939
3940 // Emit a nop in the branch delay slot if required.
3941 if (bdslot == PROTECT) nop();
3942}
3943
3944void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3945 DCHECK(L == nullptr || offset == 0);
3946 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3947 balc(offset);
3948}
3949
3950void MacroAssembler::BranchAndLinkShort(int32_t offset,
3951 BranchDelaySlot bdslot) {
3952 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3953 DCHECK(is_int26(offset));
3954 BranchAndLinkShortHelperR6(offset, nullptr);
3955 } else {
3956 DCHECK(is_int16(offset));
3957 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3958 }
3959}
3960
3961void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3962 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3963 BranchAndLinkShortHelperR6(0, L);
3964 } else {
3965 BranchAndLinkShortHelper(0, L, bdslot);
3966 }
3967}
3968
3969bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3970 Condition cond, Register rs,
3971 const Operand& rt) {
3972 DCHECK(L == nullptr || offset == 0);
3973 UseScratchRegisterScope temps(this);
3974 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3975 OffsetSize bits = OffsetSize::kOffset16;
3976
3977 BlockTrampolinePoolScope block_trampoline_pool(this);
3978 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3979 switch (cond) {
3980 case cc_always:
3981 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
3982 balc(offset);
3983 break;
3984 case eq:
3985 if (!is_near(L, bits)) return false;
3986 Subu(scratch, rs, rt);
3987 offset = GetOffset(offset, L, bits);
3988 beqzalc(scratch, offset);
3989 break;
3990 case ne:
3991 if (!is_near(L, bits)) return false;
3992 Subu(scratch, rs, rt);
3993 offset = GetOffset(offset, L, bits);
3994 bnezalc(scratch, offset);
3995 break;
3996
3997 // Signed comparison.
3998 case greater:
3999 // rs > rt
4000 if (rs.code() == rt.rm().code()) {
4001 break; // No code needs to be emitted.
4002 } else if (rs == zero_reg) {
4003 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
4004 return false;
4005 bltzalc(scratch, offset);
4006 } else if (IsZero(rt)) {
4007 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
4008 bgtzalc(rs, offset);
4009 } else {
4010 if (!is_near(L, bits)) return false;
4011 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4012 offset = GetOffset(offset, L, bits);
4013 bnezalc(scratch, offset);
4014 }
4015 break;
4016 case greater_equal:
4017 // rs >= rt
4018 if (rs.code() == rt.rm().code()) {
4019 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
4020 balc(offset);
4021 } else if (rs == zero_reg) {
4022 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
4023 return false;
4024 blezalc(scratch, offset);
4025 } else if (IsZero(rt)) {
4026 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
4027 bgezalc(rs, offset);
4028 } else {
4029 if (!is_near(L, bits)) return false;
4030 Slt(scratch, rs, rt);
4031 offset = GetOffset(offset, L, bits);
4032 beqzalc(scratch, offset);
4033 }
4034 break;
4035 case less:
4036 // rs < rt
4037 if (rs.code() == rt.rm().code()) {
4038 break; // No code needs to be emitted.
4039 } else if (rs == zero_reg) {
4040 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
4041 return false;
4042 bgtzalc(scratch, offset);
4043 } else if (IsZero(rt)) {
4044 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
4045 bltzalc(rs, offset);
4046 } else {
4047 if (!is_near(L, bits)) return false;
4048 Slt(scratch, rs, rt);
4049 offset = GetOffset(offset, L, bits);
4050 bnezalc(scratch, offset);
4051 }
4052 break;
4053 case less_equal:
4054 // rs <= r2
4055 if (rs.code() == rt.rm().code()) {
4056 if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false;
4057 balc(offset);
4058 } else if (rs == zero_reg) {
4059 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt))
4060 return false;
4061 bgezalc(scratch, offset);
4062 } else if (IsZero(rt)) {
4063 if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false;
4064 blezalc(rs, offset);
4065 } else {
4066 if (!is_near(L, bits)) return false;
4067 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4068 offset = GetOffset(offset, L, bits);
4069 beqzalc(scratch, offset);
4070 }
4071 break;
4072
4073 // Unsigned comparison.
4074 case Ugreater:
4075 // rs > r2
4076 if (!is_near(L, bits)) return false;
4077 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4078 offset = GetOffset(offset, L, bits);
4079 bnezalc(scratch, offset);
4080 break;
4081 case Ugreater_equal:
4082 // rs >= r2
4083 if (!is_near(L, bits)) return false;
4084 Sltu(scratch, rs, rt);
4085 offset = GetOffset(offset, L, bits);
4086 beqzalc(scratch, offset);
4087 break;
4088 case Uless:
4089 // rs < r2
4090 if (!is_near(L, bits)) return false;
4091 Sltu(scratch, rs, rt);
4092 offset = GetOffset(offset, L, bits);
4093 bnezalc(scratch, offset);
4094 break;
4095 case Uless_equal:
4096 // rs <= r2
4097 if (!is_near(L, bits)) return false;
4098 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4099 offset = GetOffset(offset, L, bits);
4100 beqzalc(scratch, offset);
4101 break;
4102 default:
4103 UNREACHABLE();
4104 }
4105 return true;
4106}
4107
4108// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
4109// with the slt instructions. We could use sub or add instead but we would miss
4110// overflow cases, so we keep slt and add an intermediate third instruction.
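// For example, the 'greater' case below emits:
//   slt    scratch, rt, rs       // scratch = (rs > rt) ? 1 : 0
//   addiu  scratch, scratch, -1  // 0 if the condition holds, -1 otherwise
//   bgezal scratch, offset       // branch-and-link when scratch >= 0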
4111bool MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L,
4112 Condition cond, Register rs,
4113 const Operand& rt,
4114 BranchDelaySlot bdslot) {
4115 DCHECK(L == nullptr || offset == 0);
4116 if (!is_near(L, OffsetSize::kOffset16)) return false;
4117
4118 Register scratch = t8;
4119 BlockTrampolinePoolScope block_trampoline_pool(this);
4120
4121 switch (cond) {
4122 case cc_always:
4123 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4124 bal(offset);
4125 break;
4126 case eq:
4127 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
4128 nop();
4129 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4130 bal(offset);
4131 break;
4132 case ne:
4133 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
4134 nop();
4135 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4136 bal(offset);
4137 break;
4138
4139 // Signed comparison.
4140 case greater:
4141 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4142 addiu(scratch, scratch, -1);
4143 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4144 bgezal(scratch, offset);
4145 break;
4146 case greater_equal:
4147 Slt(scratch, rs, rt);
4148 addiu(scratch, scratch, -1);
4149 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4150 bltzal(scratch, offset);
4151 break;
4152 case less:
4153 Slt(scratch, rs, rt);
4154 addiu(scratch, scratch, -1);
4155 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4156 bgezal(scratch, offset);
4157 break;
4158 case less_equal:
4159 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4160 addiu(scratch, scratch, -1);
4161 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4162 bltzal(scratch, offset);
4163 break;
4164
4165 // Unsigned comparison.
4166 case Ugreater:
4167 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4168 addiu(scratch, scratch, -1);
4169 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4170 bgezal(scratch, offset);
4171 break;
4172 case Ugreater_equal:
4173 Sltu(scratch, rs, rt);
4174 addiu(scratch, scratch, -1);
4175 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4176 bltzal(scratch, offset);
4177 break;
4178 case Uless:
4179 Sltu(scratch, rs, rt);
4180 addiu(scratch, scratch, -1);
4181 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4182 bgezal(scratch, offset);
4183 break;
4184 case Uless_equal:
4185 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4186 addiu(scratch, scratch, -1);
4187 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4188 bltzal(scratch, offset);
4189 break;
4190
4191 default:
4192 UNREACHABLE();
4193 }
4194
4195 // Emit a nop in the branch delay slot if required.
4196 if (bdslot == PROTECT) nop();
4197
4198 return true;
4199}
4200
4201bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
4202 Condition cond, Register rs,
4203 const Operand& rt,
4204 BranchDelaySlot bdslot) {
4205 BRANCH_ARGS_CHECK(cond, rs, rt);
4206
4207 if (!L) {
4208 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
4209 DCHECK(is_int26(offset));
4210 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
4211 } else {
4212 DCHECK(is_int16(offset));
4213 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
4214 }
4215 } else {
4216 DCHECK_EQ(offset, 0);
4217 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
4218 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
4219 } else {
4220 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
4221 }
4222 }
4223}
4224
4225void MacroAssembler::LoadFromConstantsTable(Register destination,
4226 int constant_index) {
4227 ASM_CODE_COMMENT(this);
4228 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
4229 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
4230 Ld(destination,
4231 FieldMemOperand(destination, FixedArray::kHeaderSize +
4232 constant_index * kPointerSize));
4233}
4234
4235void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
4236 Ld(destination, MemOperand(kRootRegister, offset));
4237}
4238
4239void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
4240 Sd(value, MemOperand(kRootRegister, offset));
4241}
4242
4243void MacroAssembler::LoadRootRegisterOffset(Register destination,
4244 intptr_t offset) {
4245 if (offset == 0) {
4246 Move(destination, kRootRegister);
4247 } else {
4248 Daddu(destination, kRootRegister, Operand(offset));
4249 }
4250}
4251
4252MemOperand MacroAssembler::ExternalReferenceAsOperand(
4253 ExternalReference reference, Register scratch) {
4254 if (root_array_available()) {
4255 if (reference.IsIsolateFieldId()) {
4256 return MemOperand(kRootRegister, reference.offset_from_root_register());
4257 }
4258 if (options().enable_root_relative_access) {
4259 int64_t offset =
4260 RootRegisterOffsetForExternalReference(isolate(), reference);
4261 if (is_int32(offset)) {
4262 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
4263 }
4264 }
4265 if (root_array_available_ && options().isolate_independent_code) {
4266 if (IsAddressableThroughRootRegister(isolate(), reference)) {
4267 // Some external references can be efficiently loaded as an offset from
4268 // kRootRegister.
4269 intptr_t offset =
4270 RootRegisterOffsetForExternalReference(isolate(), reference);
4271 CHECK(is_int32(offset));
4272 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
4273 } else {
4274 // Otherwise, do a memory load from the external reference table.
4275 DCHECK(scratch.is_valid());
4276 Ld(scratch, MemOperand(kRootRegister,
4277 RootRegisterOffsetForExternalReferenceTableEntry(
4278 isolate(), reference)));
4279 return MemOperand(scratch, 0);
4280 }
4281 }
4282 }
4283 DCHECK(scratch.is_valid());
4284 li(scratch, reference);
4285 return MemOperand(scratch, 0);
4286}
4287
4288void MacroAssembler::Jump(Register target, Condition cond, Register rs,
4289 const Operand& rt, BranchDelaySlot bd) {
4290 BlockTrampolinePoolScope block_trampoline_pool(this);
4291 if (kArchVariant == kMips64r6 && bd == PROTECT) {
4292 if (cond == cc_always) {
4293 jic(target, 0);
4294 } else {
4295 BRANCH_ARGS_CHECK(cond, rs, rt);
4296 Branch(2, NegateCondition(cond), rs, rt);
4297 jic(target, 0);
4298 }
4299 } else {
4300 if (cond == cc_always) {
4301 jr(target);
4302 } else {
4303 BRANCH_ARGS_CHECK(cond, rs, rt);
4304 Branch(2, NegateCondition(cond), rs, rt);
4305 jr(target);
4306 }
4307 // Emit a nop in the branch delay slot if required.
4308 if (bd == PROTECT) nop();
4309 }
4310}
4311
4312void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
4313 Condition cond, Register rs, const Operand& rt,
4314 BranchDelaySlot bd) {
4315 Label skip;
4316 if (cond != cc_always) {
4317 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
4318 }
4319 // The first instruction of 'li' may be placed in the delay slot.
4320 // This is not an issue; t9 is expected to be clobbered anyway.
4321 {
4322 BlockTrampolinePoolScope block_trampoline_pool(this);
4323 li(t9, Operand(target, rmode));
4324 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
4325 bind(&skip);
4326 }
4327}
4328
4329void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
4330 Register rs, const Operand& rt, BranchDelaySlot bd) {
4331 DCHECK(!RelocInfo::IsCodeTarget(rmode));
4332 Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
4333}
4334
4335void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
4336 Condition cond, Register rs, const Operand& rt,
4337 BranchDelaySlot bd) {
4338 DCHECK(RelocInfo::IsCodeTarget(rmode));
4339 BlockTrampolinePoolScope block_trampoline_pool(this);
4340 Label skip;
4341 if (cond != cc_always) {
4342 BranchShort(&skip, NegateCondition(cond), rs, rt);
4343 }
4344
4345 Builtin builtin = Builtin::kNoBuiltinId;
4346 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
4347 TailCallBuiltin(builtin);
4348 bind(&skip);
4349 return;
4350 }
4351
4352 Jump(static_cast<intptr_t>(code.address()), rmode, cc_always, rs, rt, bd);
4353 bind(&skip);
4354}
4355
4356void MacroAssembler::Jump(const ExternalReference& reference) {
4357 li(t9, reference);
4358 Jump(t9);
4359}
4360
4361// Note: To call gcc-compiled C code on mips, you must call through t9.
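// t9 ($25) is the PIC ABI call register: position-independent callees expect
// their own entry address in t9 and use it to set up gp.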
4362void MacroAssembler::Call(Register target, Condition cond, Register rs,
4363 const Operand& rt, BranchDelaySlot bd) {
4364 BlockTrampolinePoolScope block_trampoline_pool(this);
4365 if (kArchVariant == kMips64r6 && bd == PROTECT) {
4366 if (cond == cc_always) {
4367 jialc(target, 0);
4368 } else {
4369 BRANCH_ARGS_CHECK(cond, rs, rt);
4370 Branch(2, NegateCondition(cond), rs, rt);
4371 jialc(target, 0);
4372 }
4373 } else {
4374 if (cond == cc_always) {
4375 jalr(target);
4376 } else {
4377 BRANCH_ARGS_CHECK(cond, rs, rt);
4378 Branch(2, NegateCondition(cond), rs, rt);
4379 jalr(target);
4380 }
4381 // Emit a nop in the branch delay slot if required.
4382 if (bd == PROTECT) nop();
4383 }
4384 set_pc_for_safepoint();
4385}
4386
4387void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
4388 unsigned higher_limit,
4389 Label* on_in_range) {
4390 ASM_CODE_COMMENT(this);
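 // Folds lower_limit <= value <= higher_limit into a single unsigned
 // comparison: bias value down by lower_limit, then compare against the
 // range width.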
4391 if (lower_limit != 0) {
4392 UseScratchRegisterScope temps(this);
4393 Register scratch = temps.Acquire();
4394 Dsubu(scratch, value, Operand(lower_limit));
4395 Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
4396 } else {
4397 Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
4398 }
4399}
4400
4401void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
4402 Register rs, const Operand& rt, BranchDelaySlot bd) {
4403 BlockTrampolinePoolScope block_trampoline_pool(this);
4404 li(t9, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
4405 Call(t9, cond, rs, rt, bd);
4406}
4407
4408void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
4409 Condition cond, Register rs, const Operand& rt,
4410 BranchDelaySlot bd) {
4411 BlockTrampolinePoolScope block_trampoline_pool(this);
4412 Builtin builtin = Builtin::kNoBuiltinId;
4413 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
4414 CallBuiltin(builtin);
4415 return;
4416 }
4417 DCHECK(RelocInfo::IsCodeTarget(rmode));
4418 Call(code.address(), rmode, cond, rs, rt, bd);
4419}
4420
4421void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index,
4422 Register target) {
4423 ASM_CODE_COMMENT(this);
4424 static_assert(kSystemPointerSize == 8);
4425 static_assert(kSmiTagSize == 1);
4426 static_assert(kSmiTag == 0);
4427
4428 // The builtin_index register contains the builtin index as a Smi.
4429 SmiUntag(target, builtin_index);
4430 Dlsa(target, kRootRegister, target, kSystemPointerSizeLog2);
4431 Ld(target, MemOperand(target, IsolateData::builtin_entry_table_offset()));
4432}
4433void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
4434 Register destination) {
4435 Ld(destination, EntryFromBuiltinAsOperand(builtin));
4436}
4437MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
4438 DCHECK(root_array_available());
4439 return MemOperand(kRootRegister,
4440 IsolateData::BuiltinEntrySlotOffset(builtin));
4441}
4442
4443void MacroAssembler::CallBuiltinByIndex(Register builtin_index,
4444 Register target) {
4445 ASM_CODE_COMMENT(this);
4446 LoadEntryFromBuiltinIndex(builtin_index, target);
4447 Call(target);
4448}
4449void MacroAssembler::CallBuiltin(Builtin builtin) {
4450 ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
4451 Register temp = t9;
4452 switch (options().builtin_call_jump_mode) {
4453 case BuiltinCallJumpMode::kAbsolute: {
4454 li(temp, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
4455 Call(temp);
4456 break;
4457 }
4458 case BuiltinCallJumpMode::kIndirect: {
4459 LoadEntryFromBuiltin(builtin, temp);
4460 Call(temp);
4461 break;
4462 }
4463 case BuiltinCallJumpMode::kForMksnapshot: {
4464 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
4465 IndirectLoadConstant(temp, code);
4466 CallCodeObject(temp, kJSEntrypointTag);
4467 break;
4468 }
4469 case BuiltinCallJumpMode::kPCRelative:
4470 // Short builtin calls are unsupported on mips64.
4471 UNREACHABLE();
4472 }
4473}
4474
4475void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
4476 Register type, Operand range) {
4477 if (cond != cc_always) {
4478 Label done;
4479 Branch(&done, NegateCondition(cond), type, range);
4480 TailCallBuiltin(builtin);
4481 bind(&done);
4482 } else {
4483 TailCallBuiltin(builtin);
4484 }
4485}
4486
4487void MacroAssembler::TailCallBuiltin(Builtin builtin) {
4488 ASM_CODE_COMMENT_STRING(this,
4489 CommentForOffHeapTrampoline("tail call", builtin));
4490 Register temp = t9;
4491
4492 switch (options().builtin_call_jump_mode) {
4493 case BuiltinCallJumpMode::kAbsolute: {
4494 li(temp, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
4495 Jump(temp);
4496 break;
4497 }
4498 case BuiltinCallJumpMode::kIndirect: {
4499 LoadEntryFromBuiltin(builtin, temp);
4500 Jump(temp);
4501 break;
4502 }
4503 case BuiltinCallJumpMode::kForMksnapshot: {
4504 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
4505 IndirectLoadConstant(temp, code);
4506 JumpCodeObject(temp, kJSEntrypointTag);
4507 break;
4508 }
4509 case BuiltinCallJumpMode::kPCRelative:
4510 UNREACHABLE();
4511 }
4512}
4513
4514void MacroAssembler::PatchAndJump(Address target) {
4515 if (kArchVariant != kMips64r6) {
4516 ASM_CODE_COMMENT(this);
4517 UseScratchRegisterScope temps(this);
4518 Register scratch = temps.Acquire();
4519 mov(scratch, ra);
4520 bal(1); // jump to ld
4521 nop(); // in the delay slot
4522 ld(t9, MemOperand(ra, kInstrSize * 3)); // ra == pc_
4523 jr(t9);
4524 mov(ra, scratch); // in delay slot
4525 DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
4526 *reinterpret_cast<uint64_t*>(pc_) = target; // pc_ should be aligned.
4527 pc_ += sizeof(uint64_t);
4528 } else {
4529 // TODO(mips r6): Implement.
4530 UNIMPLEMENTED();
4531 }
4532}
4533
4534void MacroAssembler::StoreReturnAddressAndCall(Register target) {
4535 ASM_CODE_COMMENT(this);
4536 // This generates the final instruction sequence for calls to C functions
4537 // once an exit frame has been constructed.
4538 //
4539 // Note that this assumes the caller code (i.e. the InstructionStream object
4540 // currently being generated) is immovable or that the callee function cannot
4541 // trigger GC, since the callee function will return to it.
4542
4543 // Compute the return address in lr to return to after the jump below. The pc
4544 // is already at '+ 8' from the current instruction; but return is after three
4545 // instructions, so add another 4 to pc to get the return address.
4546
4547 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
4548 static constexpr int kNumInstructionsToJump = 4;
4549 Label find_ra;
4550 // Adjust the value in ra to point to the correct return location, 2nd
4551 // instruction past the real call into C code (the jalr(t9)), and push it.
4552 // This is the return address of the exit frame.
4553 if (kArchVariant >= kMips64r6) {
4554 addiupc(ra, kNumInstructionsToJump + 1);
4555 } else {
4556 // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS
4557 nal(); // nal has branch delay slot.
4558 Daddu(ra, ra, kNumInstructionsToJump * kInstrSize);
4559 }
4560 bind(&find_ra);
4561
4562 // This spot was reserved in EnterExitFrame.
4563 Sd(ra, MemOperand(sp));
4564 // Stack space reservation moved to the branch delay slot below.
4565 // Stack is still aligned.
4566
4567 // Call the C routine.
4568 mov(t9, target); // Function pointer to t9 to conform to ABI for PIC.
4569 jalr(t9);
4570 // Set up sp in the delay slot.
4571 daddiu(sp, sp, -kCArgsSlotsSize);
4572 // Make sure the stored 'ra' points to this position.
4573 DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
4574}
4575
4576void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt,
4577 BranchDelaySlot bd) {
4578 Jump(ra, cond, rs, rt, bd);
4579}
4580
4581void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
4582 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4583 (!L->is_bound() || is_near_r6(L))) {
4584 BranchShortHelperR6(0, L);
4585 } else {
4586 // Generate position independent long branch.
4587 BlockTrampolinePoolScope block_trampoline_pool(this);
4588 int64_t imm64 = branch_long_offset(L);
4589 DCHECK(is_int32(imm64));
4590 int32_t imm32 = static_cast<int32_t>(imm64);
4591 or_(t8, ra, zero_reg);
4592 nal(); // Read PC into ra register.
4593 lui(t9, (imm32 & kHiMaskOf32) >> kLuiShift); // Branch delay slot.
4594 ori(t9, t9, (imm32 & kImm16Mask));
4595 daddu(t9, ra, t9);
4596 if (bdslot == USE_DELAY_SLOT) {
4597 or_(ra, t8, zero_reg);
4598 }
4599 jr(t9);
4600 // Emit a or_ in the branch delay slot if it's protected.
4601 if (bdslot == PROTECT) or_(ra, t8, zero_reg);
4602 }
4603}
4604
4605void MacroAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
4606 if (kArchVariant == kMips64r6 && bdslot == PROTECT && (is_int26(offset))) {
4607 BranchShortHelperR6(offset, nullptr);
4608 } else {
4609 BlockTrampolinePoolScope block_trampoline_pool(this);
4610 or_(t8, ra, zero_reg);
4611 nal(); // Read PC into ra register.
4612 lui(t9, (offset & kHiMaskOf32) >> kLuiShift); // Branch delay slot.
4613 ori(t9, t9, (offset & kImm16Mask));
4614 daddu(t9, ra, t9);
4615 if (bdslot == USE_DELAY_SLOT) {
4616 or_(ra, t8, zero_reg);
4617 }
4618 jr(t9);
4619 // Emit a or_ in the branch delay slot if it's protected.
4620 if (bdslot == PROTECT) or_(ra, t8, zero_reg);
4621 }
4622}
4623
4624void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4625 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4626 (!L->is_bound() || is_near_r6(L))) {
4627 BranchAndLinkShortHelperR6(0, L);
4628 } else {
4629 // Generate position independent long branch and link.
4630 BlockTrampolinePoolScope block_trampoline_pool(this);
4631 int64_t imm64 = branch_long_offset(L);
4632 DCHECK(is_int32(imm64));
4633 int32_t imm32 = static_cast<int32_t>(imm64);
4634 lui(t8, (imm32 & kHiMaskOf32) >> kLuiShift);
4635 nal(); // Read PC into ra register.
4636 ori(t8, t8, (imm32 & kImm16Mask)); // Branch delay slot.
4637 daddu(t8, ra, t8);
4638 jalr(t8);
4639 // Emit a nop in the branch delay slot if required.
4640 if (bdslot == PROTECT) nop();
4641 }
4642}
4643
4644void MacroAssembler::DropArguments(Register count) {
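 // Pops 'count' stack slots: sp += count << kPointerSizeLog2.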
4645 Dlsa(sp, sp, count, kPointerSizeLog2);
4646}
4647
4648void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
4649 Register receiver) {
4650 DCHECK(!AreAliased(argc, receiver));
4651 DropArguments(argc);
4652 push(receiver);
4653}
4654
4655void MacroAssembler::DropAndRet(int drop) {
4656 int32_t drop_size = drop * kSystemPointerSize;
4657 DCHECK(is_int31(drop_size));
4658
4659 if (is_int16(drop_size)) {
4660 Ret(USE_DELAY_SLOT);
4661 daddiu(sp, sp, drop_size);
4662 } else {
4663 UseScratchRegisterScope temps(this);
4664 Register scratch = temps.Acquire();
4665 li(scratch, drop_size);
4666 Ret(USE_DELAY_SLOT);
4667 daddu(sp, sp, scratch);
4668 }
4669}
4670
4671void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1,
4672 const Operand& r2) {
4673 // Both Drop and Ret need to be conditional.
4674 Label skip;
4675 if (cond != cc_always) {
4676 Branch(&skip, NegateCondition(cond), r1, r2);
4677 }
4678
4679 Drop(drop);
4680 Ret();
4681
4682 if (cond != cc_always) {
4683 bind(&skip);
4684 }
4685}
4686
4687void MacroAssembler::Drop(int count, Condition cond, Register reg,
4688 const Operand& op) {
4689 if (count <= 0) {
4690 return;
4691 }
4692
4693 Label skip;
4694
4695 if (cond != al) {
4696 Branch(&skip, NegateCondition(cond), reg, op);
4697 }
4698
4699 Daddu(sp, sp, Operand(count * kPointerSize));
4700
4701 if (cond != al) {
4702 bind(&skip);
4703 }
4704}
4705
4706void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
4707 if (scratch == no_reg) {
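   // No scratch register available: swap in place with the three-XOR trick
   // (assumes reg1 and reg2 are distinct registers).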
4708 Xor(reg1, reg1, Operand(reg2));
4709 Xor(reg2, reg2, Operand(reg1));
4710 Xor(reg1, reg1, Operand(reg2));
4711 } else {
4712 mov(scratch, reg1);
4713 mov(reg1, reg2);
4714 mov(reg2, scratch);
4715 }
4716}
4717
4718void MacroAssembler::Call(Label* target) { BranchAndLink(target); }
4719
4720void MacroAssembler::LoadAddress(Register dst, Label* target) {
4721 uint64_t address = jump_address(target);
4722 li(dst, address);
4723}
4724
4725void MacroAssembler::LoadAddressPCRelative(Register dst, Label* target) {
4726 ASM_CODE_COMMENT(this);
4727 nal();
4728 // daddiu could handle 16-bit pc offset.
4729 int32_t offset = branch_offset_helper(target, OffsetSize::kOffset16);
4730 DCHECK(is_int16(offset));
4731 mov(t8, ra);
4732 daddiu(dst, ra, offset);
4733 mov(ra, t8);
4734}
4735
4736void MacroAssembler::Push(Tagged<Smi> smi) {
4737 UseScratchRegisterScope temps(this);
4738 Register scratch = temps.Acquire();
4739 li(scratch, Operand(smi));
4740 push(scratch);
4741}
4742
4743void MacroAssembler::Push(Handle<HeapObject> handle) {
4744 UseScratchRegisterScope temps(this);
4745 Register scratch = temps.Acquire();
4746 li(scratch, Operand(handle));
4747 push(scratch);
4748}
4749
4750void MacroAssembler::PushArray(Register array, Register size, Register scratch,
4751 Register scratch2, PushArrayOrder order) {
4752 DCHECK(!AreAliased(array, size, scratch, scratch2));
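 // kReverse pushes elements starting from index 0, so the highest-index
 // element ends up on top of the stack; the default order pushes from the
 // highest index down, leaving element 0 on top.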
4753 Label loop, entry;
4754 if (order == PushArrayOrder::kReverse) {
4755 mov(scratch, zero_reg);
4756 jmp(&entry);
4757 bind(&loop);
4758 Dlsa(scratch2, array, scratch, kPointerSizeLog2);
4759 Ld(scratch2, MemOperand(scratch2));
4760 push(scratch2);
4761 Daddu(scratch, scratch, Operand(1));
4762 bind(&entry);
4763 Branch(&loop, less, scratch, Operand(size));
4764 } else {
4765 mov(scratch, size);
4766 jmp(&entry);
4767 bind(&loop);
4768 Dlsa(scratch2, array, scratch, kPointerSizeLog2);
4769 Ld(scratch2, MemOperand(scratch2));
4770 push(scratch2);
4771 bind(&entry);
4772 Daddu(scratch, scratch, Operand(-1));
4773 Branch(&loop, greater_equal, scratch, Operand(zero_reg));
4774 }
4775}
4776
4777// ---------------------------------------------------------------------------
4778// Exception handling.
4779
4780void MacroAssembler::PushStackHandler() {
4781 // Adjust this code if not the case.
4782 static_assert(StackHandlerConstants::kSize == 2 * kPointerSize);
4783 static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4784
4785 Push(Smi::zero()); // Padding.
4786
4787 // Link the current handler as the next handler.
4788 li(t2,
4789 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4790 Ld(t1, MemOperand(t2));
4791 push(t1);
4792
4793 // Set this new handler as the current one.
4794 Sd(sp, MemOperand(t2));
4795}
4796
4797void MacroAssembler::PopStackHandler() {
4798 static_assert(StackHandlerConstants::kNextOffset == 0);
4799 pop(a1);
4800 Daddu(sp, sp,
4801 Operand(
4802 static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
4803 UseScratchRegisterScope temps(this);
4804 Register scratch = temps.Acquire();
4805 li(scratch,
4806 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4807 Sd(a1, MemOperand(scratch));
4808}
4809
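// Canonicalizes a NaN by routing it through an arithmetic FPU op: subtracting
// 0.0 quiets signaling NaNs and leaves ordinary values unchanged.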
4810void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4811 const DoubleRegister src) {
4812 sub_d(dst, src, kDoubleRegZero);
4813}
4814
4815void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4816 if (IsMipsSoftFloatABI) {
4817 if (kArchEndian == kLittle) {
4818 Move(dst, v0, v1);
4819 } else {
4820 Move(dst, v1, v0);
4821 }
4822 } else {
4823 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
4824 }
4825}
4826
4827void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4828 if (IsMipsSoftFloatABI) {
4829 if (kArchEndian == kLittle) {
4830 Move(dst, a0, a1);
4831 } else {
4832 Move(dst, a1, a0);
4833 }
4834 } else {
4835 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
4836 }
4837}
4838
4839void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4840 if (!IsMipsSoftFloatABI) {
4841 Move(f12, src);
4842 } else {
4843 if (kArchEndian == kLittle) {
4844 Move(a0, a1, src);
4845 } else {
4846 Move(a1, a0, src);
4847 }
4848 }
4849}
4850
4851void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4852 if (!IsMipsSoftFloatABI) {
4853 Move(f0, src);
4854 } else {
4855 if (kArchEndian == kLittle) {
4856 Move(v0, v1, src);
4857 } else {
4858 Move(v1, v0, src);
4859 }
4860 }
4861}
4862
4863void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4864 DoubleRegister src2) {
4865 if (!IsMipsSoftFloatABI) {
4866 const DoubleRegister fparg2 = f13;
4867 if (src2 == f12) {
4868 DCHECK(src1 != fparg2);
4869 Move(fparg2, src2);
4870 Move(f12, src1);
4871 } else {
4872 Move(f12, src1);
4873 Move(fparg2, src2);
4874 }
4875 } else {
4876 if (kArchEndian == kLittle) {
4877 Move(a0, a1, src1);
4878 Move(a2, a3, src2);
4879 } else {
4880 Move(a1, a0, src1);
4881 Move(a3, a2, src2);
4882 }
4883 }
4884}
4885
4886// -----------------------------------------------------------------------------
4887// JavaScript invokes.
4888
4889void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
4890 ASM_CODE_COMMENT(this);
4891 DCHECK(root_array_available());
4892 intptr_t offset = kind == StackLimitKind::kRealStackLimit
4893 ? IsolateData::real_jslimit_offset()
4894 : IsolateData::jslimit_offset();
4895
4896 Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
4897}
4898
4899void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
4900 Register scratch2,
4901 Label* stack_overflow) {
4902 ASM_CODE_COMMENT(this);
4903 // Check the stack for overflow. We are not trying to catch
4904 // interruptions (e.g. debug break and preemption) here, so the "real stack
4905 // limit" is checked.
4906
4907 LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
4908 // Make scratch1 the space we have left. The stack might already be overflowed
4909 // here which will cause scratch1 to become negative.
4910 dsubu(scratch1, sp, scratch1);
4911 // Check if the arguments will overflow the stack.
4912 dsll(scratch2, num_args, kPointerSizeLog2);
4913 // Signed comparison.
4914 Branch(stack_overflow, le, scratch1, Operand(scratch2));
4915}
4916
4917#ifdef V8_ENABLE_LEAPTIERING
4918void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
4919 Register dispatch_handle,
4920 Register scratch) {
4921 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
4922 ASM_CODE_COMMENT(this);
4923
4924 Register index = destination;
4925 li(scratch, ExternalReference::js_dispatch_table_address());
4926 dsrl(index, dispatch_handle, kJSDispatchHandleShift);
4927 dsll(destination, index, kJSDispatchTableEntrySizeLog2);
4928 Daddu(scratch, scratch, destination);
4929 Ld(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
4930}
4931
4932void MacroAssembler::LoadParameterCountFromJSDispatchTable(
4933 Register destination, Register dispatch_handle, Register scratch) {
4934 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
4935 ASM_CODE_COMMENT(this);
4936
4937 // MSARegister index = MSARegister::from_code(destination.code());
4938 Register index = destination;
4939 li(scratch, ExternalReference::js_dispatch_table_address());
4940 dsrl(index, dispatch_handle, kJSDispatchHandleShift);
4941 dsll(destination, index, kJSDispatchTableEntrySizeLog2);
4942 Daddu(scratch, scratch, destination);
4943 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
4944 Lhu(destination, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
4945}
4946
4947void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
4948 Register entrypoint, Register parameter_count, Register dispatch_handle,
4949 Register scratch) {
4950 DCHECK(!AreAliased(entrypoint, parameter_count, dispatch_handle, scratch));
4951 ASM_CODE_COMMENT(this);
4952
4953 // MSARegister index = MSARegister::from_code(parameter_count.code());
4954 Register index = parameter_count;
4955 li(scratch, ExternalReference::js_dispatch_table_address());
4956 dsrl(index, dispatch_handle, kJSDispatchHandleShift);
4957 dsll(parameter_count, index, kJSDispatchTableEntrySizeLog2);
4958 Daddu(scratch, scratch, parameter_count);
4959 Ld(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
4960 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
4961 Lhu(parameter_count, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
4962}
4963#endif
4964
4966 Register code_data_container, Register scratch, Condition cond,
4967 Label* target) {
4968 Lwu(scratch, FieldMemOperand(code_data_container, Code::kFlagsOffset));
4969 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
4970 Branch(target, cond, scratch, Operand(zero_reg));
4971}
4972
4973Operand MacroAssembler::ClearedValue() const {
4974 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
4975}
4976
4977void MacroAssembler::InvokePrologue(Register expected_parameter_count,
4978 Register actual_parameter_count,
4979 InvokeType type) {
4980 ASM_CODE_COMMENT(this);
4981 Label regular_invoke;
4982
4983 // a0: actual arguments count
4984 // a1: function (passed through to callee)
4985 // a2: expected arguments count
4986
4987 DCHECK_EQ(actual_parameter_count, a0);
4988 DCHECK_EQ(expected_parameter_count, a2);
4989
4990 // If overapplication or if the actual argument count is equal to the
4991 // formal parameter count, no need to push extra undefined values.
4992 Dsubu(expected_parameter_count, expected_parameter_count,
4993 actual_parameter_count);
4994 Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
4995
4996 Label stack_overflow;
4997 StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
4998 // Underapplication. Move the arguments already in the stack, including the
4999 // receiver and the return address.
5000 {
5001 Label copy;
5002 Register src = a6, dest = a7;
5003 mov(src, sp);
5004 dsll(t0, expected_parameter_count, kSystemPointerSizeLog2);
5005 Dsubu(sp, sp, Operand(t0));
5006 // Update stack pointer.
5007 mov(dest, sp);
5008 mov(t0, actual_parameter_count);
5009 bind(&copy);
5010 Ld(t1, MemOperand(src, 0));
5011 Sd(t1, MemOperand(dest, 0));
5012 Dsubu(t0, t0, Operand(1));
5013 Daddu(src, src, Operand(kSystemPointerSize));
5014 Daddu(dest, dest, Operand(kSystemPointerSize));
5015 Branch(&copy, gt, t0, Operand(zero_reg));
5016 }
5017
5018 // Fill remaining expected arguments with undefined values.
5019 LoadRoot(t0, RootIndex::kUndefinedValue);
5020 {
5021 Label loop;
5022 bind(&loop);
5023 Sd(t0, MemOperand(a7, 0));
5024 Dsubu(expected_parameter_count, expected_parameter_count, Operand(1));
5025 Daddu(a7, a7, Operand(kSystemPointerSize));
5026 Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
5027 }
5028 b(&regular_invoke);
5029 nop();
5030
5031 bind(&stack_overflow);
5032 {
5033 FrameScope frame(
5034 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
5035 CallRuntime(Runtime::kThrowStackOverflow);
5036 break_(0xCC);
5037 }
5038
5039 bind(&regular_invoke);
5040}
5041
5042void MacroAssembler::CheckDebugHook(
5043 Register fun, Register new_target,
5044 Register expected_parameter_count_or_dispatch_handle,
5045 Register actual_parameter_count) {
5046 DCHECK(!AreAliased(t0, fun, new_target,
5047 expected_parameter_count_or_dispatch_handle,
5048 actual_parameter_count));
5049 Label skip_hook;
5050
5051 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
5052 Lb(t0, MemOperand(t0));
5053 Branch(&skip_hook, eq, t0, Operand(zero_reg));
5054 {
5055 // Load receiver to pass it later to DebugOnFunctionCall hook.
5056 LoadReceiver(t0);
5057 FrameScope frame(
5058 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
5059 SmiTag(expected_parameter_count_or_dispatch_handle);
5060 SmiTag(actual_parameter_count);
5061 Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
5062 if (new_target.is_valid()) {
5063 Push(new_target);
5064 }
5065 Push(fun, fun, t0);
5066 CallRuntime(Runtime::kDebugOnFunctionCall);
5067 Pop(fun);
5068 if (new_target.is_valid()) {
5069 Pop(new_target);
5070 }
5071
5072 Pop(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
5073 SmiUntag(actual_parameter_count);
5074
5075 SmiUntag(expected_parameter_count_or_dispatch_handle);
5076 }
5077 bind(&skip_hook);
5078}
5079
5080#ifdef V8_ENABLE_LEAPTIERING
5081void MacroAssembler::InvokeFunction(
5082 Register function, Register actual_parameter_count, InvokeType type,
5083 ArgumentAdaptionMode argument_adaption_mode) {
5084 ASM_CODE_COMMENT(this);
5085 // You can't call a function without a valid frame.
5086 DCHECK(type == InvokeType::kJump || has_frame());
5087
5088 // Contract with called JS functions requires that function is passed in a1.
5089 // (See FullCodeGenerator::Generate().)
5090 DCHECK_EQ(function, a1);
5091
5092 // Set up the context.
5093 Ld(cp, FieldMemOperand(function, JSFunction::kContextOffset));
5094
5095 InvokeFunctionCode(function, no_reg, actual_parameter_count, type,
5096 argument_adaption_mode);
5097}
5098
5099void MacroAssembler::InvokeFunctionWithNewTarget(
5100 Register function, Register new_target, Register actual_parameter_count,
5101 InvokeType type) {
5102 ASM_CODE_COMMENT(this);
5103 // You can't call a function without a valid frame.
5104 DCHECK(type == InvokeType::kJump || has_frame());
5105
5106 // Contract with called JS functions requires that function is passed in a1.
5107 // (See FullCodeGenerator::Generate().)
5108 DCHECK_EQ(function, a1);
5109
5110 Ld(cp, FieldMemOperand(function, JSFunction::kContextOffset));
5111
5112 InvokeFunctionCode(function, new_target, actual_parameter_count, type);
5113}
5114
5115void MacroAssembler::InvokeFunctionCode(
5116 Register function, Register new_target, Register actual_parameter_count,
5117 InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
5118 ASM_CODE_COMMENT(this);
5119 // You can't call a function without a valid frame.
5120 DCHECK(type == InvokeType::kJump || has_frame());
5121 DCHECK_EQ(function, a1);
5122 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
5123
5124 Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
5125 Lw(dispatch_handle,
5126 FieldMemOperand(function, JSFunction::kDispatchHandleOffset));
5127
5128 // On function call, call into the debugger if necessary.
5129 Label debug_hook, continue_after_hook;
5130 {
5131 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
5132 Lb(t0, MemOperand(t0, 0));
5133 BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
5134 }
5135 bind(&continue_after_hook);
5136
5137 // Clear the new.target register if not given.
5138 if (!new_target.is_valid()) {
5139 LoadRoot(a3, RootIndex::kUndefinedValue);
5140 }
5141
5142 Register scratch = s1;
5143 if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) {
5144 Register expected_parameter_count = a2;
5145 LoadParameterCountFromJSDispatchTable(expected_parameter_count,
5146 dispatch_handle, scratch);
5147 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
5148 }
5149
5150 // We call indirectly through the code field in the function to
5151 // allow recompilation to take effect without changing any of the
5152 // call sites.
5153 LoadEntrypointFromJSDispatchTable(kJavaScriptCallCodeStartRegister,
5154 dispatch_handle, scratch);
5155 switch (type) {
5156 case InvokeType::kCall:
5157 Call(kJavaScriptCallCodeStartRegister);
5158 break;
5159 case InvokeType::kJump:
5160 Jump(kJavaScriptCallCodeStartRegister);
5161 break;
5162 }
5163 Label done;
5164 Branch(&done);
5165
5166 // Deferred debug hook.
5167 bind(&debug_hook);
5168 CheckDebugHook(function, new_target, dispatch_handle, actual_parameter_count);
5169 Branch(&continue_after_hook);
5170
5171 bind(&done);
5172}
5173#else
5174void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
5175 Register expected_parameter_count,
5176 Register actual_parameter_count,
5177 InvokeType type) {
5178 // You can't call a function without a valid frame.
5179 DCHECK(type == InvokeType::kJump || has_frame());
5180 DCHECK_EQ(function, a1);
5181 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
5182
5183 // On function call, call into the debugger if necessary.
5184 CheckDebugHook(function, new_target, expected_parameter_count,
5185 actual_parameter_count);
5186
5187 // Clear the new.target register if not given.
5188 if (!new_target.is_valid()) {
5189 LoadRoot(a3, RootIndex::kUndefinedValue);
5190 }
5191
5192 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
5193 // We call indirectly through the code field in the function to
5194 // allow recompilation to take effect without changing any of the
5195 // call sites.
5196 constexpr int unused_argument_count = 0;
5197 switch (type) {
5198 case InvokeType::kCall:
5199 CallJSFunction(function, unused_argument_count);
5200 break;
5201 case InvokeType::kJump:
5202 JumpJSFunction(function);
5203 break;
5204 }
5205 Label done;
5206 // Continue here if InvokePrologue does handle the invocation due to
5207 // mismatched parameter counts.
5208 bind(&done);
5209}
5210
5211void MacroAssembler::InvokeFunctionWithNewTarget(
5212 Register function, Register new_target, Register actual_parameter_count,
5213 InvokeType type) {
5214 ASM_CODE_COMMENT(this);
5215 // You can't call a function without a valid frame.
5216 DCHECK(type == InvokeType::kJump || has_frame());
5217
5218 // Contract with called JS functions requires that function is passed in a1.
5219 DCHECK_EQ(function, a1);
5220 Register expected_parameter_count = a2;
5221 Register temp_reg = t0;
5222 Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5223 Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5224 // The argument count is stored as a uint16_t.
5225 Lhu(expected_parameter_count,
5226 FieldMemOperand(temp_reg,
5227 SharedFunctionInfo::kFormalParameterCountOffset));
5228
5229 InvokeFunctionCode(a1, new_target, expected_parameter_count,
5230 actual_parameter_count, type);
5231}
5232
5233void MacroAssembler::InvokeFunction(Register function,
5234 Register expected_parameter_count,
5235 Register actual_parameter_count,
5236 InvokeType type) {
5237 ASM_CODE_COMMENT(this);
5238 // You can't call a function without a valid frame.
5239 DCHECK(type == InvokeType::kJump || has_frame());
5240
5241 // Contract with called JS functions requires that function is passed in a1.
5242 DCHECK_EQ(function, a1);
5243
5244 // Get the function and setup the context.
5245 Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5246
5247 InvokeFunctionCode(a1, no_reg, expected_parameter_count,
5248 actual_parameter_count, type);
5249}
5250#endif // V8_ENABLE_LEAPTIERING
5251// ---------------------------------------------------------------------------
5252// Support functions.
5253
5254void MacroAssembler::GetObjectType(Register object, Register map,
5255 Register type_reg) {
5256 LoadMap(map, object);
5257 Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5258}
5259
5260void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
5261 InstanceType lower_limit,
5262 Register range) {
5263 Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5264 Dsubu(range, type_reg, Operand(lower_limit));
5265}
5266
5267// -----------------------------------------------------------------------------
5268// Runtime calls.
5269
5270void MacroAssembler::DaddOverflow(Register dst, Register left,
5271 const Operand& right, Register overflow) {
5272 ASM_CODE_COMMENT(this);
5273 BlockTrampolinePoolScope block_trampoline_pool(this);
5274 Register right_reg = no_reg;
5275 Register scratch = t8;
5276 if (!right.is_reg()) {
5277 li(at, Operand(right));
5278 right_reg = at;
5279 } else {
5280 right_reg = right.rm();
5281 }
5282
5283 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
5284 overflow != scratch);
5285 DCHECK(overflow != left && overflow != right_reg);
5286
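 // Signed addition overflows iff both operands have the same sign and the
 // result's sign differs; the two xors and the and below set the sign bit of
 // 'overflow' exactly in that case.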
5287 if (dst == left || dst == right_reg) {
5288 daddu(scratch, left, right_reg);
5289 xor_(overflow, scratch, left);
5290 xor_(at, scratch, right_reg);
5291 and_(overflow, overflow, at);
5292 mov(dst, scratch);
5293 } else {
5294 daddu(dst, left, right_reg);
5295 xor_(overflow, dst, left);
5296 xor_(at, dst, right_reg);
5297 and_(overflow, overflow, at);
5298 }
5299}
5300
5301void MacroAssembler::DsubOverflow(Register dst, Register left,
5302 const Operand& right, Register overflow) {
5303 ASM_CODE_COMMENT(this);
5304 BlockTrampolinePoolScope block_trampoline_pool(this);
5305 Register right_reg = no_reg;
5306 Register scratch = t8;
5307 if (!right.is_reg()) {
5308 li(at, Operand(right));
5309 right_reg = at;
5310 } else {
5311 right_reg = right.rm();
5312 }
5313
5314 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
5315 overflow != scratch);
5316 DCHECK(overflow != left && overflow != right_reg);
5317
5318 if (dst == left || dst == right_reg) {
5319 dsubu(scratch, left, right_reg);
5320 xor_(overflow, left, scratch);
5321 xor_(at, left, right_reg);
5322 and_(overflow, overflow, at);
5323 mov(dst, scratch);
5324 } else {
5325 dsubu(dst, left, right_reg);
5326 xor_(overflow, left, dst);
5327 xor_(at, left, right_reg);
5328 and_(overflow, overflow, at);
5329 }
5330}
5331
5332void MacroAssembler::MulOverflow(Register dst, Register left,
5333 const Operand& right, Register overflow) {
5334 ASM_CODE_COMMENT(this);
5335 BlockTrampolinePoolScope block_trampoline_pool(this);
5336 Register right_reg = no_reg;
5337 Register scratch = t8;
5338 if (!right.is_reg()) {
5339 li(at, Operand(right));
5340 right_reg = at;
5341 } else {
5342 right_reg = right.rm();
5343 }
5344
5345 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
5346 overflow != scratch);
5347 DCHECK(overflow != left && overflow != right_reg);
5348
5349 if (dst == left || dst == right_reg) {
5350 Mul(scratch, left, right_reg);
5351 Mulh(overflow, left, right_reg);
5352 mov(dst, scratch);
5353 } else {
5354 Mul(dst, left, right_reg);
5355 Mulh(overflow, left, right_reg);
5356 }
5357
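 // Mulh holds bits 63..32 of the product and dsra32 extracts the sign
 // extension of the 32-bit result, so 'overflow' ends up non-zero iff the
 // product does not fit in 32 bits.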
5358 dsra32(scratch, dst, 0);
5359 xor_(overflow, overflow, scratch);
5360}
5361
5362void MacroAssembler::DMulOverflow(Register dst, Register left,
5363 const Operand& right, Register overflow) {
5364 ASM_CODE_COMMENT(this);
5365 BlockTrampolinePoolScope block_trampoline_pool(this);
5366 Register right_reg = no_reg;
5367 Register scratch = t8;
5368 if (!right.is_reg()) {
5369 li(at, Operand(right));
5370 right_reg = at;
5371 } else {
5372 right_reg = right.rm();
5373 }
5374
5375 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
5376 overflow != scratch);
5377 DCHECK(overflow != left && overflow != right_reg);
5378
5379 if (dst == left || dst == right_reg) {
5380 Dmul(scratch, left, right_reg);
5381 Dmulh(overflow, left, right_reg);
5382 mov(dst, scratch);
5383 } else {
5384 Dmul(dst, left, right_reg);
5385 Dmulh(overflow, left, right_reg);
5386 }
5387
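 // Dmulh holds the upper 64 bits of the 128-bit product and dsra32(..., 31)
 // shifts right by 63 to get the sign of the low half, so 'overflow' ends up
 // non-zero iff the product does not fit in 64 bits.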
5388 dsra32(scratch, dst, 31);
5389 xor_(overflow, overflow, scratch);
5390}
5391
5392void MacroAssembler::CallRuntime(const Runtime::Function* f,
5393 int num_arguments) {
5394 ASM_CODE_COMMENT(this);
5395 // All parameters are on the stack. v0 has the return value after call.
5396
5397 // If the expected number of arguments of the runtime function is
5398 // constant, we check that the actual number of arguments match the
5399 // expectation.
5400 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5401
5402 // TODO(1236192): Most runtime routines don't need the number of
5403 // arguments passed in because it is constant. At some point we
5404 // should remove this need and make the runtime routine entry code
5405 // smarter.
5406 PrepareCEntryArgs(num_arguments);
5407 PrepareCEntryFunction(ExternalReference::Create(f));
5408 bool switch_to_central_stack = options().is_wasm;
5409 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central_stack));
5410}
5411
5413 ASM_CODE_COMMENT(this);
5414 const Runtime::Function* function = Runtime::FunctionForId(fid);
5415 DCHECK_EQ(1, function->result_size);
5416 if (function->nargs >= 0) {
5417 PrepareCEntryArgs(function->nargs);
5418 }
5419 JumpToExternalReference(ExternalReference::Create(fid));
5420}
5421
5422void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5423 bool builtin_exit_frame) {
5424 PrepareCEntryFunction(builtin);
5425 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
5426}
5427
5428void MacroAssembler::LoadWeakValue(Register out, Register in,
5429 Label* target_if_cleared) {
5430 Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
5431
5432 And(out, in, Operand(~kWeakHeapObjectMask));
5433}
5434
5435void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
5436 Register scratch1,
5437 Register scratch2) {
5438 DCHECK_GT(value, 0);
5439 if (v8_flags.native_code_counters && counter->Enabled()) {
5440 ASM_CODE_COMMENT(this);
5441 // This operation has to be exactly 32-bit wide in case the external
5442 // reference table redirects the counter to a uint32_t dummy_stats_counter_
5443 // field.
5444 li(scratch2, ExternalReference::Create(counter));
5445 Lw(scratch1, MemOperand(scratch2));
5446 Addu(scratch1, scratch1, Operand(value));
5447 Sw(scratch1, MemOperand(scratch2));
5448 }
5449}
5450
5451void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
5452 Register scratch1,
5453 Register scratch2) {
5454 DCHECK_GT(value, 0);
5455 if (v8_flags.native_code_counters && counter->Enabled()) {
5456 ASM_CODE_COMMENT(this);
5457 // This operation has to be exactly 32-bit wide in case the external
5458 // reference table redirects the counter to a uint32_t dummy_stats_counter_
5459 // field.
5460 li(scratch2, ExternalReference::Create(counter));
5461 Lw(scratch1, MemOperand(scratch2));
5462 Subu(scratch1, scratch1, Operand(value));
5463 Sw(scratch1, MemOperand(scratch2));
5464 }
5465}
5466
5467// -----------------------------------------------------------------------------
5468// Debugging.
5469
5470void MacroAssembler::Trap() { stop(); }
5471void MacroAssembler::DebugBreak() { stop(); }
5472
5473void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs,
5474 Operand rt) {
5475 Label L;
5476 Branch(&L, cc, rs, rt);
5477 Abort(reason);
5478 // Will not return here.
5479 bind(&L);
5480}
5481void MacroAssembler::SbxCheck(Condition cc, AbortReason reason, Register rj,
5482 Operand rk) {
5483 Check(cc, reason, rj, rk);
5484}
5485void MacroAssembler::Abort(AbortReason reason) {
5486 ASM_CODE_COMMENT(this);
5487 if (v8_flags.code_comments) {
5488 RecordComment("Abort message:", SourceLocation{});
5489 RecordComment(GetAbortReason(reason), SourceLocation{});
5490 }
5491
5492 // Without debug code, save the code size and just trap.
5493 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
5494 stop();
5495 return;
5496 }
5497
5498 if (should_abort_hard()) {
5499 // We don't care if we constructed a frame. Just pretend we did.
5500 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
5501 PrepareCallCFunction(1, a0);
5502 li(a0, Operand(static_cast<int>(reason)));
5503 li(a1, ExternalReference::abort_with_reason());
5504 // Use Call directly to avoid any unneeded overhead. The function won't
5505 // return anyway.
5506 Call(a1);
5507 return;
5508 }
5509
5510 Label abort_start;
5511 bind(&abort_start);
5512
5513 Move(a0, Smi::FromInt(static_cast<int>(reason)));
5514
5515 {
5516 // We don't actually want to generate a pile of code for this, so just
5517 // claim there is a stack frame, without generating one.
5518 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
5519 if (root_array_available()) {
5520 // Generate an indirect call via builtins entry table here in order to
5521 // ensure that the interpreter_entry_return_pc_offset is the same for
5522 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
5523 // when v8_flags.debug_code is enabled.
5524 LoadEntryFromBuiltin(Builtin::kAbort, t9);
5525 Call(t9);
5526 } else {
5527 CallBuiltin(Builtin::kAbort);
5528 }
5529 }
5530 // Will not return here.
5531 if (is_trampoline_pool_blocked()) {
5532 // If the calling code cares about the exact number of
5533 // instructions generated, we insert padding here to keep the size
5534 // of the Abort macro constant.
5535 // Currently in debug mode with debug_code enabled the number of
5536 // generated instructions is 10, so we use this as a maximum value.
5537 static const int kExpectedAbortInstructions = 10;
5538 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5539 DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
5540 while (abort_instructions++ < kExpectedAbortInstructions) {
5541 nop();
5542 }
5543 }
5544}
5545
5546void MacroAssembler::LoadMap(Register destination, Register object) {
5547 Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset));
5548}
5549
5550void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
5551 Register scratch, Label* fbv_undef) {
5552 Label done;
5553 // Load the feedback vector from the closure.
5554 Ld(dst, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
5555 Ld(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset));
5556
5557 // Check if feedback vector is valid.
5558 LoadMap(scratch, dst);
5559 Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5560 Branch(&done, eq, scratch, Operand(FEEDBACK_VECTOR_TYPE));
5561
5562 // Not valid, load undefined.
5563 LoadRoot(dst, RootIndex::kUndefinedValue);
5564 Branch(fbv_undef);
5565
5566 bind(&done);
5567}
5568
5569void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
5570 LoadMap(dst, cp);
5571 Ld(dst,
5572 FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
5573 Ld(dst, MemOperand(dst, Context::SlotOffset(index)));
5574}
5575
5576void MacroAssembler::StubPrologue(StackFrame::Type type) {
5577 UseScratchRegisterScope temps(this);
5578 Register scratch = temps.Acquire();
5579 li(scratch, Operand(StackFrame::TypeToMarker(type)));
5580 PushCommonFrame(scratch);
5581}
5582
5583void MacroAssembler::Prologue() { PushStandardFrame(a1); }
5584
5585void MacroAssembler::EnterFrame(StackFrame::Type type) {
5586 ASM_CODE_COMMENT(this);
5587 BlockTrampolinePoolScope block_trampoline_pool(this);
5588 Push(ra, fp);
5589 Move(fp, sp);
5590 if (!StackFrame::IsJavaScript(type)) {
5591 li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
5592 Push(kScratchReg);
5593 }
5594#if V8_ENABLE_WEBASSEMBLY
5595 if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP) {
5596 Push(kWasmImplicitArgRegister);
5597 }
5598#endif // V8_ENABLE_WEBASSEMBLY
5599}
5600
5601void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5602 ASM_CODE_COMMENT(this);
5603 daddiu(sp, fp, 2 * kPointerSize);
5604 Ld(ra, MemOperand(fp, 1 * kPointerSize));
5605 Ld(fp, MemOperand(fp, 0 * kPointerSize));
5606}
5607
5608void MacroAssembler::EnterExitFrame(Register scratch, int stack_space,
5609 StackFrame::Type frame_type) {
5610 ASM_CODE_COMMENT(this);
5611 DCHECK(frame_type == StackFrame::EXIT ||
5612 frame_type == StackFrame::BUILTIN_EXIT ||
5613 frame_type == StackFrame::API_ACCESSOR_EXIT ||
5614 frame_type == StackFrame::API_CALLBACK_EXIT);
5615
5616 using ER = ExternalReference;
5617
5618 // Set up the frame structure on the stack.
5619 static_assert(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5620 static_assert(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5621 static_assert(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5622
5623 // This is how the stack will look:
5624 // fp + 2 (==kCallerSPDisplacement) - old stack's end
5625 // [fp + 1 (==kCallerPCOffset)] - saved old ra
5626 // [fp + 0 (==kCallerFPOffset)] - saved old fp
5627 // [fp - 1] - frame_type Smi
5628 // [fp - 2 (==kSPOffset)] - sp of the called function
5629 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5630 // new stack (will contain saved ra)
5631
5632 // Save registers and reserve room for saved entry sp.
5633 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
5634 Sd(ra, MemOperand(sp, 3 * kPointerSize));
5635 Sd(fp, MemOperand(sp, 2 * kPointerSize));
5636 li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
5637 Sd(scratch, MemOperand(sp, 1 * kPointerSize));
5638
5639 // Set up new frame pointer.
5640 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
5641
5642 if (v8_flags.debug_code) {
5643 Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5644 }
5645
5646 // Save the frame pointer and the context in top.
5647 ER c_entry_fp_address =
5648 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
5649 Sd(fp, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
5650
5651 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
5652 Sd(cp, ExternalReferenceAsOperand(context_address, no_reg));
5653
5654 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5655
5656 // Reserve place for the return address, stack space and align the frame
5657 // preparing for calling the runtime function.
5658 DCHECK_GE(stack_space, 0);
5659 Dsubu(sp, sp, Operand((stack_space + 1) * kPointerSize));
5660 if (frame_alignment > 0) {
5661 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
5662 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5663 }
5664
5665 // Set the exit frame sp value to point just before the return address
5666 // location.
5667 daddiu(scratch, sp, kPointerSize);
5668 Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
5669}
5670
5671void MacroAssembler::LeaveExitFrame(Register scratch) {
5672 ASM_CODE_COMMENT(this);
5673 BlockTrampolinePoolScope block_trampoline_pool(this);
5674
5675 using ER = ExternalReference;
5676
5677 // Restore current context from top and clear it in debug mode.
5678 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
5679 Ld(cp, ExternalReferenceAsOperand(context_address, no_reg));
5680
5681 if (v8_flags.debug_code) {
5682 li(scratch, Operand(Context::kInvalidContext));
5683 Sd(scratch, ExternalReferenceAsOperand(context_address, no_reg));
5684 }
5685
5686 // Clear the top frame.
5687 ER c_entry_fp_address =
5688 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
5689 Sd(zero_reg, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
5690
5691 // Pop the arguments, restore registers, and return.
5692 mov(sp, fp); // Respect ABI stack constraint.
5693 Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5694 Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5695
5696 daddiu(sp, sp, 2 * kPointerSize);
5697}
5698
5699int MacroAssembler::ActivationFrameAlignment() {
5700#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5701 // Running on the real platform. Use the alignment as mandated by the local
5702 // environment.
5703 // Note: This will break if we ever start generating snapshots on one Mips
5704 // platform for another Mips platform with a different alignment.
5705 return base::OS::ActivationFrameAlignment();
5706#else // V8_HOST_ARCH_MIPS
5707 // If we are using the simulator then we should always align to the expected
5708 // alignment. As the simulator is used to generate snapshots we do not know
5709 // if the target platform will need alignment, so this is controlled from a
5710 // flag.
5711 return v8_flags.sim_stack_alignment;
5712#endif // V8_HOST_ARCH_MIPS
5713}
5714
5715void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
5716 if (SmiValuesAre32Bits()) {
5717 Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
5718 } else {
5719 DCHECK(SmiValuesAre31Bits());
5720 Lw(dst, src);
5721 SmiUntag(dst);
5722 }
5723}
5724
5725void MacroAssembler::JumpIfSmi(Register value, Label* smi_label,
5726 BranchDelaySlot bd) {
5727 DCHECK_EQ(0, kSmiTag);
5728 UseScratchRegisterScope temps(this);
5729 Register scratch = temps.Acquire();
5730 andi(scratch, value, kSmiTagMask);
5731 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5732}
5733
5734void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
5735 BranchDelaySlot bd) {
5736 DCHECK_EQ(0, kSmiTag);
5737 UseScratchRegisterScope temps(this);
5738 Register scratch = temps.Acquire();
5739 andi(scratch, value, kSmiTagMask);
5740 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5741}
5742
5743#ifdef V8_ENABLE_DEBUG_CODE
5744
5745void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
5746 Operand rt) {
5747 if (v8_flags.debug_code) Check(cc, reason, rs, rt);
5748}
5749
5750void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
5751 Register tmp, AbortReason abort_reason) {
5752 if (!v8_flags.debug_code) return;
5753
5754 ASM_CODE_COMMENT(this);
5755 DCHECK(!AreAliased(object, map_tmp, tmp));
5756 Label ok;
5757
5758 JumpIfSmi(object, &ok);
5759
5760 GetObjectType(object, map_tmp, tmp);
5761
5762 Branch(&ok, kUnsignedLessThanEqual, tmp, Operand(LAST_NAME_TYPE));
5763
5764 Branch(&ok, kUnsignedGreaterThanEqual, tmp, Operand(FIRST_JS_RECEIVER_TYPE));
5765
5766 Branch(&ok, kEqual, map_tmp, RootIndex::kHeapNumberMap);
5767
5768 Branch(&ok, kEqual, map_tmp, RootIndex::kBigIntMap);
5769
5770 Branch(&ok, kEqual, object, RootIndex::kUndefinedValue);
5771
5772 Branch(&ok, kEqual, object, RootIndex::kTrueValue);
5773
5774 Branch(&ok, kEqual, object, RootIndex::kFalseValue);
5775
5776 Branch(&ok, kEqual, object, RootIndex::kNullValue);
5777
5778 Abort(abort_reason);
5779 bind(&ok);
5780}
5781
5782void MacroAssembler::AssertNotSmi(Register object) {
5783 if (v8_flags.debug_code) {
5784 ASM_CODE_COMMENT(this);
5785 static_assert(kSmiTag == 0);
5786 UseScratchRegisterScope temps(this);
5787 Register scratch = temps.Acquire();
5788 andi(scratch, object, kSmiTagMask);
5789 Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
5790 }
5791}
5792
5793void MacroAssembler::AssertSmi(Register object) {
5794 if (v8_flags.debug_code) {
5795 ASM_CODE_COMMENT(this);
5796 static_assert(kSmiTag == 0);
5797 UseScratchRegisterScope temps(this);
5798 Register scratch = temps.Acquire();
5799 andi(scratch, object, kSmiTagMask);
5800 Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
5801 }
5802}
5803
5804void MacroAssembler::AssertStackIsAligned() {
5805 if (v8_flags.debug_code) {
5806 ASM_CODE_COMMENT(this);
5807 const int frame_alignment = ActivationFrameAlignment();
5808 const int frame_alignment_mask = frame_alignment - 1;
5809
5810 if (frame_alignment > kPointerSize) {
5811 Label alignment_as_expected;
5812 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
5813 {
5814 UseScratchRegisterScope temps(this);
5815 Register scratch = temps.Acquire();
5816 andi(scratch, sp, frame_alignment_mask);
5817 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
5818 }
5819 // Don't use Check here, as it will call Runtime_Abort re-entering here.
5820 stop();
5821 bind(&alignment_as_expected);
5822 }
5823 }
5824}
5825
5826void MacroAssembler::AssertConstructor(Register object) {
5827 if (v8_flags.debug_code) {
5828 ASM_CODE_COMMENT(this);
5829 BlockTrampolinePoolScope block_trampoline_pool(this);
5830 static_assert(kSmiTag == 0);
5831 SmiTst(object, t8);
5832 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
5833 Operand(zero_reg));
5834
5835 LoadMap(t8, object);
5836 Lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
5837 And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask));
5838 Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
5839 }
5840}
5841
5842void MacroAssembler::AssertFunction(Register object) {
5843 if (v8_flags.debug_code) {
5844 ASM_CODE_COMMENT(this);
5845 BlockTrampolinePoolScope block_trampoline_pool(this);
5846 static_assert(kSmiTag == 0);
5847 SmiTst(object, t8);
5848 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
5849 Operand(zero_reg));
5850 push(object);
5851 LoadMap(object, object);
5852 GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
5853 Check(ls, AbortReason::kOperandIsNotAFunction, t8,
5854 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
5855 pop(object);
5856 }
5857}
5858
5859void MacroAssembler::AssertCallableFunction(Register object) {
5860 if (v8_flags.debug_code) {
5861 ASM_CODE_COMMENT(this);
5862 BlockTrampolinePoolScope block_trampoline_pool(this);
5863 static_assert(kSmiTag == 0);
5864 SmiTst(object, t8);
5865 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
5866 Operand(zero_reg));
5867 push(object);
5868 LoadMap(object, object);
5869 GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
5870 Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
5871 Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
5872 FIRST_CALLABLE_JS_FUNCTION_TYPE));
5873 pop(object);
5874 }
5875}
5876
5877void MacroAssembler::AssertBoundFunction(Register object) {
5878 if (v8_flags.debug_code) {
5879 ASM_CODE_COMMENT(this);
5880 BlockTrampolinePoolScope block_trampoline_pool(this);
5881 static_assert(kSmiTag == 0);
5882 SmiTst(object, t8);
5883 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
5884 Operand(zero_reg));
5885 GetObjectType(object, t8, t8);
5886 Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
5887 Operand(JS_BOUND_FUNCTION_TYPE));
5888 }
5889}
5890
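// The generator check below uses the usual unsigned-range trick: subtracting
// FIRST_JS_GENERATOR_OBJECT_TYPE lets a single unsigned comparison cover the
// whole [FIRST, LAST] generator instance-type range.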
5891void MacroAssembler::AssertGeneratorObject(Register object) {
5892 if (!v8_flags.debug_code) return;
5893 ASM_CODE_COMMENT(this);
5894 BlockTrampolinePoolScope block_trampoline_pool(this);
5895 static_assert(kSmiTag == 0);
5896 SmiTst(object, t8);
5897 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
5898 Operand(zero_reg));
5899 GetObjectType(object, t8, t8);
5900 Dsubu(t8, t8, Operand(FIRST_JS_GENERATOR_OBJECT_TYPE));
5901 Check(
5902 ls, AbortReason::kOperandIsNotAGeneratorObject, t8,
5903 Operand(LAST_JS_GENERATOR_OBJECT_TYPE - FIRST_JS_GENERATOR_OBJECT_TYPE));
5904}
5905
5906void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5907 Register scratch) {
5908 if (v8_flags.debug_code) {
5909 ASM_CODE_COMMENT(this);
5910 Label done_checking;
5911 AssertNotSmi(object);
5912 LoadRoot(scratch, RootIndex::kUndefinedValue);
5913 Branch(&done_checking, eq, object, Operand(scratch));
5914 GetObjectType(object, scratch, scratch);
5915 Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
5916 Operand(ALLOCATION_SITE_TYPE));
5917 bind(&done_checking);
5918 }
5919}
5920
5921#endif // V8_ENABLE_DEBUG_CODE
5922
5923void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
5924 FPURegister src2, Label* out_of_line) {
5925 ASM_CODE_COMMENT(this);
5926 if (src1 == src2) {
5927 Move_s(dst, src1);
5928 return;
5929 }
5930
5931 // Check if one of operands is NaN.
5932 CompareIsNanF32(src1, src2);
5933 BranchTrueF(out_of_line);
5934
5935 if (kArchVariant >= kMips64r6) {
5936 max_s(dst, src1, src2);
5937 } else {
5938 Label return_left, return_right, done;
5939
5940 CompareF32(OLT, src1, src2);
5941 BranchTrueShortF(&return_right);
5942 CompareF32(OLT, src2, src1);
5943 BranchTrueShortF(&return_left);
5944
5945 // Operands are equal, but check for +/-0.
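 // When the compare reports equality the inputs can still be +0 and -0.
 // mfc1/dsll32 leave t8 == 0 only if src1 is +0, in which case the left
 // operand is already the maximum.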
5946 {
5947 BlockTrampolinePoolScope block_trampoline_pool(this);
5948 mfc1(t8, src1);
5949 dsll32(t8, t8, 0);
5950 Branch(&return_left, eq, t8, Operand(zero_reg));
5951 Branch(&return_right);
5952 }
5953
5954 bind(&return_right);
5955 if (src2 != dst) {
5956 Move_s(dst, src2);
5957 }
5958 Branch(&done);
5959
5960 bind(&return_left);
5961 if (src1 != dst) {
5962 Move_s(dst, src1);
5963 }
5964
5965 bind(&done);
5966 }
5967}
5968
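// Out-of-line path for Float32Max, reached when CompareIsNanF32 detected a
// NaN operand: the addition propagates a quiet NaN into dst. The Float32Min
// and Float64 variants below handle NaNs the same way.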
5969void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
5970 FPURegister src2) {
5971 add_s(dst, src1, src2);
5972}
5973
5974void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
5975 FPURegister src2, Label* out_of_line) {
5976 ASM_CODE_COMMENT(this);
5977 if (src1 == src2) {
5978 Move_s(dst, src1);
5979 return;
5980 }
5981
5982 // Check if one of operands is NaN.
5983 CompareIsNanF32(src1, src2);
5984 BranchTrueF(out_of_line);
5985
5986 if (kArchVariant >= kMips64r6) {
5987 min_s(dst, src1, src2);
5988 } else {
5989 Label return_left, return_right, done;
5990
5991 CompareF32(OLT, src1, src2);
5992 BranchTrueShortF(&return_left);
5993 CompareF32(OLT, src2, src1);
5994 BranchTrueShortF(&return_right);
5995
5996 // Left equals right => check for -0.
5997 {
5998 BlockTrampolinePoolScope block_trampoline_pool(this);
5999 mfc1(t8, src1);
6000 dsll32(t8, t8, 0);
6001 Branch(&return_right, eq, t8, Operand(zero_reg));
6002 Branch(&return_left);
6003 }
6004
6005 bind(&return_right);
6006 if (src2 != dst) {
6007 Move_s(dst, src2);
6008 }
6009 Branch(&done);
6010
6011 bind(&return_left);
6012 if (src1 != dst) {
6013 Move_s(dst, src1);
6014 }
6015
6016 bind(&done);
6017 }
6018}
6019
6020void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
6021 FPURegister src2) {
6022 add_s(dst, src1, src2);
6023}
6024
6025void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
6026 FPURegister src2, Label* out_of_line) {
6027 ASM_CODE_COMMENT(this);
6028 if (src1 == src2) {
6029 Move_d(dst, src1);
6030 return;
6031 }
6032
6033 // Check if one of operands is NaN.
6034 CompareIsNanF64(src1, src2);
6035 BranchTrueF(out_of_line);
6036
6037 if (kArchVariant >= kMips64r6) {
6038 max_d(dst, src1, src2);
6039 } else {
6040 Label return_left, return_right, done;
6041
6042 CompareF64(OLT, src1, src2);
6043 BranchTrueShortF(&return_right);
6044 CompareF64(OLT, src2, src1);
6045 BranchTrueShortF(&return_left);
6046
6047 // Left equals right => check for -0.
6048 {
6049 BlockTrampolinePoolScope block_trampoline_pool(this);
6050 dmfc1(t8, src1);
6051 Branch(&return_left, eq, t8, Operand(zero_reg));
6052 Branch(&return_right);
6053 }
6054
6055 bind(&return_right);
6056 if (src2 != dst) {
6057 Move_d(dst, src2);
6058 }
6059 Branch(&done);
6060
6061 bind(&return_left);
6062 if (src1 != dst) {
6063 Move_d(dst, src1);
6064 }
6065
6066 bind(&done);
6067 }
6068}
6069
6070void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
6071 FPURegister src2) {
6072 add_d(dst, src1, src2);
6073}
6074
6075void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
6076 FPURegister src2, Label* out_of_line) {
6077 ASM_CODE_COMMENT(this);
6078 if (src1 == src2) {
6079 Move_d(dst, src1);
6080 return;
6081 }
6082
6083 // Check if one of operands is NaN.
6084 CompareIsNanF64(src1, src2);
6085 BranchTrueF(out_of_line);
6086
6087 if (kArchVariant >= kMips64r6) {
6088 min_d(dst, src1, src2);
6089 } else {
6090 Label return_left, return_right, done;
6091
6092 CompareF64(OLT, src1, src2);
6093 BranchTrueShortF(&return_left);
6094 CompareF64(OLT, src2, src1);
6095 BranchTrueShortF(&return_right);
6096
6097 // Left equals right => check for -0.
6098 {
6099 BlockTrampolinePoolScope block_trampoline_pool(this);
6100 dmfc1(t8, src1);
6101 Branch(&return_right, eq, t8, Operand(zero_reg));
6102 Branch(&return_left);
6103 }
6104
6105 bind(&return_right);
6106 if (src2 != dst) {
6107 Move_d(dst, src2);
6108 }
6109 Branch(&done);
6110
6111 bind(&return_left);
6112 if (src1 != dst) {
6113 Move_d(dst, src1);
6114 }
6115
6116 bind(&done);
6117 }
6118}
6119
6120void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
6121 FPURegister src2) {
6122 add_d(dst, src1, src2);
6123}
6124
6125int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6126 int num_double_arguments) {
6127 int stack_passed_words = 0;
6128 int num_args = num_reg_arguments + num_double_arguments;
6129
6130 // Up to eight arguments are passed in FPURegisters and GPRegisters.
6131 if (num_args > kRegisterPassedArguments) {
6132 stack_passed_words = num_args - kRegisterPassedArguments;
6133 }
6134 stack_passed_words += kCArgSlotCount;
6135 return stack_passed_words;
6136}
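// Worked example for CalculateStackPassedWords, assuming the n64 constants
// described in PrepareCallCFunction below (kRegisterPassedArguments == 8,
// kCArgSlotCount == 0): a call with 3 integer and 7 double arguments has 10
// arguments in total, so 10 - 8 == 2 words are passed on the stack.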
6137
6138void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6139 int num_double_arguments,
6140 Register scratch) {
6141 ASM_CODE_COMMENT(this);
6142 int frame_alignment = ActivationFrameAlignment();
6143
6144 // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
6145 // O32: Up to four simple arguments are passed in registers a0..a3.
6146 // Those four arguments must have reserved argument slots on the stack for
6147 // mips, even though those argument slots are not normally used.
6148 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6149 // address than) the (O32) argument slots. (arg slot calculation handled by
6150 // CalculateStackPassedWords()).
6151 int stack_passed_arguments =
6152 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
6153 if (frame_alignment > kPointerSize) {
6154 // Make stack end at alignment and make room for the stack-passed arguments
6155 // and the original value of sp.
6156 mov(scratch, sp);
6157 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6158 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
6159 And(sp, sp, Operand(-frame_alignment));
6160 Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6161 } else {
6162 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6163 }
6164}
6165
6166void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6167 Register scratch) {
6168 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6169}
6170
6171int MacroAssembler::CallCFunction(ExternalReference function,
6172 int num_reg_arguments,
6173 int num_double_arguments,
6174 SetIsolateDataSlots set_isolate_data_slots,
6175 Label* return_location) {
6176 ASM_CODE_COMMENT(this);
6177 BlockTrampolinePoolScope block_trampoline_pool(this);
6178 li(t9, function);
6179 return CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments,
6180 set_isolate_data_slots, return_location);
6181}
6182
6183int MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
6184 int num_double_arguments,
6185 SetIsolateDataSlots set_isolate_data_slots,
6186 Label* return_location) {
6187 ASM_CODE_COMMENT(this);
6188 return CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
6189 set_isolate_data_slots, return_location);
6190}
6191
6192int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
6193 SetIsolateDataSlots set_isolate_data_slots,
6194 Label* return_location) {
6195 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
6196 return_location);
6197}
6198
6199int MacroAssembler::CallCFunction(Register function, int num_arguments,
6200 SetIsolateDataSlots set_isolate_data_slots,
6201 Label* return_location) {
6202 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
6203 return_location);
6204}
6205
6206int MacroAssembler::CallCFunctionHelper(
6207 Register function, int num_reg_arguments, int num_double_arguments,
6208 SetIsolateDataSlots set_isolate_data_slots, Label* return_location) {
6209 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
6210 DCHECK(has_frame());
6211
6212 Label get_pc;
6213
6214 // Make sure that the stack is aligned before calling a C function unless
6215 // running in the simulator. The simulator has its own alignment check which
6216 // provides more information.
6217 // The argument slots are presumed to have been set up by
6218 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
6219
6220#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6221 if (v8_flags.debug_code) {
6222 int frame_alignment = base::OS::ActivationFrameAlignment();
6223 int frame_alignment_mask = frame_alignment - 1;
6224 if (frame_alignment > kPointerSize) {
6225 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
6226 Label alignment_as_expected;
6227 {
6228 UseScratchRegisterScope temps(this);
6229 Register scratch = temps.Acquire();
6230 And(scratch, sp, Operand(frame_alignment_mask));
6231 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
6232 }
6233 // Don't use Check here, as it will call Runtime_Abort possibly
6234 // re-entering here.
6235 stop();
6236 bind(&alignment_as_expected);
6237 }
6238 }
6239#endif // V8_HOST_ARCH_MIPS
6240
6241 // Just call directly. The function called cannot cause a GC, or
6242 // allow preemption, so the return address in the link register
6243 // stays correct.
6244 {
6245 BlockTrampolinePoolScope block_trampoline_pool(this);
6246 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
6247 if (function != t9) {
6248 mov(t9, function);
6249 function = t9;
6250 }
6251
6252 // Save the frame pointer and PC so that the stack layout remains
6253 // iterable, even without an ExitFrame which normally exists between JS
6254 // and C frames. 't' registers are caller-saved so this is safe as a
6255 // scratch register.
6256 Register pc_scratch = t1;
6257 DCHECK(!AreAliased(pc_scratch, function));
6259
6260 LoadAddressPCRelative(pc_scratch, &get_pc);
6261
6262 Sd(pc_scratch,
6263 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
6264 Sd(fp, ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
6265 }
6266
6267 Call(function);
6268 int call_pc_offset = pc_offset();
6269 bind(&get_pc);
6270
6271 if (return_location) bind(return_location);
6272
6273 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
6274 // We don't unset the PC; the FP is the source of truth.
6275 Sd(zero_reg,
6276 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
6277 }
6278
6279 int stack_passed_arguments =
6280 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
6281
6282 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6283 Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6284 } else {
6285 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6286 }
6287
6289
6290 return call_pc_offset;
6291 }
6292}
6293
6294#undef BRANCH_ARGS_CHECK
6295
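// Tests the flags word in the MemoryChunk header of the page containing
// {object}: the address is masked down to the chunk start, the flags are
// loaded, and the branch is taken if (flags & mask) satisfies {cc} against
// zero.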
6296void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
6297 Condition cc, Label* condition_met) {
6298 ASM_CODE_COMMENT(this);
6299 And(scratch, object, Operand(~MemoryChunk::GetAlignmentMaskForAssembler()));
6300 Ld(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()));
6301 And(scratch, scratch, Operand(mask));
6302 Branch(condition_met, cc, scratch, Operand(zero_reg));
6303}
6304
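// Returns the first allocatable general-purpose register that is not one of
// the (up to six) registers passed in.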
6305Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
6306 Register reg4, Register reg5,
6307 Register reg6) {
6308 RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
6309
6310 const RegisterConfiguration* config = RegisterConfiguration::Default();
6311 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
6312 int code = config->GetAllocatableGeneralCode(i);
6313 Register candidate = Register::from_code(code);
6314 if (regs.has(candidate)) continue;
6315 return candidate;
6316 }
6317 UNREACHABLE();
6318}
6319void MacroAssembler::ComputeCodeStartAddress(Register dst) {
6320 // This push of ra and the matching pop below ensure that ra is restored,
6321 // since it is clobbered while computing the code start address.
6322 push(ra);
6323
6324 // The nal (or addiupc on r6) sequence leaves the address of the following
6325 // instruction in the return address register ra, which we can use later on.
6326 if (kArchVariant == kMips64r6) {
6327 addiupc(ra, 1);
6328 } else {
6329 nal();
6330 nop();
6331 }
6332 int pc = pc_offset();
6333 li(dst, Operand(pc));
6334 Dsubu(dst, ra, dst);
6335
6336 pop(ra); // Restore ra
6337}
6338// Check if the code object is marked for deoptimization. If it is, then it
6339// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
6340// to:
6341// 1. read from memory the word that contains that bit, which can be found in
6342// the flags in the referenced {Code} object;
6343// 2. test kMarkedForDeoptimizationBit in those flags; and
6344// 3. if it is not zero then it jumps to the builtin.
6345//
6346// Note: With leaptiering we simply assert the code is not deoptimized.
6347void MacroAssembler::BailoutIfDeoptimized() {
6348 // UseScratchRegisterScope temps(this);
6349 // Register scratch = temps.Acquire();
6350 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
6351 int offset =
6352 InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
6353 // LoadProtectedPointerField(
6354 // scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
6355 Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
6356 Lwu(kScratchReg, FieldMemOperand(kScratchReg, Code::kFlagsOffset));
6357 }
6358#ifdef V8_ENABLE_LEAPTIERING
6359 if (v8_flags.debug_code) {
6360 Label not_deoptimized;
6361 And(kScratchReg, kScratchReg,
6362 Operand(1 << Code::kMarkedForDeoptimizationBit));
6363 Branch(&not_deoptimized, eq, kScratchReg, Operand(zero_reg));
6364 Abort(AbortReason::kInvalidDeoptimizedCode);
6365 bind(&not_deoptimized);
6366 }
6367#else
6368 And(kScratchReg, kScratchReg,
6369 Operand(1 << Code::kMarkedForDeoptimizationBit));
6370 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, kScratchReg,
6371 Operand(zero_reg));
6372#endif
6373}
6374
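// Emits the call to the deoptimization entry builtin {target} through t9.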
6375void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
6376 DeoptimizeKind kind, Label* ret,
6377 Label*) {
6378 ASM_CODE_COMMENT(this);
6379 BlockTrampolinePoolScope block_trampoline_pool(this);
6380 Ld(t9,
6381 MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
6382 Call(t9);
6386}
6387
6388void MacroAssembler::LoadCodeInstructionStart(
6389 Register destination, Register code_data_container_object,
6390 CodeEntrypointTag tag) {
6391 ASM_CODE_COMMENT(this);
6392 Ld(destination, FieldMemOperand(code_data_container_object,
6393 Code::kInstructionStartOffset));
6394}
6395
6396void MacroAssembler::CallCodeObject(Register code_data_container_object,
6397 CodeEntrypointTag tag) {
6398 ASM_CODE_COMMENT(this);
6399 LoadCodeInstructionStart(code_data_container_object,
6400 code_data_container_object, tag);
6401 Call(code_data_container_object);
6402}
6403
6404void MacroAssembler::JumpCodeObject(Register code_data_container_object,
6405 CodeEntrypointTag tag, JumpMode jump_mode) {
6406 ASM_CODE_COMMENT(this);
6407 DCHECK_EQ(JumpMode::kJump, jump_mode);
6408 LoadCodeInstructionStart(code_data_container_object,
6409 code_data_container_object, tag);
6410 Jump(code_data_container_object);
6411}
6412
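// Calls a JSFunction. With leaptiering enabled the entry point and parameter
// count come from the dispatch table and the argument count is checked
// against the callee's parameter count; otherwise the code object is loaded
// from the function and called directly.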
6413void MacroAssembler::CallJSFunction(Register function_object,
6414 uint16_t argument_count) {
6415 Register code = kJavaScriptCallCodeStartRegister;
6416#ifdef V8_ENABLE_LEAPTIERING
6417 Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
6418 Register parameter_count = s1;
6419 Register scratch = s2;
6420
6421 Lw(dispatch_handle,
6422 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
6423 LoadEntrypointAndParameterCountFromJSDispatchTable(code, parameter_count,
6424 dispatch_handle, scratch);
6425
6426 // Force a safe crash if the parameter count doesn't match.
6427 SbxCheck(le, AbortReason::kJSSignatureMismatch, parameter_count,
6428 Operand(argument_count));
6429 Call(code);
6430#else
6431 Ld(code, FieldMemOperand(function_object, JSFunction::kCodeOffset));
6432 CallCodeObject(code, kJSEntrypointTag);
6433#endif
6434}
6435
6436void MacroAssembler::JumpJSFunction(Register function_object,
6437 JumpMode jump_mode) {
6438#ifdef V8_ENABLE_LEAPTIERING
6439 // This implementation is not currently used because callers usually need
6440 // to load both entry point and parameter count and then do something with
6441 // the latter before the actual call.
6442 UNREACHABLE();
6443#else
6444 Register code = kJavaScriptCallCodeStartRegister;
6445 Ld(code, FieldMemOperand(function_object, JSFunction::kCodeOffset));
6446 JumpCodeObject(code, kJSEntrypointTag, jump_mode);
6447#endif
6448}
6449
6450#ifdef V8_ENABLE_WEBASSEMBLY
6451
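// Replaces a Wasm code pointer (an index into the process-wide code pointer
// table) in {target} with the entry address it refers to.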
6452void MacroAssembler::ResolveWasmCodePointer(Register target) {
6453 ASM_CODE_COMMENT(this);
6454 ExternalReference global_jump_table =
6455 ExternalReference::wasm_code_pointer_table();
6456 UseScratchRegisterScope temps(this);
6457 Register scratch = temps.Acquire();
6458 xor_(zero_reg, scratch, scratch);
6459 li(scratch, global_jump_table);
6460 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == kSystemPointerSize);
6461 sll(target, target, kSystemPointerSizeLog2);
6462 daddu(scratch, scratch, target);
6463 Ld(target, MemOperand(scratch, 0));
6464}
6465
6466void MacroAssembler::CallWasmCodePointer(Register target,
6467 CallJumpMode call_jump_mode) {
6468 ResolveWasmCodePointer(target);
6469 if (call_jump_mode == CallJumpMode::kTailCall) {
6470 Jump(target);
6471 } else {
6472 Call(target);
6473 }
6474}
6475
6476#endif
6477
6478namespace {
6479
6480#ifndef V8_ENABLE_LEAPTIERING
6481// Only used when leaptiering is disabled.
6482void TailCallOptimizedCodeSlot(MacroAssembler* masm,
6483 Register optimized_code_entry, Register scratch1,
6484 Register scratch2) {
6485 // ----------- S t a t e -------------
6486 // -- a0 : actual argument count
6487 // -- a3 : new target (preserved for callee if needed, and caller)
6488 // -- a1 : target function (preserved for callee if needed, and caller)
6489 // -----------------------------------
6490 ASM_CODE_COMMENT(masm);
6491 DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
6492
6493 Label heal_optimized_code_slot;
6494
6495 // If the optimized code is cleared, go to runtime to update the optimization
6496 // marker field.
6497 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
6498 &heal_optimized_code_slot);
6499
6500 // The entry references a CodeWrapper object. Unwrap it now.
6501 __ Ld(optimized_code_entry,
6502 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset));
6503
6504 // Check if the optimized code is marked for deopt. If it is, call the
6505 // runtime to clear it.
6506 __ TestCodeIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1,
6507 ne, &heal_optimized_code_slot);
6508
6509 // Optimized code is good, get it into the closure and link the closure into
6510 // the optimized functions list, then tail call the optimized code.
6511 // The feedback vector is no longer used, so reuse it as a scratch
6512 // register.
6513 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, a1, scratch1,
6514 scratch2);
6515
6516 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
6517 __ LoadCodeInstructionStart(a2, optimized_code_entry, kJSEntrypointTag);
6518 __ Jump(a2);
6519
6520 // Optimized code slot contains deoptimized code or code is cleared and
6521 // optimized code marker isn't updated. Evict the code, update the marker
6522 // and re-enter the closure's code.
6523 __ bind(&heal_optimized_code_slot);
6524 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
6525}
6526#endif // V8_ENABLE_LEAPTIERING
6527
6528} // namespace
6529
6530#ifdef V8_ENABLE_DEBUG_CODE
6531void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
6532 if (v8_flags.debug_code) {
6533 GetObjectType(object, scratch, scratch);
6534 Assert(eq, AbortReason::kExpectedFeedbackCell, scratch,
6535 Operand(FEEDBACK_CELL_TYPE));
6536 }
6537}
6538void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
6539 if (v8_flags.debug_code) {
6540 GetObjectType(object, scratch, scratch);
6541 Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
6542 Operand(FEEDBACK_VECTOR_TYPE));
6543 }
6544}
6545#endif // V8_ENABLE_DEBUG_CODE
6546
6547void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
6548 Register optimized_code, Register closure, Register scratch1,
6549 Register scratch2) {
6550 ASM_CODE_COMMENT(this);
6551 DCHECK(!AreAliased(optimized_code, closure, scratch1, scratch2));
6552
6553#ifdef V8_ENABLE_LEAPTIERING
6554 UNREACHABLE();
6555#else
6556 // Store code entry in the closure.
6557 Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
6558 mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
6559 RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
6560 kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
6561 SmiCheck::kOmit);
6562#endif // V8_ENABLE_LEAPTIERING
6563}
6564
6565void MacroAssembler::GenerateTailCallToReturnedCode(
6566 Runtime::FunctionId function_id) {
6567 // ----------- S t a t e -------------
6568 // -- a0 : actual argument count
6569 // -- a1 : target function (preserved for callee)
6570 // -- a3 : new target (preserved for callee)
6571 // -----------------------------------
6572 ASM_CODE_COMMENT(this);
6573 {
6574 FrameScope scope(this, StackFrame::INTERNAL);
6575 // Push a copy of the target function, the new target and the actual
6576 // argument count.
6577 // Push function as parameter to the runtime call.
6581#ifdef V8_ENABLE_LEAPTIERING
6582 // No need to SmiTag since dispatch handles always look like Smis.
6583 static_assert(kJSDispatchHandleShift > 0);
6585#endif
6586 // Function is also the parameter to the runtime call.
6588 CallRuntime(function_id, 1);
6589 // Restore target function, new target and actual argument count.
6590#ifdef V8_ENABLE_LEAPTIERING
6592#endif
6596 }
6597
6598 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
6599 LoadCodeInstructionStart(a2, v0, kJSEntrypointTag);
6600 Jump(a2);
6601}
6602#ifndef V8_ENABLE_LEAPTIERING
6603void MacroAssembler::LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
6604 Register flags, Register feedback_vector, CodeKind current_code_kind,
6605 Label* flags_need_processing) {
6606 ASM_CODE_COMMENT(this);
6607 DCHECK(CodeKindCanTierUp(current_code_kind));
6608 Register scratch = t2;
6609 uint32_t flag_mask =
6610 FeedbackVector::FlagMaskForNeedsProcessingCheckFrom(current_code_kind);
6611 Lhu(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
6612 And(scratch, flags, Operand(flag_mask));
6613 Branch(flags_need_processing, ne, scratch, Operand(zero_reg));
6614}
6615
6616void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
6617 Register flags, Register feedback_vector) {
6618 ASM_CODE_COMMENT(this);
6619 Label maybe_has_optimized_code, maybe_needs_logging;
6620 // Check if optimized code marker is available.
6621 {
6622 UseScratchRegisterScope temps(this);
6623 Register scratch = temps.Acquire();
6624 And(scratch, flags,
6625 Operand(FeedbackVector::kFlagsTieringStateIsAnyRequested));
6626 Branch(&maybe_needs_logging, eq, scratch, Operand(zero_reg));
6627 }
6628
6629 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
6630
6631 bind(&maybe_needs_logging);
6632 {
6633 UseScratchRegisterScope temps(this);
6634 Register scratch = temps.Acquire();
6635 And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask));
6636 Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
6637 }
6638
6639 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
6640
6641 bind(&maybe_has_optimized_code);
6642 Register optimized_code_entry = flags;
6643 Ld(optimized_code_entry,
6644 FieldMemOperand(feedback_vector,
6645 FeedbackVector::kMaybeOptimizedCodeOffset));
6646 TailCallOptimizedCodeSlot(this, optimized_code_entry, t3, a5);
6647}
6648
6649#endif // !V8_ENABLE_LEAPTIERING
6650// Calls an API function. Allocates HandleScope, extracts returned value
6651// from handle and propagates exceptions. Clobbers C argument registers
6652// and C caller-saved registers. Restores context. On return removes
6653// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
6654// (GCed, includes the call JS arguments space and the additional space
6655// allocated for the fast call).
6656void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
6657 Register function_address,
6658 ExternalReference thunk_ref, Register thunk_arg,
6659 int slots_to_drop_on_return,
6660 MemOperand* argc_operand,
6661 MemOperand return_value_operand) {
6662 using ER = ExternalReference;
6663
6664 Isolate* isolate = masm->isolate();
6665 MemOperand next_mem_op = __ ExternalReferenceAsOperand(
6666 ER::handle_scope_next_address(isolate), no_reg);
6667 MemOperand limit_mem_op = __ ExternalReferenceAsOperand(
6668 ER::handle_scope_limit_address(isolate), no_reg);
6669 MemOperand level_mem_op = __ ExternalReferenceAsOperand(
6670 ER::handle_scope_level_address(isolate), no_reg);
6671
6672 Register return_value = v0;
6673 Register scratch = a4;
6674 Register scratch2 = a5;
6675
6676 // Allocate HandleScope in callee-saved registers.
6677 // We will need to restore the HandleScope after the call to the API function;
6678 // by allocating it in callee-saved registers it is preserved across the C call.
6679 Register prev_next_address_reg = s0;
6680 Register prev_limit_reg = s1;
6681 Register prev_level_reg = s2;
6682
6683 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
6684 // this function must not corrupt them (return_value overlaps with
6685 // kCArgRegs[0] but that's ok because we start using it only after the C
6686 // call).
6687 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
6688 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
6689 // function_address and thunk_arg might overlap but this function must not
6690 // corrupt them until the call is made (i.e. overlap with return_value is
6691 // fine).
6692 DCHECK(!AreAliased(function_address, // incoming parameters
6693 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
6694 DCHECK(!AreAliased(thunk_arg, // incoming parameters
6695 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
6696
6697 {
6699 "Allocate HandleScope in callee-save registers.");
6700 __ Ld(prev_next_address_reg, next_mem_op);
6701 __ Ld(prev_limit_reg, limit_mem_op);
6702 __ Lw(prev_level_reg, level_mem_op);
6703 __ Addu(scratch, prev_level_reg, Operand(1));
6704 __ Sw(scratch, level_mem_op);
6705 }
6706
6707 Label profiler_or_side_effects_check_enabled, done_api_call;
6708 if (with_profiling) {
6709 __ RecordComment("Check if profiler or side effects check is enabled");
6710 __ Lb(scratch,
6711 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
6712 __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,
6713 Operand(zero_reg));
6714#ifdef V8_RUNTIME_CALL_STATS
6715 __ RecordComment("Check if RCS is enabled");
6716 __ li(scratch, ER::address_of_runtime_stats_flag());
6717 __ Lw(scratch, MemOperand(scratch, 0));
6718 __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,
6719 Operand(zero_reg));
6720#endif // V8_RUNTIME_CALL_STATS
6721 }
6722
6723 __ RecordComment("Call the api function directly.");
6724 __ StoreReturnAddressAndCall(function_address);
6725 __ bind(&done_api_call);
6726
6727 Label propagate_exception;
6728 Label delete_allocated_handles;
6729 Label leave_exit_frame;
6730
6731 __ RecordComment("Load value from ReturnValue.");
6732 __ Ld(return_value, return_value_operand);
6733
6734 {
6735 ASM_CODE_COMMENT_STRING(
6736 masm,
6737 "No more valid handles (the result handle was the last one)."
6738 "Restore previous handle scope.");
6739 __ Sd(prev_next_address_reg, next_mem_op);
6740 if (v8_flags.debug_code) {
6741 __ Lw(scratch, level_mem_op);
6742 __ Subu(scratch, scratch, Operand(1));
6743 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, scratch,
6744 Operand(prev_level_reg));
6745 }
6746 __ Sw(prev_level_reg, level_mem_op);
6747 __ Ld(scratch, limit_mem_op);
6748 __ Branch(&delete_allocated_handles, ne, prev_limit_reg, Operand(scratch));
6749 }
6750
6751 __ RecordComment("Leave the API exit frame.");
6752 __ bind(&leave_exit_frame);
6753
6754 Register argc_reg = prev_limit_reg;
6755 if (argc_operand != nullptr) {
6756 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
6757 __ Ld(argc_reg, *argc_operand);
6758 }
6759
6760 __ LeaveExitFrame(scratch);
6761
6762 {
6764 "Check if the function scheduled an exception.");
6765 __ LoadRoot(scratch, RootIndex::kTheHoleValue);
6766 __ Ld(scratch2, __ ExternalReferenceAsOperand(
6767 ER::exception_address(isolate), no_reg));
6768 __ Branch(&propagate_exception, ne, scratch, Operand(scratch2));
6769 }
6770
6771 __ AssertJSAny(return_value, scratch, scratch2,
6772 AbortReason::kAPICallReturnedInvalidObject);
6773
6774 if (argc_operand == nullptr) {
6775 DCHECK_NE(slots_to_drop_on_return, 0);
6776 __ Daddu(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
6777 } else {
6778 // {argc_operand} was loaded into {argc_reg} above.
6779 if (slots_to_drop_on_return != 0) {
6780 __ Daddu(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
6781 }
6782 __ Dlsa(sp, sp, argc_reg, kSystemPointerSizeLog2);
6783 }
6784
6785 __ Ret();
6786
6787 if (with_profiling) {
6788 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
6789 __ bind(&profiler_or_side_effects_check_enabled);
6790 // Additional parameter is the address of the actual callback.
6791 if (thunk_arg.is_valid()) {
6792 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
6793 IsolateFieldId::kApiCallbackThunkArgument);
6794 __ Sd(thunk_arg, thunk_arg_mem_op);
6795 }
6796 __ li(scratch, thunk_ref);
6797 __ StoreReturnAddressAndCall(scratch);
6798 __ Branch(&done_api_call);
6799 }
6800
6801 __ RecordComment("An exception was thrown. Propagate it.");
6802 __ bind(&propagate_exception);
6803 __ TailCallRuntime(Runtime::kPropagateException);
6804
6805 {
6807 masm, "HandleScope limit has changed. Delete allocated extensions.");
6808 __ bind(&delete_allocated_handles);
6809 __ Sd(prev_limit_reg, limit_mem_op);
6810 // Save the return value in a callee-save register.
6811 Register saved_result = prev_limit_reg;
6812 __ mov(saved_result, v0);
6813 __ mov(kCArgRegs[0], v0);
6814 __ PrepareCallCFunction(1, prev_level_reg);
6815 __ li(kCArgRegs[0], ER::isolate_address());
6816 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
6817 __ mov(v0, saved_result);
6818 __ jmp(&leave_exit_frame);
6819 }
6820}
6821
6822} // namespace internal
6823} // namespace v8
6824
6825#undef __
6826
6827#endif // V8_TARGET_ARCH_MIPS64
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void Ulhu(Register rd, const MemOperand &rs)
void Lw(Register rd, const MemOperand &rs)
void Neg_d(FPURegister fd, FPURegister fk)
void BranchShortHelperR6(int32_t offset, Label *L)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
bool BranchShortCheck(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void CallBuiltin(Builtin builtin, Condition cond=al)
void ByteSwapUnsigned(Register dest, Register src, int operand_size)
void Lh(Register rd, const MemOperand &rs)
void Usdc1(FPURegister fd, const MemOperand &rs, Register scratch)
void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void Xor(Register dst, Register src)
void Cvt_d_uw(FPURegister fd, FPURegister fs)
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void Check(Condition cond, AbortReason reason)
void Usd(Register rd, const MemOperand &rs)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void Or(Register dst, Register src)
bool CalculateOffset(Label *L, int32_t *offset, OffsetSize bits)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void Sc(Register rd, const MemOperand &rs)
void Bovc(Register rt, Register rs, Label *L)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void MSARoundW(MSARegister dst, MSARegister src, FPURoundingMode mode)
Register GetRtAsRegisterHelper(const Operand &rt, Register scratch)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void Popcnt(Register dst, Register src)
void Dpopcnt(Register rd, Register rs)
void Ll(Register rd, const MemOperand &rs)
void Ulwc1(FPURegister fd, const MemOperand &rs, Register scratch)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void Round_l_d(FPURegister fd, FPURegister fs)
void DMulOverflow(Register dst, Register left, const Operand &right, Register overflow)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk)
void StoreWordPair(Register rd, const MemOperand &rs, Register scratch=at)
void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void Sb(Register rd, const MemOperand &rs)
void Branch(Label *label, bool need_link=false)
void Ld(Register rd, const MemOperand &rs)
void Ceil_s_s(FPURegister fd, FPURegister fs)
void GetInstanceTypeRange(Register map, Register type_reg, InstanceType lower_limit, Register range)
void Ulw(Register rd, const MemOperand &rs)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void Ulh(Register rd, const MemOperand &rs)
void Movf(Register rd, Register rs, uint16_t cc=0)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void LoadZeroIfFPUCondition(Register dest, CFRegister=FCC0)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void Move_s(FPURegister dst, FPURegister src)
void BranchTrueShortF(Label *target, CFRegister cc=FCC0)
void ByteSwapSigned(Register dest, Register src, int operand_size)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Floor_s_s(FPURegister fd, FPURegister fs)
void Dclz(Register rd, Register rs)
void BranchAndLinkShortHelperR6(int32_t offset, Label *L)
void Dctz(Register rd, Register rs)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void Pref(int32_t hint, const MemOperand &rs)
void Round_w_d(FPURegister fd, FPURegister fs)
void BranchMSA(Label *target, MSABranchDF df, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd=PROTECT)
void DsubOverflow(Register dst, Register left, const Operand &right, Register overflow)
void StubPrologue(StackFrame::Type type)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void StoreRootRelative(int32_t offset, Register value) final
void LoadMap(Register destination, Register object)
void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE
void TailCallRuntime(Runtime::FunctionId fid)
void ExtMulLow(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2)
void Swap(Register srcdst0, Register srcdst1)
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size)
void LoadNativeContextSlot(Register dst, int index)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void Floor_w_d(FPURegister fd, FPURegister fs)
void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd, bool f32=true)
void Ldc1(FPURegister fd, const MemOperand &src)
void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void Ush(Register rd, const MemOperand &rs, Register scratch)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static constexpr intptr_t GetAlignmentMaskForAssembler()
static constexpr FPURegister from_code(int8_t code)
constexpr int8_t code() const
static const RegisterConfiguration * Default()
static constexpr Register from_code(int code)
static constexpr bool IsWasmCanonicalSigId(Mode mode)
Definition reloc-info.h:217
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsWasmCodePointerTableEntry(Mode mode)
Definition reloc-info.h:220
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr int kFixedFrameSizeFromFp
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
const bool IsMipsSoftFloatABI
@ kLittle
@ kMips64r6
@ kMips64r2
static const ArchVariants kArchVariant
#define SmiWordOffset(offset)
#define BRANCH_ARGS_CHECK(cond, rs, rt)
SmiCheck
ArgumentAdaptionMode
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
InstructionOperand destination
constexpr unsigned CountTrailingZeros64(uint64_t value)
Definition bits.h:164
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr VFPRoundingMode kRoundToNearest
constexpr int W
RegListBase< DoubleRegister > DoubleRegList
Definition reglist-arm.h:15
MSABranchCondition NegateMSABranchCondition(MSABranchCondition cond)
constexpr int kSimd128Size
Definition globals.h:706
constexpr int kPointerSizeLog2
Definition globals.h:600
constexpr DoubleRegister kDoubleCompareReg
DwVfpRegister DoubleRegister
const uint32_t kFCSRInvalidOpCauseMask
const int kHiMaskOf32
constexpr DoubleRegister kScratchDoubleReg
constexpr uint32_t kBinary32SignMask
Definition globals.h:690
const int kSmiTagSize
Definition v8-internal.h:87
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
constexpr int kPointerSize
Definition globals.h:599
static int InstrCountForLiLower32Bit(int64_t value)
const Address kWeakHeapObjectMask
Definition globals.h:967
const int64_t kUpper16MaskOf64
constexpr FPUControlRegister FCSR
constexpr int B0
const int kFloat32ExponentBias
constexpr Register kJavaScriptCallArgCountRegister
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
constexpr Register kScratchReg
constexpr int kImm16Mask
static const int kRegisterPassedArguments
constexpr int L
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
Definition globals.h:410
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kZapValue
Definition globals.h:1005
constexpr MSAControlRegister MSACSR
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
constexpr bool is_intn(int64_t x, unsigned n)
Definition utils.h:568
const uint32_t kFCSROverflowCauseMask
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
const DoubleRegList kCallerSavedFPU
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const int kCArgsSlotsSize
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
const int kFloat32ExponentBits
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
const RegList kJSCallerSaved
Definition reglist-arm.h:23
Register ToRegister(int num)
constexpr bool SmiValuesAre32Bits()
const uint32_t kFCSRUnderflowCauseMask
const int kFloat32MantissaBits
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
const intptr_t kSmiTagMask
Definition v8-internal.h:88
const int64_t kHigher16MaskOf64
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
const int kLuiShift
constexpr uint8_t kInstrSize
const int kSmiTag
Definition v8-internal.h:86
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register cp
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
Definition globals.h:407
constexpr Register kJavaScriptCallDispatchHandleRegister
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
constexpr Register kJavaScriptCallNewTargetRegister
constexpr int kNumRegisters
static bool IsZero(const Operand &rt)
const int kCArgSlotCount
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define OFFSET_OF_DATA_START(Type)
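A minimal sketch, assuming a MacroAssembler* named masm and v0 as an illustrative exclusion register, of how the caller-saved helpers listed in this index (RequiredStackSizeForCallerSaved, PushCallerSaved, PopCallerSaved) are typically paired around code that may clobber the JS caller-saved registers:
// Hypothetical usage sketch; masm and the choice of v0 are assumptions, not code from this file.
int bytes = masm->RequiredStackSizeForCallerSaved(SaveFPRegsMode::kSave, v0);
USE(bytes);  // byte count of the save area; the push/pop below do the actual stack work
masm->PushCallerSaved(SaveFPRegsMode::kSave, v0);  // pushes kJSCallerSaved minus v0 and, with kSave, kCallerSavedFPU
// ... emit code that clobbers caller-saved registers ...
masm->PopCallerSaved(SaveFPRegsMode::kSave, v0);   // must mirror the Push call with the same mode and exclusions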