macro-assembler-loong64.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h> // For LONG_MIN, LONG_MAX.
6
7#if V8_TARGET_ARCH_LOONG64
8
9#include <optional>
10
11#include "src/base/bits.h"
21#include "src/debug/debug.h"
28#include "src/runtime/runtime.h"
30
31// Satisfy cpplint check, but don't include platform-specific header. It is
32// included recursively via macro-assembler.h.
33#if 0
34#include "src/codegen/loong64/macro-assembler-loong64.h"
35#endif
36
37#define __ ACCESS_MASM(masm)
38
39namespace v8 {
40namespace internal {
41
42static inline bool IsZero(const Operand& rk) {
43 if (rk.is_reg()) {
44 return rk.rm() == zero_reg;
45 } else {
46 return rk.immediate() == 0;
47 }
48}
49
50int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
51 Register exclusion1,
52 Register exclusion2,
53 Register exclusion3) const {
54 int bytes = 0;
55
56 RegList exclusions = {exclusion1, exclusion2, exclusion3};
57 RegList list = kJSCallerSaved - exclusions;
58 bytes += list.Count() * kSystemPointerSize;
59
60 if (fp_mode == SaveFPRegsMode::kSave) {
61 bytes += kCallerSavedFPU.Count() * kDoubleSize;
62 }
63
64 return bytes;
65}
66
67int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
68 Register exclusion2, Register exclusion3) {
69 ASM_CODE_COMMENT(this);
70 int bytes = 0;
71
72 RegList exclusions = {exclusion1, exclusion2, exclusion3};
73 RegList list = kJSCallerSaved - exclusions;
74 MultiPush(list);
75 bytes += list.Count() * kSystemPointerSize;
76
77 if (fp_mode == SaveFPRegsMode::kSave) {
78 MultiPushFPU(kCallerSavedFPU);
79 bytes += kCallerSavedFPU.Count() * kDoubleSize;
80 }
81
82 return bytes;
83}
84
85int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
86 Register exclusion2, Register exclusion3) {
87 ASM_CODE_COMMENT(this);
88 int bytes = 0;
89 if (fp_mode == SaveFPRegsMode::kSave) {
90 MultiPopFPU(kCallerSavedFPU);
91 bytes += kCallerSavedFPU.Count() * kDoubleSize;
92 }
93
94 RegList exclusions = {exclusion1, exclusion2, exclusion3};
95 RegList list = kJSCallerSaved - exclusions;
96 MultiPop(list);
97 bytes += list.Count() * kSystemPointerSize;
98
99 return bytes;
100}
101
102void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
103 if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index) &&
104 is_int12(ReadOnlyRootPtr(index))) {
105 DecompressTagged(destination, ReadOnlyRootPtr(index));
106 return;
107 }
108 // Many roots have addresses that are too large to fit into addition immediate
109 // operands. Evidence suggests that the extra instruction for decompression
110 // costs us more than the load.
111 Ld_d(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
112}
113void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
114 if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index) &&
115 is_int12(ReadOnlyRootPtr(index))) {
116 li(destination, (int32_t)ReadOnlyRootPtr(index));
117 return;
118 }
120}
121
122void MacroAssembler::PushCommonFrame(Register marker_reg) {
123 if (marker_reg.is_valid()) {
124 Push(ra, fp, marker_reg);
125 Add_d(fp, sp, Operand(kSystemPointerSize));
126 } else {
127 Push(ra, fp);
128 mov(fp, sp);
129 }
130}
131
132void MacroAssembler::PushStandardFrame(Register function_reg) {
133 int offset = -StandardFrameConstants::kContextOffset;
134 if (function_reg.is_valid()) {
135 Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
136 offset += 2 * kSystemPointerSize;
137 } else {
138 Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
139 offset += kSystemPointerSize;
140 }
141 Add_d(fp, sp, Operand(offset));
142}
143
144// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
145// The register 'object' contains a heap object pointer. The heap object
146// tag is shifted away.
147void MacroAssembler::RecordWriteField(Register object, int offset,
148 Register value, RAStatus ra_status,
149 SaveFPRegsMode save_fp,
150 SmiCheck smi_check, SlotDescriptor slot) {
151 ASM_CODE_COMMENT(this);
152 // First, check if a write barrier is even needed. The tests below
153 // catch stores of Smis.
154 Label done;
155
156 // Skip barrier if writing a smi.
157 if (smi_check == SmiCheck::kInline) {
158 JumpIfSmi(value, &done);
159 }
160
161 // Although the object register is tagged, the offset is relative to the start
162 // of the object, so the offset must be a multiple of kTaggedSize.
163 DCHECK(IsAligned(offset, kTaggedSize));
164
165 if (v8_flags.slow_debug_code) {
166 Label ok;
167 BlockTrampolinePoolScope block_trampoline_pool(this);
168 UseScratchRegisterScope temps(this);
169 Register scratch = temps.Acquire();
170 Add_d(scratch, object, offset - kHeapObjectTag);
171 And(scratch, scratch, Operand(kTaggedSize - 1));
172 Branch(&ok, eq, scratch, Operand(zero_reg));
173 Abort(AbortReason::kUnalignedCellInWriteBarrier);
174 bind(&ok);
175 }
176
177 RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status,
178 save_fp, SmiCheck::kOmit, slot);
179
180 bind(&done);
181}
182
183void MacroAssembler::DecodeSandboxedPointer(Register value) {
184 ASM_CODE_COMMENT(this);
185#ifdef V8_ENABLE_SANDBOX
186 srli_d(value, value, kSandboxedPointerShift);
187 Add_d(value, value, kPtrComprCageBaseRegister);
188#else
189 UNREACHABLE();
190#endif
191}
192
193void MacroAssembler::LoadSandboxedPointerField(Register destination,
194 MemOperand field_operand) {
195#ifdef V8_ENABLE_SANDBOX
196 ASM_CODE_COMMENT(this);
197 Ld_d(destination, field_operand);
198 DecodeSandboxedPointer(destination);
199#else
200 UNREACHABLE();
201#endif
202}
203
204void MacroAssembler::StoreSandboxedPointerField(Register value,
205 MemOperand dst_field_operand) {
206#ifdef V8_ENABLE_SANDBOX
207 ASM_CODE_COMMENT(this);
208 UseScratchRegisterScope temps(this);
209 Register scratch = temps.Acquire();
210 Sub_d(scratch, value, kPtrComprCageBaseRegister);
211 slli_d(scratch, scratch, kSandboxedPointerShift);
212 St_d(scratch, dst_field_operand);
213#else
214 UNREACHABLE();
215#endif
216}
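
// A minimal standalone sketch of the encoding used by the two helpers above,
// assuming only that sandboxed pointers are stored as a cage-relative offset
// shifted left by kSandboxedPointerShift; names and types here are
// illustrative, not V8 APIs.
static inline unsigned long long EncodeSandboxedPointerSketch(
    unsigned long long ptr, unsigned long long cage_base, int shift) {
  return (ptr - cage_base) << shift;  // what StoreSandboxedPointerField emits
}
static inline unsigned long long DecodeSandboxedPointerSketch(
    unsigned long long value, unsigned long long cage_base, int shift) {
  return (value >> shift) + cage_base;  // what DecodeSandboxedPointer emits
}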
217
218void MacroAssembler::LoadExternalPointerField(Register destination,
219 MemOperand field_operand,
220 ExternalPointerTagRange tag_range,
221 Register isolate_root) {
222 DCHECK(!AreAliased(destination, isolate_root));
223 ASM_CODE_COMMENT(this);
224#ifdef V8_ENABLE_SANDBOX
225 DCHECK(!tag_range.IsEmpty());
227 UseScratchRegisterScope temps(this);
228 Register external_table = temps.Acquire();
229 if (isolate_root == no_reg) {
230 DCHECK(root_array_available_);
231 isolate_root = kRootRegister;
232 }
233 Ld_d(external_table,
234 MemOperand(isolate_root,
235 IsolateData::external_pointer_table_offset() +
236 Internals::kExternalPointerTableBasePointerOffset));
237 Ld_wu(destination, field_operand);
238 srli_d(destination, destination, kExternalPointerIndexShift);
239 slli_d(destination, destination, kExternalPointerTableEntrySizeLog2);
240 Ld_d(destination, MemOperand(external_table, destination));
241
242 // We don't expect to see empty fields here. If this is ever needed, consider
243 // using a dedicated empty value entry for those tags instead (i.e. an entry
244 // with the right tag and nullptr payload).
246
247 // We need another scratch register for the 64-bit tag constant. Instead of
248 // forcing the `And` to allocate a new temp register (which we may not have),
249 // reuse the temp register that we used for the external pointer table base.
250 Register scratch = external_table;
251 if (tag_range.Size() == 1) {
252 // The common and simple case: we expect exactly one tag.
253 static_assert(kExternalPointerShiftedTagMask == 0x7f);
256 SbxCheck(eq, AbortReason::kExternalPointerTagMismatch, scratch,
257 Operand(tag_range.first));
259 } else {
260 // Not currently supported. Implement once needed.
262 UNREACHABLE();
263 }
264#else
265 Ld_d(destination, field_operand);
266#endif // V8_ENABLE_SANDBOX
267}
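
// A minimal standalone sketch of the handle-to-entry arithmetic above: the
// 32-bit handle is shifted right by the index shift and then scaled by the
// entry size to form a byte offset into the external pointer table. The
// parameters stand in for kExternalPointerIndexShift and
// kExternalPointerTableEntrySizeLog2 and are illustrative only.
static inline unsigned long long ExternalPointerEntryOffsetSketch(
    unsigned int handle, int index_shift, int entry_size_log2) {
  unsigned long long index = handle >> index_shift;
  return index << entry_size_log2;  // byte offset used by the final Ld_d
}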
268
269void MacroAssembler::LoadTrustedPointerField(Register destination,
270 MemOperand field_operand,
271 IndirectPointerTag tag) {
272#ifdef V8_ENABLE_SANDBOX
273 LoadIndirectPointerField(destination, field_operand, tag);
274#else
275 LoadTaggedField(destination, field_operand);
276#endif
277}
278
279void MacroAssembler::StoreTrustedPointerField(Register value,
280 MemOperand dst_field_operand) {
281#ifdef V8_ENABLE_SANDBOX
282 StoreIndirectPointerField(value, dst_field_operand);
283#else
284 StoreTaggedField(value, dst_field_operand);
285#endif
286}
287
288void MacroAssembler::LoadIndirectPointerField(Register destination,
289 MemOperand field_operand,
290 IndirectPointerTag tag) {
291#ifdef V8_ENABLE_SANDBOX
292 ASM_CODE_COMMENT(this);
293 UseScratchRegisterScope temps(this);
294 Register handle = temps.Acquire();
295 Ld_wu(handle, field_operand);
296
297 ResolveIndirectPointerHandle(destination, handle, tag);
298#else
299 UNREACHABLE();
300#endif // V8_ENABLE_SANDBOX
301}
302
303void MacroAssembler::StoreIndirectPointerField(Register value,
304 MemOperand dst_field_operand) {
305#ifdef V8_ENABLE_SANDBOX
306 UseScratchRegisterScope temps(this);
307 Register scratch = temps.Acquire();
308 Ld_w(scratch, FieldMemOperand(
309 value, ExposedTrustedObject::kSelfIndirectPointerOffset));
310 St_w(scratch, dst_field_operand);
311#else
312 UNREACHABLE();
313#endif
314}
315
316#ifdef V8_ENABLE_SANDBOX
317void MacroAssembler::ResolveIndirectPointerHandle(Register destination,
318 Register handle,
319 IndirectPointerTag tag) {
320 // The tag implies which pointer table to use.
321 if (tag == kUnknownIndirectPointerTag) {
322 // In this case we have to rely on the handle marking to determine which
323 // pointer table to use.
324 Label is_trusted_pointer_handle, done;
325 DCHECK(!AreAliased(destination, handle));
326 And(destination, handle, Operand(kCodePointerHandleMarker));
327 Branch(&is_trusted_pointer_handle, eq, destination, Operand(zero_reg));
328 ResolveCodePointerHandle(destination, handle);
329 Branch(&done);
330 bind(&is_trusted_pointer_handle);
331 ResolveTrustedPointerHandle(destination, handle,
332 kUnknownIndirectPointerTag);
333 bind(&done);
334 } else if (tag == kCodeIndirectPointerTag) {
335 ResolveCodePointerHandle(destination, handle);
336 } else {
337 ResolveTrustedPointerHandle(destination, handle, tag);
338 }
339}
340
341void MacroAssembler::ResolveTrustedPointerHandle(Register destination,
342 Register handle,
343 IndirectPointerTag tag) {
344 DCHECK_NE(tag, kCodeIndirectPointerTag);
346
348 Register table = destination;
349 Ld_d(table,
350 MemOperand(kRootRegister, IsolateData::trusted_pointer_table_offset()));
354 // Untag the pointer and remove the marking bit in one operation.
355 Register tag_reg = handle;
356 li(tag_reg, Operand(~(tag | kTrustedPointerTableMarkBit)));
357 and_(destination, destination, tag_reg);
358}
359
360void MacroAssembler::ResolveCodePointerHandle(Register destination,
361 Register handle) {
363
364 Register table = destination;
365 LoadCodePointerTableBase(table);
370 // The LSB is used as a marking bit by the code pointer table, so here we have
371 // to set it using a bitwise OR as it may or may not be set.
372 Or(destination, destination, Operand(kHeapObjectTag));
373}
374
375void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,
376 MemOperand field_operand,
377 CodeEntrypointTag tag) {
379 ASM_CODE_COMMENT(this);
380 UseScratchRegisterScope temps(this);
381 Register scratch = temps.Acquire();
382 LoadCodePointerTableBase(scratch);
383 Ld_wu(destination, field_operand);
384 srli_d(destination, destination, kCodePointerHandleShift);
385 slli_d(destination, destination, kCodePointerTableEntrySizeLog2);
386 Ld_d(destination, MemOperand(scratch, destination));
387 if (tag != 0) {
388 li(scratch, Operand(tag));
389 xor_(destination, destination, scratch);
390 }
391}
392
393void MacroAssembler::LoadCodePointerTableBase(Register destination) {
394#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
395 if (!options().isolate_independent_code && isolate()) {
396 // Embed the code pointer table address into the code.
397 li(destination,
398 ExternalReference::code_pointer_table_base_address(isolate()));
399 } else {
400 // Force indirect load via root register as a workaround for
401 // isolate-independent code (for example, for Wasm).
402 Ld_d(destination,
403 ExternalReferenceAsOperand(
404 ExternalReference::address_of_code_pointer_table_base_address(),
405 destination));
406 }
407#else
408 // Embed the code pointer table address into the code.
409 li(destination, ExternalReference::global_code_pointer_table_base_address());
410#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
411}
412#endif // V8_ENABLE_SANDBOX
413
414#ifdef V8_ENABLE_LEAPTIERING
415void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
416 Register dispatch_handle,
417 Register scratch) {
418 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
419 ASM_CODE_COMMENT(this);
420
421 Register index = destination;
422 li(scratch, ExternalReference::js_dispatch_table_address());
423 srli_d(index, dispatch_handle, kJSDispatchHandleShift);
424 slli_d(index, index, kJSDispatchTableEntrySizeLog2);
425 Add_d(scratch, scratch, destination);
426 Ld_d(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
427}
428
429void MacroAssembler::LoadEntrypointFromJSDispatchTable(
430 Register destination, JSDispatchHandle dispatch_handle, Register scratch) {
431 DCHECK(!AreAliased(destination, scratch));
432 ASM_CODE_COMMENT(this);
433
434 li(scratch, ExternalReference::js_dispatch_table_address());
435 // WARNING: This offset calculation is only safe if we have already stored a
436 // RelocInfo for the dispatch handle, e.g. in CallJSDispatchEntry, (thus
437 // keeping the dispatch entry alive) _and_ because the entrypoints are not
438 // compatible (thus meaning that the offset calculation is not invalidated by
439 // a compaction).
440 // TODO(leszeks): Make this less of a footgun.
441 static_assert(!JSDispatchTable::kSupportsCompaction);
442 int offset = JSDispatchTable::OffsetOfEntry(dispatch_handle) +
443 JSDispatchEntry::kEntrypointOffset;
444 Ld_d(destination, MemOperand(scratch, offset));
445}
446
447void MacroAssembler::LoadParameterCountFromJSDispatchTable(
448 Register destination, Register dispatch_handle, Register scratch) {
449 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
450 ASM_CODE_COMMENT(this);
451
452 Register index = destination;
453 li(scratch, ExternalReference::js_dispatch_table_address());
454 srli_d(index, dispatch_handle, kJSDispatchHandleShift);
455 slli_d(index, index, kJSDispatchTableEntrySizeLog2);
456 Add_d(scratch, scratch, destination);
457 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
458 Ld_hu(destination, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
459}
460
461void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
462 Register entrypoint, Register parameter_count, Register dispatch_handle,
463 Register scratch) {
464 DCHECK(!AreAliased(entrypoint, parameter_count, dispatch_handle, scratch));
465 ASM_CODE_COMMENT(this);
466
467 Register index = parameter_count;
468 li(scratch, ExternalReference::js_dispatch_table_address());
469 srli_d(index, dispatch_handle, kJSDispatchHandleShift);
470 slli_d(index, index, kJSDispatchTableEntrySizeLog2);
471 Add_d(scratch, scratch, parameter_count);
472 Ld_d(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
473 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
474 Ld_hu(parameter_count,
475 MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
476}
477#endif
478
479void MacroAssembler::LoadProtectedPointerField(Register destination,
480 MemOperand field_operand) {
482#ifdef V8_ENABLE_SANDBOX
483 DecompressProtected(destination, field_operand);
484#else
485 LoadTaggedField(destination, field_operand);
486#endif
487}
488
489void MacroAssembler::MaybeSaveRegisters(RegList registers) {
490 if (registers.is_empty()) return;
491 MultiPush(registers);
492}
493
494void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
495 if (registers.is_empty()) return;
496 MultiPop(registers);
497}
498
499void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
500 SaveFPRegsMode fp_mode) {
501 ASM_CODE_COMMENT(this);
502 RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
503 MaybeSaveRegisters(registers);
504
505 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
506 Register slot_address_parameter =
507 WriteBarrierDescriptor::SlotAddressRegister();
508
509 MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
510
511 CallBuiltin(Builtins::EphemeronKeyBarrier(fp_mode));
512 MaybeRestoreRegisters(registers);
513}
514
515void MacroAssembler::CallIndirectPointerBarrier(Register object, Operand offset,
516 SaveFPRegsMode fp_mode,
517 IndirectPointerTag tag) {
518 ASM_CODE_COMMENT(this);
519 RegList registers =
520 IndirectPointerWriteBarrierDescriptor::ComputeSavedRegisters(object);
521 MaybeSaveRegisters(registers);
522
523 MoveObjectAndSlot(
524 IndirectPointerWriteBarrierDescriptor::ObjectRegister(),
525 IndirectPointerWriteBarrierDescriptor::SlotAddressRegister(), object,
526 offset);
527 li(IndirectPointerWriteBarrierDescriptor::IndirectPointerTagRegister(),
528 Operand(tag));
529
530 CallBuiltin(Builtins::IndirectPointerBarrier(fp_mode));
531 MaybeRestoreRegisters(registers);
532}
533
534void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
535 Operand offset,
536 SaveFPRegsMode fp_mode,
537 StubCallMode mode) {
538 ASM_CODE_COMMENT(this);
539 RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
540 MaybeSaveRegisters(registers);
541
542 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
543 Register slot_address_parameter =
544 WriteBarrierDescriptor::SlotAddressRegister();
545
546 MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
547
548 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
549
550 MaybeRestoreRegisters(registers);
551}
552
553void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
554 SaveFPRegsMode fp_mode,
555 StubCallMode mode) {
556 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
557 // need to be caller saved.
558 DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
559 DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
560#if V8_ENABLE_WEBASSEMBLY
561 if (mode == StubCallMode::kCallWasmRuntimeStub) {
562 auto wasm_target =
563 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
564 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
565#else
566 if (false) {
567#endif
568 } else {
569 CallBuiltin(Builtins::RecordWrite(fp_mode));
570 }
571}
572
573void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
574 Register object, Operand offset) {
575 ASM_CODE_COMMENT(this);
576 DCHECK_NE(dst_object, dst_slot);
577 // If `offset` is a register, it cannot overlap with `object`.
578 DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object);
579
580 // If the slot register does not overlap with the object register, we can
581 // overwrite it.
582 if (dst_slot != object) {
583 Add_d(dst_slot, object, offset);
584 mov(dst_object, object);
585 return;
586 }
587
588 DCHECK_EQ(dst_slot, object);
589
590 // If the destination object register does not overlap with the offset
591 // register, we can overwrite it.
592 if (offset.IsImmediate() || (offset.rm() != dst_object)) {
593 mov(dst_object, dst_slot);
594 Add_d(dst_slot, dst_slot, offset);
595 return;
596 }
597
598 DCHECK_EQ(dst_object, offset.rm());
599
600 // We only have `dst_slot` and `dst_object` left as distinct registers so we
601 // have to swap them. We write this as an add+sub sequence to avoid using a
602 // scratch register.
603 Add_d(dst_slot, dst_slot, dst_object);
604 Sub_d(dst_object, dst_slot, dst_object);
605}
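
// A minimal standalone sketch of the final add+sub sequence above, assuming
// dst_slot currently holds the object address O and dst_object holds the
// offset K (the only case left at that point):
static inline void MoveObjectAndSlotTailSketch(unsigned long long& dst_slot,
                                               unsigned long long& dst_object) {
  dst_slot = dst_slot + dst_object;    // dst_slot   = O + K, the slot address
  dst_object = dst_slot - dst_object;  // dst_object = (O + K) - K = O
}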
606
607// If ra_status is kRAHasBeenSaved, ra will be clobbered.
608// TODO(LOONG_dev): LOONG64 Check this comment
609// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
610// The register 'object' contains a heap object pointer. The heap object
611// tag is shifted away.
612void MacroAssembler::RecordWrite(Register object, Operand offset,
613 Register value, RAStatus ra_status,
614 SaveFPRegsMode fp_mode, SmiCheck smi_check,
615 SlotDescriptor slot) {
616 DCHECK(!AreAliased(object, value));
617
618 if (v8_flags.slow_debug_code) {
619 UseScratchRegisterScope temps(this);
620 Register scratch = temps.Acquire();
621 Add_d(scratch, object, offset);
622 if (slot.contains_indirect_pointer()) {
623 LoadIndirectPointerField(scratch, MemOperand(scratch, 0),
624 slot.indirect_pointer_tag());
625 } else {
626 DCHECK(slot.contains_direct_pointer());
627 LoadTaggedField(scratch, MemOperand(scratch, 0));
628 }
629 Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
630 Operand(value));
631 }
632
633 if (v8_flags.disable_write_barriers) {
634 return;
635 }
636
637 // First, check if a write barrier is even needed. The tests below
638 // catch stores of smis and stores into the young generation.
639 Label done;
640
641 if (smi_check == SmiCheck::kInline) {
642 DCHECK_EQ(0, kSmiTag);
643 JumpIfSmi(value, &done);
644 }
645
646 CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
647 &done);
648
649 CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
650 &done);
651
652 // Record the actual write.
653 if (ra_status == kRAHasNotBeenSaved) {
654 Push(ra);
655 }
656
657 Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
658 DCHECK(!AreAliased(object, slot_address, value));
659 if (slot.contains_direct_pointer()) {
660 DCHECK(offset.IsImmediate());
661 Add_d(slot_address, object, offset);
662 CallRecordWriteStub(object, slot_address, fp_mode,
663 StubCallMode::kCallBuiltinPointer);
664 } else {
665 DCHECK(slot.contains_indirect_pointer());
666 CallIndirectPointerBarrier(object, offset, fp_mode,
667 slot.indirect_pointer_tag());
668 }
669 if (ra_status == kRAHasNotBeenSaved) {
670 Pop(ra);
671 }
672
673 bind(&done);
674}
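
// A minimal standalone sketch of the barrier fast-path decision above: the
// stub is only reached when the value is not a Smi and both page-flag checks
// fail to take the early exit. The booleans stand in for the Smi check and
// the two CheckPageFlag tests and are illustrative only.
static inline bool NeedsWriteBarrierSketch(bool value_is_smi,
                                           bool value_page_interesting,
                                           bool object_page_interesting) {
  if (value_is_smi) return false;  // JumpIfSmi(value, &done)
  return value_page_interesting && object_page_interesting;
}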
675
676// ---------------------------------------------------------------------------
677// Instruction macros.
678
679void MacroAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
680 if (rk.is_reg()) {
681 add_w(rd, rj, rk.rm());
682 } else {
683 if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
684 addi_w(rd, rj, static_cast<int32_t>(rk.immediate()));
685 } else {
686 // li handles the relocation.
687 UseScratchRegisterScope temps(this);
688 Register scratch = temps.Acquire();
689 DCHECK(rj != scratch);
690 li(scratch, rk);
691 add_w(rd, rj, scratch);
692 }
693 }
694}
695
696void MacroAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
697 if (rk.is_reg()) {
698 add_d(rd, rj, rk.rm());
699 } else {
700 if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
701 addi_d(rd, rj, static_cast<int32_t>(rk.immediate()));
702 } else {
703 // li handles the relocation.
704 UseScratchRegisterScope temps(this);
705 Register scratch = temps.Acquire();
706 DCHECK(rj != scratch);
707 li(scratch, rk);
708 add_d(rd, rj, scratch);
709 }
710 }
711}
712
713void MacroAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
714 if (rk.is_reg()) {
715 sub_w(rd, rj, rk.rm());
716 } else {
717 DCHECK(is_int32(rk.immediate()));
718 if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
719 // No subi_w instr, use addi_w(x, y, -imm).
720 addi_w(rd, rj, static_cast<int32_t>(-rk.immediate()));
721 } else {
722 UseScratchRegisterScope temps(this);
723 Register scratch = temps.Acquire();
724 DCHECK(rj != scratch);
725 if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) {
726 // Use load -imm and add_w when loading -imm generates one instruction.
727 li(scratch, -rk.immediate());
728 add_w(rd, rj, scratch);
729 } else {
730 // li handles the relocation.
731 li(scratch, rk);
732 sub_w(rd, rj, scratch);
733 }
734 }
735 }
736}
737
738void MacroAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
739 if (rk.is_reg()) {
740 sub_d(rd, rj, rk.rm());
741 } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
742 // No subi_d instr, use addi_d(x, y, -imm).
743 addi_d(rd, rj, static_cast<int32_t>(-rk.immediate()));
744 } else {
745 int li_count = InstrCountForLi64Bit(rk.immediate());
746 int li_neg_count = InstrCountForLi64Bit(-rk.immediate());
747 if (li_neg_count < li_count && !MustUseReg(rk.rmode())) {
748 // Use load -imm and add_d when loading -imm needs fewer instructions than imm.
749 DCHECK(rk.immediate() != std::numeric_limits<int32_t>::min());
750 UseScratchRegisterScope temps(this);
751 Register scratch = temps.Acquire();
752 li(scratch, Operand(-rk.immediate()));
753 add_d(rd, rj, scratch);
754 } else {
755 // li handles the relocation.
756 UseScratchRegisterScope temps(this);
757 Register scratch = temps.Acquire();
758 li(scratch, rk);
759 sub_d(rd, rj, scratch);
760 }
761 }
762}
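
// A minimal standalone sketch of why Sub_w/Sub_d above prefer the negated
// immediate: there is no subi instruction, and whenever -imm still fits the
// signed 12-bit addi field the subtraction costs a single instruction.
static inline bool NegatedImmFitsAddiSketch(long long imm) {
  long long neg = -imm;
  return neg >= -2048 && neg <= 2047;  // mirrors is_int12(-rk.immediate())
}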
763
764void MacroAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
765 if (rk.is_reg()) {
766 mul_w(rd, rj, rk.rm());
767 } else {
768 // li handles the relocation.
769 UseScratchRegisterScope temps(this);
770 Register scratch = temps.Acquire();
771 DCHECK(rj != scratch);
772 li(scratch, rk);
773 mul_w(rd, rj, scratch);
774 }
775}
776
777void MacroAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
778 if (rk.is_reg()) {
779 mulh_w(rd, rj, rk.rm());
780 } else {
781 // li handles the relocation.
782 UseScratchRegisterScope temps(this);
783 Register scratch = temps.Acquire();
784 DCHECK(rj != scratch);
785 li(scratch, rk);
786 mulh_w(rd, rj, scratch);
787 }
788}
789
790void MacroAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
791 if (rk.is_reg()) {
792 mulh_wu(rd, rj, rk.rm());
793 } else {
794 // li handles the relocation.
795 UseScratchRegisterScope temps(this);
796 Register scratch = temps.Acquire();
797 DCHECK(rj != scratch);
798 li(scratch, rk);
799 mulh_wu(rd, rj, scratch);
800 }
801}
802
803void MacroAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
804 if (rk.is_reg()) {
805 mul_d(rd, rj, rk.rm());
806 } else {
807 // li handles the relocation.
808 UseScratchRegisterScope temps(this);
809 Register scratch = temps.Acquire();
810 DCHECK(rj != scratch);
811 li(scratch, rk);
812 mul_d(rd, rj, scratch);
813 }
814}
815
816void MacroAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
817 if (rk.is_reg()) {
818 mulh_d(rd, rj, rk.rm());
819 } else {
820 // li handles the relocation.
821 UseScratchRegisterScope temps(this);
822 Register scratch = temps.Acquire();
823 DCHECK(rj != scratch);
824 li(scratch, rk);
825 mulh_d(rd, rj, scratch);
826 }
827}
828
829void MacroAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) {
830 if (rk.is_reg()) {
831 mulh_du(rd, rj, rk.rm());
832 } else {
833 // li handles the relocation.
834 UseScratchRegisterScope temps(this);
835 Register scratch = temps.Acquire();
836 DCHECK(rj != scratch);
837 li(scratch, rk);
838 mulh_du(rd, rj, scratch);
839 }
840}
841
842void MacroAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
843 if (rk.is_reg()) {
844 div_w(rd, rj, rk.rm());
845 } else {
846 // li handles the relocation.
847 UseScratchRegisterScope temps(this);
848 Register scratch = temps.Acquire();
849 DCHECK(rj != scratch);
850 li(scratch, rk);
851 div_w(rd, rj, scratch);
852 }
853}
854
855void MacroAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
856 if (rk.is_reg()) {
857 mod_w(rd, rj, rk.rm());
858 } else {
859 // li handles the relocation.
860 UseScratchRegisterScope temps(this);
861 Register scratch = temps.Acquire();
862 DCHECK(rj != scratch);
863 li(scratch, rk);
864 mod_w(rd, rj, scratch);
865 }
866}
867
868void MacroAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
869 if (rk.is_reg()) {
870 mod_wu(rd, rj, rk.rm());
871 } else {
872 // li handles the relocation.
873 UseScratchRegisterScope temps(this);
874 Register scratch = temps.Acquire();
875 DCHECK(rj != scratch);
876 li(scratch, rk);
877 mod_wu(rd, rj, scratch);
878 }
879}
880
881void MacroAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
882 if (rk.is_reg()) {
883 div_d(rd, rj, rk.rm());
884 } else {
885 // li handles the relocation.
886 UseScratchRegisterScope temps(this);
887 Register scratch = temps.Acquire();
888 DCHECK(rj != scratch);
889 li(scratch, rk);
890 div_d(rd, rj, scratch);
891 }
892}
893
894void MacroAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
895 if (rk.is_reg()) {
896 div_wu(rd, rj, rk.rm());
897 } else {
898 // li handles the relocation.
899 UseScratchRegisterScope temps(this);
900 Register scratch = temps.Acquire();
901 DCHECK(rj != scratch);
902 li(scratch, rk);
903 div_wu(rd, rj, scratch);
904 }
905}
906
907void MacroAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
908 if (rk.is_reg()) {
909 div_du(rd, rj, rk.rm());
910 } else {
911 // li handles the relocation.
912 UseScratchRegisterScope temps(this);
913 Register scratch = temps.Acquire();
914 DCHECK(rj != scratch);
915 li(scratch, rk);
916 div_du(rd, rj, scratch);
917 }
918}
919
920void MacroAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
921 if (rk.is_reg()) {
922 mod_d(rd, rj, rk.rm());
923 } else {
924 // li handles the relocation.
925 UseScratchRegisterScope temps(this);
926 Register scratch = temps.Acquire();
927 DCHECK(rj != scratch);
928 li(scratch, rk);
929 mod_d(rd, rj, scratch);
930 }
931}
932
933void MacroAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
934 if (rk.is_reg()) {
935 mod_du(rd, rj, rk.rm());
936 } else {
937 // li handles the relocation.
938 UseScratchRegisterScope temps(this);
939 Register scratch = temps.Acquire();
940 DCHECK(rj != scratch);
941 li(scratch, rk);
942 mod_du(rd, rj, scratch);
943 }
944}
945
946void MacroAssembler::And(Register rd, Register rj, const Operand& rk) {
947 if (rk.is_reg()) {
948 and_(rd, rj, rk.rm());
949 } else {
950 if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
951 andi(rd, rj, static_cast<int32_t>(rk.immediate()));
952 } else {
953 // li handles the relocation.
954 UseScratchRegisterScope temps(this);
955 Register scratch = temps.Acquire();
956 DCHECK(rj != scratch);
957 li(scratch, rk);
958 and_(rd, rj, scratch);
959 }
960 }
961}
962
963void MacroAssembler::Or(Register rd, Register rj, const Operand& rk) {
964 if (rk.is_reg()) {
965 or_(rd, rj, rk.rm());
966 } else {
967 if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
968 ori(rd, rj, static_cast<int32_t>(rk.immediate()));
969 } else {
970 // li handles the relocation.
971 UseScratchRegisterScope temps(this);
972 Register scratch = temps.Acquire();
973 DCHECK(rj != scratch);
974 li(scratch, rk);
975 or_(rd, rj, scratch);
976 }
977 }
978}
979
980void MacroAssembler::Xor(Register rd, Register rj, const Operand& rk) {
981 if (rk.is_reg()) {
982 xor_(rd, rj, rk.rm());
983 } else {
984 if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
985 xori(rd, rj, static_cast<int32_t>(rk.immediate()));
986 } else {
987 // li handles the relocation.
988 UseScratchRegisterScope temps(this);
989 Register scratch = temps.Acquire();
990 DCHECK(rj != scratch);
991 li(scratch, rk);
992 xor_(rd, rj, scratch);
993 }
994 }
995}
996
997void MacroAssembler::Nor(Register rd, Register rj, const Operand& rk) {
998 if (rk.is_reg()) {
999 nor(rd, rj, rk.rm());
1000 } else {
1001 // li handles the relocation.
1002 UseScratchRegisterScope temps(this);
1003 Register scratch = temps.Acquire();
1004 DCHECK(rj != scratch);
1005 li(scratch, rk);
1006 nor(rd, rj, scratch);
1007 }
1008}
1009
1010void MacroAssembler::Andn(Register rd, Register rj, const Operand& rk) {
1011 if (rk.is_reg()) {
1012 andn(rd, rj, rk.rm());
1013 } else {
1014 // li handles the relocation.
1015 UseScratchRegisterScope temps(this);
1016 Register scratch = temps.Acquire();
1017 DCHECK(rj != scratch);
1018 li(scratch, rk);
1019 andn(rd, rj, scratch);
1020 }
1021}
1022
1023void MacroAssembler::Orn(Register rd, Register rj, const Operand& rk) {
1024 if (rk.is_reg()) {
1025 orn(rd, rj, rk.rm());
1026 } else {
1027 // li handles the relocation.
1028 UseScratchRegisterScope temps(this);
1029 Register scratch = temps.Acquire();
1030 DCHECK(rj != scratch);
1031 li(scratch, rk);
1032 orn(rd, rj, scratch);
1033 }
1034}
1035
1036void MacroAssembler::Neg(Register rj, const Operand& rk) {
1037 DCHECK(rk.is_reg());
1038 sub_d(rj, zero_reg, rk.rm());
1039}
1040
1041void MacroAssembler::Slt(Register rd, Register rj, const Operand& rk) {
1042 if (rk.is_reg()) {
1043 slt(rd, rj, rk.rm());
1044 } else {
1045 if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
1046 slti(rd, rj, static_cast<int32_t>(rk.immediate()));
1047 } else {
1048 // li handles the relocation.
1049 UseScratchRegisterScope temps(this);
1050 BlockTrampolinePoolScope block_trampoline_pool(this);
1051 Register scratch = temps.Acquire();
1052 DCHECK(rj != scratch);
1053 li(scratch, rk);
1054 slt(rd, rj, scratch);
1055 }
1056 }
1057}
1058
1059void MacroAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
1060 if (rk.is_reg()) {
1061 sltu(rd, rj, rk.rm());
1062 } else {
1063 if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
1064 sltui(rd, rj, static_cast<int32_t>(rk.immediate()));
1065 } else {
1066 // li handles the relocation.
1067 UseScratchRegisterScope temps(this);
1068 BlockTrampolinePoolScope block_trampoline_pool(this);
1069 Register scratch = temps.Acquire();
1070 DCHECK(rj != scratch);
1071 li(scratch, rk);
1072 sltu(rd, rj, scratch);
1073 }
1074 }
1075}
1076
1077void MacroAssembler::Sle(Register rd, Register rj, const Operand& rk) {
1078 if (rk.is_reg()) {
1079 slt(rd, rk.rm(), rj);
1080 } else {
1081 if (rk.immediate() == 0 && !MustUseReg(rk.rmode())) {
1082 slt(rd, zero_reg, rj);
1083 } else {
1084 // li handles the relocation.
1085 UseScratchRegisterScope temps(this);
1086 Register scratch = temps.Acquire();
1087 BlockTrampolinePoolScope block_trampoline_pool(this);
1088 DCHECK(rj != scratch);
1089 li(scratch, rk);
1090 slt(rd, scratch, rj);
1091 }
1092 }
1093 xori(rd, rd, 1);
1094}
1095
1096void MacroAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
1097 if (rk.is_reg()) {
1098 sltu(rd, rk.rm(), rj);
1099 } else {
1100 if (rk.immediate() == 0 && !MustUseReg(rk.rmode())) {
1101 sltu(rd, zero_reg, rj);
1102 } else {
1103 // li handles the relocation.
1104 UseScratchRegisterScope temps(this);
1105 Register scratch = temps.Acquire();
1106 BlockTrampolinePoolScope block_trampoline_pool(this);
1107 DCHECK(rj != scratch);
1108 li(scratch, rk);
1109 sltu(rd, scratch, rj);
1110 }
1111 }
1112 xori(rd, rd, 1);
1113}
1114
1115void MacroAssembler::Sge(Register rd, Register rj, const Operand& rk) {
1116 Slt(rd, rj, rk);
1117 xori(rd, rd, 1);
1118}
1119
1120void MacroAssembler::Sgeu(Register rd, Register rj, const Operand& rk) {
1121 Sltu(rd, rj, rk);
1122 xori(rd, rd, 1);
1123}
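
// A minimal standalone sketch of how the composed comparisons above are
// built from slt plus xori 1, using (a >= b) == !(a < b) and
// (a <= b) == !(b < a):
static inline int SgeSketch(long long a, long long b) {
  int lt = (a < b) ? 1 : 0;  // slt rd, rj, rk
  return lt ^ 1;             // xori rd, rd, 1
}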
1124
1125void MacroAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
1126 if (rk.is_reg()) {
1127 slt(rd, rk.rm(), rj);
1128 } else {
1129 if (rk.immediate() == 0 && !MustUseReg(rk.rmode())) {
1130 slt(rd, zero_reg, rj);
1131 } else {
1132 // li handles the relocation.
1133 UseScratchRegisterScope temps(this);
1134 Register scratch = temps.Acquire();
1135 BlockTrampolinePoolScope block_trampoline_pool(this);
1136 DCHECK(rj != scratch);
1137 li(scratch, rk);
1138 slt(rd, scratch, rj);
1139 }
1140 }
1141}
1142
1143void MacroAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
1144 if (rk.is_reg()) {
1145 sltu(rd, rk.rm(), rj);
1146 } else {
1147 if (rk.immediate() == 0 && !MustUseReg(rk.rmode())) {
1148 sltu(rd, zero_reg, rj);
1149 } else {
1150 // li handles the relocation.
1151 UseScratchRegisterScope temps(this);
1152 Register scratch = temps.Acquire();
1153 BlockTrampolinePoolScope block_trampoline_pool(this);
1154 DCHECK(rj != scratch);
1155 li(scratch, rk);
1156 sltu(rd, scratch, rj);
1157 }
1158 }
1159}
1160
1161void MacroAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
1162 if (rk.is_reg()) {
1163 rotr_w(rd, rj, rk.rm());
1164 } else {
1165 int64_t ror_value = rk.immediate() % 32;
1166 if (ror_value < 0) {
1167 ror_value += 32;
1168 }
1169 rotri_w(rd, rj, ror_value);
1170 }
1171}
1172
1173void MacroAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
1174 if (rk.is_reg()) {
1175 rotr_d(rd, rj, rk.rm());
1176 } else {
1177 int64_t dror_value = rk.immediate() % 64;
1178 if (dror_value < 0) dror_value += 64;
1179 rotri_d(rd, rj, dror_value);
1180 }
1181}
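
// A minimal standalone sketch of the immediate handling above: rotate
// amounts are reduced modulo the register width and negative remainders are
// wrapped back into [0, width) before being encoded in rotri.
static inline int NormalizeRotateAmountSketch(long long amount, int width) {
  long long r = amount % width;
  if (r < 0) r += width;
  return static_cast<int>(r);
}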
1182
1183void MacroAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa) {
1184 DCHECK(sa >= 1 && sa <= 31);
1185 if (sa <= 4) {
1186 alsl_w(rd, rj, rk, sa);
1187 } else {
1188 UseScratchRegisterScope temps(this);
1189 Register tmp = rd == rk ? temps.Acquire() : rd;
1190 DCHECK(tmp != rk);
1191 slli_w(tmp, rj, sa);
1192 add_w(rd, rk, tmp);
1193 }
1194}
1195
1196void MacroAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa) {
1197 DCHECK(sa >= 1 && sa <= 63);
1198 if (sa <= 4) {
1199 alsl_d(rd, rj, rk, sa);
1200 } else {
1201 UseScratchRegisterScope temps(this);
1202 Register tmp = rd == rk ? temps.Acquire() : rd;
1203 DCHECK(tmp != rk);
1204 slli_d(tmp, rj, sa);
1205 add_d(rd, rk, tmp);
1206 }
1207}
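
// A minimal standalone sketch of the value computed by Alsl_w/Alsl_d above:
// rd = (rj << sa) + rk. The alsl instruction only encodes sa in 1..4, so
// larger shift amounts fall back to an explicit shift followed by an add.
static inline long long ShiftAddSketch(long long rj, long long rk, int sa) {
  return (rj << sa) + rk;
}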
1208
1209// ------------Pseudo-instructions-------------
1210
1211// Change endianness
1212void MacroAssembler::ByteSwap(Register dest, Register src, int operand_size) {
1213 DCHECK(operand_size == 4 || operand_size == 8);
1214 if (operand_size == 4) {
1215 revb_2w(dest, src);
1216 slli_w(dest, dest, 0);
1217 } else {
1218 revb_d(dest, src);
1219 }
1220}
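
// A minimal standalone sketch of the 4-byte path above, assuming the usual
// LA64 convention that 32-bit results are kept sign-extended in 64-bit
// registers; the trailing slli_w(dest, dest, 0) exists only for that
// sign extension.
static inline long long ByteSwap32Sketch(unsigned int w) {
  unsigned int swapped = (w >> 24) | ((w >> 8) & 0x0000FF00u) |
                         ((w << 8) & 0x00FF0000u) | (w << 24);
  return static_cast<int>(swapped);  // sign-extend the swapped word
}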
1221
1222void MacroAssembler::Ld_b(Register rd, const MemOperand& rj) {
1223 MemOperand source = rj;
1224 AdjustBaseAndOffset(&source);
1225 if (source.hasIndexReg()) {
1226 ldx_b(rd, source.base(), source.index());
1227 } else {
1228 ld_b(rd, source.base(), source.offset());
1229 }
1230}
1231
1232void MacroAssembler::Ld_bu(Register rd, const MemOperand& rj) {
1233 MemOperand source = rj;
1234 AdjustBaseAndOffset(&source);
1235 if (source.hasIndexReg()) {
1236 ldx_bu(rd, source.base(), source.index());
1237 } else {
1238 ld_bu(rd, source.base(), source.offset());
1239 }
1240}
1241
1242void MacroAssembler::St_b(Register rd, const MemOperand& rj) {
1243 MemOperand source = rj;
1244 AdjustBaseAndOffset(&source);
1245 if (source.hasIndexReg()) {
1246 stx_b(rd, source.base(), source.index());
1247 } else {
1248 st_b(rd, source.base(), source.offset());
1249 }
1250}
1251
1252void MacroAssembler::Ld_h(Register rd, const MemOperand& rj) {
1253 MemOperand source = rj;
1254 AdjustBaseAndOffset(&source);
1255 if (source.hasIndexReg()) {
1256 ldx_h(rd, source.base(), source.index());
1257 } else {
1258 ld_h(rd, source.base(), source.offset());
1259 }
1260}
1261
1262void MacroAssembler::Ld_hu(Register rd, const MemOperand& rj) {
1263 MemOperand source = rj;
1264 AdjustBaseAndOffset(&source);
1265 if (source.hasIndexReg()) {
1266 ldx_hu(rd, source.base(), source.index());
1267 } else {
1268 ld_hu(rd, source.base(), source.offset());
1269 }
1270}
1271
1272void MacroAssembler::St_h(Register rd, const MemOperand& rj) {
1273 MemOperand source = rj;
1274 AdjustBaseAndOffset(&source);
1275 if (source.hasIndexReg()) {
1276 stx_h(rd, source.base(), source.index());
1277 } else {
1278 st_h(rd, source.base(), source.offset());
1279 }
1280}
1281
1282void MacroAssembler::Ld_w(Register rd, const MemOperand& rj) {
1283 MemOperand source = rj;
1284
1285 if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
1286 (source.offset() & 0b11) == 0) {
1287 ldptr_w(rd, source.base(), source.offset());
1288 return;
1289 }
1290
1291 AdjustBaseAndOffset(&source);
1292 if (source.hasIndexReg()) {
1293 ldx_w(rd, source.base(), source.index());
1294 } else {
1295 ld_w(rd, source.base(), source.offset());
1296 }
1297}
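
// A minimal standalone sketch of the fast-path test above: ldptr_w/stptr_w
// encode a 14-bit immediate that is scaled by 4, so they apply exactly when
// the offset is 4-byte aligned and fits in a signed 16-bit byte offset.
static inline bool FitsLdptrOffsetSketch(long long offset) {
  bool aligned = (offset & 0b11) == 0;
  bool fits = offset >= -32768 && offset <= 32767;  // is_int16(offset)
  return aligned && fits;
}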
1298
1299void MacroAssembler::Ld_wu(Register rd, const MemOperand& rj) {
1300 MemOperand source = rj;
1301 AdjustBaseAndOffset(&source);
1302
1303 if (source.hasIndexReg()) {
1304 ldx_wu(rd, source.base(), source.index());
1305 } else {
1306 ld_wu(rd, source.base(), source.offset());
1307 }
1308}
1309
1310void MacroAssembler::St_w(Register rd, const MemOperand& rj) {
1311 MemOperand source = rj;
1312
1313 if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
1314 (source.offset() & 0b11) == 0) {
1315 stptr_w(rd, source.base(), source.offset());
1316 return;
1317 }
1318
1319 AdjustBaseAndOffset(&source);
1320 if (source.hasIndexReg()) {
1321 stx_w(rd, source.base(), source.index());
1322 } else {
1323 st_w(rd, source.base(), source.offset());
1324 }
1325}
1326
1327void MacroAssembler::Ld_d(Register rd, const MemOperand& rj) {
1328 MemOperand source = rj;
1329
1330 if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
1331 (source.offset() & 0b11) == 0) {
1332 ldptr_d(rd, source.base(), source.offset());
1333 return;
1334 }
1335
1336 AdjustBaseAndOffset(&source);
1337 if (source.hasIndexReg()) {
1338 ldx_d(rd, source.base(), source.index());
1339 } else {
1340 ld_d(rd, source.base(), source.offset());
1341 }
1342}
1343
1344void MacroAssembler::St_d(Register rd, const MemOperand& rj) {
1345 MemOperand source = rj;
1346
1347 if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
1348 (source.offset() & 0b11) == 0) {
1349 stptr_d(rd, source.base(), source.offset());
1350 return;
1351 }
1352
1353 AdjustBaseAndOffset(&source);
1354 if (source.hasIndexReg()) {
1355 stx_d(rd, source.base(), source.index());
1356 } else {
1357 st_d(rd, source.base(), source.offset());
1358 }
1359}
1360
1361void MacroAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
1362 MemOperand tmp = src;
1363 AdjustBaseAndOffset(&tmp);
1364 if (tmp.hasIndexReg()) {
1365 fldx_s(fd, tmp.base(), tmp.index());
1366 } else {
1367 fld_s(fd, tmp.base(), tmp.offset());
1368 }
1369}
1370
1371void MacroAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
1372 MemOperand tmp = src;
1373 AdjustBaseAndOffset(&tmp);
1374 if (tmp.hasIndexReg()) {
1375 fstx_s(fs, tmp.base(), tmp.index());
1376 } else {
1377 fst_s(fs, tmp.base(), tmp.offset());
1378 }
1379}
1380
1381void MacroAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
1382 MemOperand tmp = src;
1383 AdjustBaseAndOffset(&tmp);
1384 if (tmp.hasIndexReg()) {
1385 fldx_d(fd, tmp.base(), tmp.index());
1386 } else {
1387 fld_d(fd, tmp.base(), tmp.offset());
1388 }
1389}
1390
1391void MacroAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
1392 MemOperand tmp = src;
1393 AdjustBaseAndOffset(&tmp);
1394 if (tmp.hasIndexReg()) {
1395 fstx_d(fs, tmp.base(), tmp.index());
1396 } else {
1397 fst_d(fs, tmp.base(), tmp.offset());
1398 }
1399}
1400
1401void MacroAssembler::Ll_w(Register rd, const MemOperand& rj) {
1402 DCHECK(!rj.hasIndexReg());
1403 bool is_one_instruction = is_int14(rj.offset());
1404 if (is_one_instruction) {
1405 ll_w(rd, rj.base(), rj.offset());
1406 } else {
1407 UseScratchRegisterScope temps(this);
1408 Register scratch = temps.Acquire();
1409 li(scratch, rj.offset());
1410 add_d(scratch, scratch, rj.base());
1411 ll_w(rd, scratch, 0);
1412 }
1413}
1414
1415void MacroAssembler::Ll_d(Register rd, const MemOperand& rj) {
1416 DCHECK(!rj.hasIndexReg());
1417 bool is_one_instruction = is_int14(rj.offset());
1418 if (is_one_instruction) {
1419 ll_d(rd, rj.base(), rj.offset());
1420 } else {
1421 UseScratchRegisterScope temps(this);
1422 Register scratch = temps.Acquire();
1423 li(scratch, rj.offset());
1424 add_d(scratch, scratch, rj.base());
1425 ll_d(rd, scratch, 0);
1426 }
1427}
1428
1429void MacroAssembler::Sc_w(Register rd, const MemOperand& rj) {
1430 DCHECK(!rj.hasIndexReg());
1431 bool is_one_instruction = is_int14(rj.offset());
1432 if (is_one_instruction) {
1433 sc_w(rd, rj.base(), rj.offset());
1434 } else {
1435 UseScratchRegisterScope temps(this);
1436 Register scratch = temps.Acquire();
1437 li(scratch, rj.offset());
1438 add_d(scratch, scratch, rj.base());
1439 sc_w(rd, scratch, 0);
1440 }
1441}
1442
1443void MacroAssembler::Sc_d(Register rd, const MemOperand& rj) {
1444 DCHECK(!rj.hasIndexReg());
1445 bool is_one_instruction = is_int14(rj.offset());
1446 if (is_one_instruction) {
1447 sc_d(rd, rj.base(), rj.offset());
1448 } else {
1449 UseScratchRegisterScope temps(this);
1450 Register scratch = temps.Acquire();
1451 li(scratch, rj.offset());
1452 add_d(scratch, scratch, rj.base());
1453 sc_d(rd, scratch, 0);
1454 }
1455}
1456
1457void MacroAssembler::li(Register dst, Handle<HeapObject> value,
1458 RelocInfo::Mode rmode, LiFlags mode) {
1459 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1460 // non-isolate-independent code. In many cases it might be cheaper than
1461 // embedding the relocatable value.
1462 if (root_array_available_ && options().isolate_independent_code) {
1463 IndirectLoadConstant(dst, value);
1464 return;
1465 }
1466 li(dst, Operand(value), mode);
1467}
1468
1469void MacroAssembler::li(Register dst, ExternalReference reference,
1470 LiFlags mode) {
1471 if (root_array_available()) {
1472 if (reference.IsIsolateFieldId()) {
1473 Add_d(dst, kRootRegister, Operand(reference.offset_from_root_register()));
1474 return;
1475 }
1476 if (options().isolate_independent_code) {
1477 IndirectLoadExternalReference(dst, reference);
1478 return;
1479 }
1480 }
1481
1482 // External references should not get created with IDs if
1483 // `!root_array_available()`.
1484 CHECK(!reference.IsIsolateFieldId());
1485 li(dst, Operand(reference), mode);
1486}
1487
1488static inline int InstrCountForLiLower32Bit(int64_t value) {
1489 if (is_int12(static_cast<int32_t>(value)) ||
1490 is_uint12(static_cast<int32_t>(value)) || !(value & kImm12Mask)) {
1491 return 1;
1492 } else {
1493 return 2;
1494 }
1495}
1496
1497void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1498 if (is_int12(static_cast<int32_t>(j.immediate()))) {
1499 addi_d(rd, zero_reg, j.immediate());
1500 } else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
1501 ori(rd, zero_reg, j.immediate() & kImm12Mask);
1502 } else {
1503 lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
1504 if (j.immediate() & kImm12Mask) {
1505 ori(rd, rd, j.immediate() & kImm12Mask);
1506 }
1507 }
1508}
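
// A minimal standalone sketch of the 32-bit materialization choices above:
// small signed values use one addi_d, small unsigned values one ori, and
// everything else splits into a 20-bit lu12i_w part plus an optional 12-bit
// ori part.
static inline int InstrCountForImm32Sketch(int value) {
  if (value >= -2048 && value <= 2047) return 1;  // addi_d
  if (value >= 0 && value <= 4095) return 1;      // ori
  return (value & 0xfff) ? 2 : 1;                 // lu12i_w [+ ori]
}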
1509
1510int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
1511 if (is_int32(value)) {
1512 return InstrCountForLiLower32Bit(value);
1513 } else if (is_int52(value)) {
1514 return InstrCountForLiLower32Bit(value) + 1;
1515 } else if ((value & 0xffffffffL) == 0) {
1516 // 32 LSBs (Least Significant Bits) all set to zero.
1517 uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32);
1518 uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32);
1519 if (tzc >= 20) {
1520 return 1;
1521 } else if (tzc + lzc > 12) {
1522 return 2;
1523 } else {
1524 return 3;
1525 }
1526 } else {
1527 int64_t imm21 = (value >> 31) & 0x1fffffL;
1528 if (imm21 != 0x1fffffL && imm21 != 0) {
1529 return InstrCountForLiLower32Bit(value) + 2;
1530 } else {
1531 return InstrCountForLiLower32Bit(value) + 1;
1532 }
1533 }
1534 UNREACHABLE();
1535 return INT_MAX;
1536}
1537
1538// All changes to if...else conditions here must be added to
1539// InstrCountForLi64Bit as well.
1540void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
1541 DCHECK(!j.is_reg());
1542 DCHECK(!MustUseReg(j.rmode()));
1543 DCHECK(mode == OPTIMIZE_SIZE);
1544 int64_t imm = j.immediate();
1545 BlockTrampolinePoolScope block_trampoline_pool(this);
1546 // Normal load of an immediate value which does not need Relocation Info.
1547 if (is_int32(imm)) {
1548 LiLower32BitHelper(rd, j);
1549 } else if (is_int52(imm)) {
1550 LiLower32BitHelper(rd, j);
1551 lu32i_d(rd, imm >> 32 & 0xfffff);
1552 } else if ((imm & 0xffffffffL) == 0) {
1553 // 32 LSBs (Least Significant Bits) all set to zero.
1554 uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32);
1555 uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32);
1556 if (tzc >= 20) {
1557 lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask);
1558 } else if (tzc + lzc > 12) {
1559 int32_t mask = (1 << (32 - tzc)) - 1;
1560 lu12i_w(rd, imm >> (tzc + 32) & mask);
1561 slli_d(rd, rd, tzc + 20);
1562 } else {
1563 xor_(rd, rd, rd);
1564 lu32i_d(rd, imm >> 32 & 0xfffff);
1565 lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
1566 }
1567 } else {
1568 int64_t imm21 = (imm >> 31) & 0x1fffffL;
1569 LiLower32BitHelper(rd, j);
1570 if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff);
1571 lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
1572 }
1573}
1574
1575void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1576 DCHECK(!j.is_reg());
1577 BlockTrampolinePoolScope block_trampoline_pool(this);
1578 if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
1579 li_optimized(rd, j, mode);
1580 } else if (RelocInfo::IsCompressedEmbeddedObject(j.rmode())) {
1581 Handle<HeapObject> handle(reinterpret_cast<Address*>(j.immediate()));
1582 uint32_t immediate = AddEmbeddedObject(handle);
1583 RecordRelocInfo(j.rmode(), immediate);
1584 lu12i_w(rd, immediate >> 12 & 0xfffff);
1585 ori(rd, rd, immediate & kImm12Mask);
1586 } else if (MustUseReg(j.rmode())) {
1587 int64_t immediate;
1588 if (j.IsHeapNumberRequest()) {
1589 RequestHeapNumber(j.heap_number_request());
1590 immediate = 0;
1591 } else if (RelocInfo::IsFullEmbeddedObject(j.rmode())) {
1592 Handle<HeapObject> handle(reinterpret_cast<Address*>(j.immediate()));
1593 immediate = AddEmbeddedObject(handle);
1594 } else {
1595 immediate = j.immediate();
1596 }
1597
1598 RecordRelocInfo(j.rmode(), immediate);
1599 lu12i_w(rd, immediate >> 12 & 0xfffff);
1600 ori(rd, rd, immediate & kImm12Mask);
1601 if (RelocInfo::IsWasmCanonicalSigId(j.rmode()) ||
1604 // These reloc data are 32-bit values.
1605 DCHECK(is_int32(immediate) || is_uint32(immediate));
1606 return;
1607 }
1608 lu32i_d(rd, immediate >> 32 & 0xfffff);
1609 } else if (mode == ADDRESS_LOAD) {
1610 // We always need the same number of instructions as we may need to patch
1611 // this code to load another value which may need all 3 instructions.
1612 lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
1613 ori(rd, rd, j.immediate() & kImm12Mask);
1614 lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
1615 } else { // mode == CONSTANT_SIZE - always emit the same instruction
1616 // sequence.
1617 lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
1618 ori(rd, rd, j.immediate() & kImm12Mask);
1619 lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
1620 lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask);
1621 }
1622}
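
// A minimal standalone sketch of the field split used by the CONSTANT_SIZE
// path above: bits [11:0] via ori, [31:12] via lu12i_w, [51:32] via
// lu32i_d and [63:52] via lu52i_d.
static inline void SplitImm64Sketch(long long imm, int parts[4]) {
  parts[0] = static_cast<int>(imm & 0xfff);            // ori
  parts[1] = static_cast<int>((imm >> 12) & 0xfffff);  // lu12i_w
  parts[2] = static_cast<int>((imm >> 32) & 0xfffff);  // lu32i_d
  parts[3] = static_cast<int>((imm >> 52) & 0xfff);    // lu52i_d
}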
1623
1624void MacroAssembler::LoadIsolateField(Register dst, IsolateFieldId id) {
1625 li(dst, ExternalReference::Create(id));
1626}
1627
1628void MacroAssembler::MultiPush(RegList regs) {
1629 int16_t stack_offset = 0;
1630
1631 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1632 if ((regs.bits() & (1 << i)) != 0) {
1633 stack_offset -= kSystemPointerSize;
1634 St_d(ToRegister(i), MemOperand(sp, stack_offset));
1635 }
1636 }
1637 addi_d(sp, sp, stack_offset);
1638}
1639
1640void MacroAssembler::MultiPush(RegList regs1, RegList regs2) {
1641 DCHECK((regs1 & regs2).is_empty());
1642 int16_t stack_offset = 0;
1643
1644 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1645 if ((regs1.bits() & (1 << i)) != 0) {
1646 stack_offset -= kSystemPointerSize;
1647 St_d(ToRegister(i), MemOperand(sp, stack_offset));
1648 }
1649 }
1650 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1651 if ((regs2.bits() & (1 << i)) != 0) {
1652 stack_offset -= kSystemPointerSize;
1653 St_d(ToRegister(i), MemOperand(sp, stack_offset));
1654 }
1655 }
1656 addi_d(sp, sp, stack_offset);
1657}
1658
1659void MacroAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
1660 DCHECK((regs1 & regs2).is_empty());
1661 DCHECK((regs1 & regs3).is_empty());
1662 DCHECK((regs2 & regs3).is_empty());
1663 int16_t stack_offset = 0;
1664
1665 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1666 if ((regs1.bits() & (1 << i)) != 0) {
1667 stack_offset -= kSystemPointerSize;
1668 St_d(ToRegister(i), MemOperand(sp, stack_offset));
1669 }
1670 }
1671 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1672 if ((regs2.bits() & (1 << i)) != 0) {
1673 stack_offset -= kSystemPointerSize;
1674 St_d(ToRegister(i), MemOperand(sp, stack_offset));
1675 }
1676 }
1677 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1678 if ((regs3.bits() & (1 << i)) != 0) {
1679 stack_offset -= kSystemPointerSize;
1680 St_d(ToRegister(i), MemOperand(sp, stack_offset));
1681 }
1682 }
1683 addi_d(sp, sp, stack_offset);
1684}
1685
1686void MacroAssembler::MultiPop(RegList regs) {
1687 int16_t stack_offset = 0;
1688
1689 for (int16_t i = 0; i < kNumRegisters; i++) {
1690 if ((regs.bits() & (1 << i)) != 0) {
1691 Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
1692 stack_offset += kSystemPointerSize;
1693 }
1694 }
1695 addi_d(sp, sp, stack_offset);
1696}
1697
1698void MacroAssembler::MultiPop(RegList regs1, RegList regs2) {
1699 DCHECK((regs1 & regs2).is_empty());
1700 int16_t stack_offset = 0;
1701
1702 for (int16_t i = 0; i < kNumRegisters; i++) {
1703 if ((regs2.bits() & (1 << i)) != 0) {
1704 Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
1705 stack_offset += kSystemPointerSize;
1706 }
1707 }
1708 for (int16_t i = 0; i < kNumRegisters; i++) {
1709 if ((regs1.bits() & (1 << i)) != 0) {
1710 Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
1711 stack_offset += kSystemPointerSize;
1712 }
1713 }
1714 addi_d(sp, sp, stack_offset);
1715}
1716
1717void MacroAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
1718 DCHECK((regs1 & regs2).is_empty());
1719 DCHECK((regs1 & regs3).is_empty());
1720 DCHECK((regs2 & regs3).is_empty());
1721 int16_t stack_offset = 0;
1722
1723 for (int16_t i = 0; i < kNumRegisters; i++) {
1724 if ((regs3.bits() & (1 << i)) != 0) {
1725 Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
1726 stack_offset += kSystemPointerSize;
1727 }
1728 }
1729 for (int16_t i = 0; i < kNumRegisters; i++) {
1730 if ((regs2.bits() & (1 << i)) != 0) {
1731 Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
1732 stack_offset += kSystemPointerSize;
1733 }
1734 }
1735 for (int16_t i = 0; i < kNumRegisters; i++) {
1736 if ((regs1.bits() & (1 << i)) != 0) {
1737 Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
1738 stack_offset += kSystemPointerSize;
1739 }
1740 }
1741 addi_d(sp, sp, stack_offset);
1742}
1743
1744void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
1745 int16_t num_to_push = regs.Count();
1746 int16_t stack_offset = num_to_push * kDoubleSize;
1747
1748 Sub_d(sp, sp, Operand(stack_offset));
1749 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1750 if ((regs.bits() & (1 << i)) != 0) {
1751 stack_offset -= kDoubleSize;
1752 Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1753 }
1754 }
1755}
1756
1757void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
1758 int16_t stack_offset = 0;
1759
1760 for (int16_t i = 0; i < kNumRegisters; i++) {
1761 if ((regs.bits() & (1 << i)) != 0) {
1762 Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1763 stack_offset += kDoubleSize;
1764 }
1765 }
1766 addi_d(sp, sp, stack_offset);
1767}
1768
1769void MacroAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
1770 uint16_t lsbw) {
1771 DCHECK_LT(lsbw, msbw);
1772 DCHECK_LT(lsbw, 32);
1773 DCHECK_LT(msbw, 32);
1774 bstrpick_w(rk, rj, msbw, lsbw);
1775}
1776
1777void MacroAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
1778 uint16_t lsbw) {
1779 DCHECK_LT(lsbw, msbw);
1780 DCHECK_LT(lsbw, 64);
1781 DCHECK_LT(msbw, 64);
1782 bstrpick_d(rk, rj, msbw, lsbw);
1783}
1784
1785void MacroAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); }
1786
1787void MacroAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); }
1788
1789void MacroAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) {
1790 BlockTrampolinePoolScope block_trampoline_pool(this);
1791 UseScratchRegisterScope temps(this);
1792 Register scratch = temps.Acquire();
1793 movfr2gr_s(scratch, fj);
1794 Ffint_d_uw(fd, scratch);
1795}
1796
1797void MacroAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
1798 BlockTrampolinePoolScope block_trampoline_pool(this);
1799 UseScratchRegisterScope temps(this);
1800 Register scratch = temps.Acquire();
1801 DCHECK(rj != scratch);
1802
1803 Bstrpick_d(scratch, rj, 31, 0);
1804 movgr2fr_d(fd, scratch);
1805 ffint_d_l(fd, fd);
1806}
1807
1808void MacroAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) {
1809 BlockTrampolinePoolScope block_trampoline_pool(this);
1810 UseScratchRegisterScope temps(this);
1811 Register scratch = temps.Acquire();
1812 movfr2gr_d(scratch, fj);
1813 Ffint_d_ul(fd, scratch);
1814}
1815
1816void MacroAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
1817 BlockTrampolinePoolScope block_trampoline_pool(this);
1818 UseScratchRegisterScope temps(this);
1819 Register scratch = temps.Acquire();
1820 DCHECK(rj != scratch);
1821
1822 Label msb_clear, conversion_done;
1823
1824 Branch(&msb_clear, ge, rj, Operand(zero_reg));
1825
1826 // Rj >= 2^63
1827 andi(scratch, rj, 1);
1828 srli_d(rj, rj, 1);
1829 or_(scratch, scratch, rj);
1830 movgr2fr_d(fd, scratch);
1831 ffint_d_l(fd, fd);
1832 fadd_d(fd, fd, fd);
1833 Branch(&conversion_done);
1834
1835 bind(&msb_clear);
1836 // rj < 2^63, we can do a simple conversion.
1837 movgr2fr_d(fd, rj);
1838 ffint_d_l(fd, fd);
1839
1840 bind(&conversion_done);
1841}
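
// A minimal standalone sketch of the conversion above: when the top bit of
// the unsigned value is set, the value is halved with the lost bit OR-ed
// back in (so rounding is unaffected), converted as a signed integer, and
// then doubled with fadd_d.
static inline double Uint64ToDoubleSketch(unsigned long long x) {
  if (x < (1ull << 63)) return static_cast<double>(static_cast<long long>(x));
  unsigned long long halved = (x >> 1) | (x & 1);
  return static_cast<double>(static_cast<long long>(halved)) * 2.0;
}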
1842
1843void MacroAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) {
1844 BlockTrampolinePoolScope block_trampoline_pool(this);
1845 UseScratchRegisterScope temps(this);
1846 Register scratch = temps.Acquire();
1847 movfr2gr_d(scratch, fj);
1848 Ffint_s_uw(fd, scratch);
1849}
1850
1851void MacroAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
1852 BlockTrampolinePoolScope block_trampoline_pool(this);
1853 UseScratchRegisterScope temps(this);
1854 Register scratch = temps.Acquire();
1855 DCHECK(rj != scratch);
1856
1857 bstrpick_d(scratch, rj, 31, 0);
1858 movgr2fr_d(fd, scratch);
1859 ffint_s_l(fd, fd);
1860}
1861
1862void MacroAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) {
1863 BlockTrampolinePoolScope block_trampoline_pool(this);
1864 UseScratchRegisterScope temps(this);
1865 Register scratch = temps.Acquire();
1866 movfr2gr_d(scratch, fj);
1867 Ffint_s_ul(fd, scratch);
1868}
1869
1870void MacroAssembler::Ffint_s_ul(FPURegister fd, Register rj) {
1871 BlockTrampolinePoolScope block_trampoline_pool(this);
1872 UseScratchRegisterScope temps(this);
1873 Register scratch = temps.Acquire();
1874 DCHECK(rj != scratch);
1875
1876 Label positive, conversion_done;
1877
1878 Branch(&positive, ge, rj, Operand(zero_reg));
1879
1880 // rj >= 2^63.
1881 andi(scratch, rj, 1);
1882 srli_d(rj, rj, 1);
1883 or_(scratch, scratch, rj);
1884 movgr2fr_d(fd, scratch);
1885 ffint_s_l(fd, fd);
1886 fadd_s(fd, fd, fd);
1887 Branch(&conversion_done);
1888
1889 bind(&positive);
1890 // rj < 2^63, we can do a simple conversion.
1891 movgr2fr_d(fd, rj);
1892 ffint_s_l(fd, fd);
1893
1894 bind(&conversion_done);
1895}
1896
1897void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) {
1898 ftintrne_l_d(fd, fj);
1899}
1900
1901void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) {
1902 ftintrm_l_d(fd, fj);
1903}
1904
1905void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) {
1906 ftintrp_l_d(fd, fj);
1907}
1908
1909void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) {
1910 ftintrz_l_d(fd, fj);
1911}
1912
1913void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj,
1914 FPURegister scratch) {
1915 fabs_d(scratch, fj);
1916 ftintrz_l_d(fd, scratch);
1917}
1918
1919void MacroAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj,
1920 FPURegister scratch) {
1921 UseScratchRegisterScope temps(this);
1922 Register scratch2 = temps.Acquire();
1923 Ftintrz_uw_d(scratch2, fj, scratch);
1924 movgr2fr_w(fd, scratch2);
1925}
1926
1927void MacroAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj,
1928 FPURegister scratch) {
1929 UseScratchRegisterScope temps(this);
1930 Register scratch2 = temps.Acquire();
1931 Ftintrz_uw_s(scratch2, fj, scratch);
1932 movgr2fr_w(fd, scratch2);
1933}
1934
1935void MacroAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj,
1936 FPURegister scratch, Register result) {
1937 UseScratchRegisterScope temps(this);
1938 Register scratch2 = temps.Acquire();
1939 Ftintrz_ul_d(scratch2, fj, scratch, result);
1940 movgr2fr_d(fd, scratch2);
1941}
1942
1943void MacroAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj,
1944 FPURegister scratch, Register result) {
1945 UseScratchRegisterScope temps(this);
1946 Register scratch2 = temps.Acquire();
1947 Ftintrz_ul_s(scratch2, fj, scratch, result);
1948 movgr2fr_d(fd, scratch2);
1949}
1950
1951void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) {
1952 ftintrz_w_d(fd, fj);
1953}
1954
1955void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) {
1956 ftintrne_w_d(fd, fj);
1957}
1958
1959void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) {
1960 ftintrm_w_d(fd, fj);
1961}
1962
1963void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) {
1964 ftintrp_w_d(fd, fj);
1965}
1966
1967void MacroAssembler::Ftintrz_uw_d(Register rd, FPURegister fj,
1968 FPURegister scratch) {
1969 DCHECK(fj != scratch);
1970
1971 {
1972 // Load 2^32 into scratch as its double representation.
1973 UseScratchRegisterScope temps(this);
1974 Register scratch1 = temps.Acquire();
1975 li(scratch1, 0x41F0000000000000);
1976 movgr2fr_d(scratch, scratch1);
1977 }
1978 // Test if scratch > fj.
1979 // If fj < 2^32 we can convert it normally.
1980 Label simple_convert;
1981 CompareF64(fj, scratch, CULT);
1982 BranchTrueShortF(&simple_convert);
1983
1984 // If fj >= 2^32, the result should be UINT32_MAX.
1985 Add_w(rd, zero_reg, -1);
1986
1987 Label done;
1988 Branch(&done);
1989 // Simple conversion.
1990 bind(&simple_convert);
1991 // Double -> Int64 -> Uint32;
1992 ftintrz_l_d(scratch, fj);
1993 movfr2gr_s(rd, scratch);
1994
1995 bind(&done);
1996}
1997
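// Sketch of the saturating double->uint32 path above (helper name
// illustrative): 0x41F0000000000000 is 2^32 as a double, inputs at or above it
// saturate to UINT32_MAX, and the in-range case truncates through int64.
// Negative and NaN inputs are not modelled here.
static inline uint32_t DoubleToUint32Sketch(double d) {
  if (d >= 4294967296.0) {  // 2^32
    return 0xFFFFFFFFu;     // mirrors Add_w(rd, zero_reg, -1)
  }
  return static_cast<uint32_t>(static_cast<int64_t>(d));  // Double->Int64->Uint32
}
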
1998void MacroAssembler::Ftintrz_uw_s(Register rd, FPURegister fj,
1999 FPURegister scratch) {
2000 DCHECK(fj != scratch);
2001 {
2002 // Load 2^32 into scratch as its float representation.
2003 UseScratchRegisterScope temps(this);
2004 Register scratch1 = temps.Acquire();
2005 li(scratch1, 0x4F800000);
2006 movgr2fr_w(scratch, scratch1);
2007 }
2008 // Test if scratch > fj.
2009 // If fj < 2^32 we can convert it normally.
2010 Label simple_convert;
2011 CompareF32(fj, scratch, CULT);
2012 BranchTrueShortF(&simple_convert);
2013
2014 // If fj >= 2^32, the result should be UINT32_MAX.
2015 Add_w(rd, zero_reg, -1);
2016
2017 Label done;
2018 Branch(&done);
2019 // Simple conversion.
2020 bind(&simple_convert);
2021 // Float -> Int64 -> Uint32;
2022 ftintrz_l_s(scratch, fj);
2023 movfr2gr_s(rd, scratch);
2024
2025 bind(&done);
2026}
2027
2028void MacroAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
2029 FPURegister scratch, Register result) {
2030 UseScratchRegisterScope temps(this);
2031 Register scratch1 = temps.Acquire();
2032 DCHECK(fj != scratch);
2033 DCHECK(result.is_valid() ? !AreAliased(rd, result, scratch1)
2034 : !AreAliased(rd, scratch1));
2035
2036 Label simple_convert, done, fail;
2037 if (result.is_valid()) {
2038 mov(result, zero_reg);
2039 Move(scratch, -1.0);
2040 // If fj <= -1 or unordered, then the conversion fails.
2041 CompareF64(fj, scratch, CULE);
2042 BranchTrueShortF(&fail);
2043 }
2044
2045 // Load 2^63 into scratch as its double representation.
2046 li(scratch1, 0x43E0000000000000);
2047 movgr2fr_d(scratch, scratch1);
2048
2049 // Test if scratch > fj.
2050 // If fj < 2^63 or unordered we can convert it normally.
2051 CompareF64(fj, scratch, CULT);
2052 BranchTrueShortF(&simple_convert);
2053
2054 // First we subtract 2^63 from fj, then trunc it to rd
2055 // and add 2^63 to rd.
2056 fsub_d(scratch, fj, scratch);
2057 ftintrz_l_d(scratch, scratch);
2058 movfr2gr_d(rd, scratch);
2059 Or(rd, rd, Operand(1UL << 63));
2060 Branch(&done);
2061
2062 // Simple conversion.
2063 bind(&simple_convert);
2064 ftintrz_l_d(scratch, fj);
2065 movfr2gr_d(rd, scratch);
2066
2067 bind(&done);
2068 if (result.is_valid()) {
2069 // Conversion failed if the result is negative.
2070 addi_d(scratch1, zero_reg, -1);
2071 srli_d(scratch1, scratch1, 1); // Load INT64_MAX (2^63 - 1).
2072 movfr2gr_d(result, scratch);
2073 xor_(result, result, scratch1);
2074 Slt(result, zero_reg, result);
2075 }
2076
2077 bind(&fail);
2078}
2079
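// Sketch of the double->uint64 truncation above for the in-range case (helper
// name illustrative): values of at least 2^63 (0x43E0000000000000) are shifted
// into signed range by subtracting 2^63, truncated, and the high bit is put
// back with an OR. The failure paths for negative or unordered inputs are not
// modelled here.
static inline uint64_t DoubleToUint64Sketch(double d) {
  const double k2To63 = 9223372036854775808.0;  // 2^63
  if (d < k2To63) {
    return static_cast<uint64_t>(static_cast<int64_t>(d));
  }
  int64_t low = static_cast<int64_t>(d - k2To63);
  return static_cast<uint64_t>(low) | (1ull << 63);
}
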
2080void MacroAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
2081 FPURegister scratch, Register result) {
2082 DCHECK(fj != scratch);
2083 DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
2084
2085 Label simple_convert, done, fail;
2086 if (result.is_valid()) {
2087 mov(result, zero_reg);
2088 Move(scratch, -1.0f);
2089 // If fj <= -1 or unordered, then the conversion fails.
2090 CompareF32(fj, scratch, CULE);
2091 BranchTrueShortF(&fail);
2092 }
2093
2094 {
2095 // Load 2^63 into scratch as its float representation.
2096 UseScratchRegisterScope temps(this);
2097 Register scratch1 = temps.Acquire();
2098 li(scratch1, 0x5F000000);
2099 movgr2fr_w(scratch, scratch1);
2100 }
2101
2102 // Test if scratch > fj.
2103 // If fj < 2^63 or unordered, we can convert it normally.
2104 CompareF32(fj, scratch, CULT);
2105 BranchTrueShortF(&simple_convert);
2106
2107 // First we subtract 2^63 from fj, then trunc it to rd
2108 // and add 2^63 to rd.
2109 fsub_s(scratch, fj, scratch);
2110 ftintrz_l_s(scratch, scratch);
2111 movfr2gr_d(rd, scratch);
2112 Or(rd, rd, Operand(1UL << 63));
2113 Branch(&done);
2114
2115 // Simple conversion.
2116 bind(&simple_convert);
2117 ftintrz_l_s(scratch, fj);
2118 movfr2gr_d(rd, scratch);
2119
2120 bind(&done);
2121 if (result.is_valid()) {
2122 // Conversion failed if the result is negative or unordered.
2123 {
2124 UseScratchRegisterScope temps(this);
2125 Register scratch1 = temps.Acquire();
2126 addi_d(scratch1, zero_reg, -1);
2127 srli_d(scratch1, scratch1, 1); // Load INT64_MAX (2^63 - 1).
2128 movfr2gr_d(result, scratch);
2129 xor_(result, result, scratch1);
2130 }
2131 Slt(result, zero_reg, result);
2132 }
2133
2134 bind(&fail);
2135}
2136
2137void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src,
2138 FPURoundingMode mode) {
2139 BlockTrampolinePoolScope block_trampoline_pool(this);
2140 UseScratchRegisterScope temps(this);
2141 Register scratch = temps.Acquire();
2142 Register scratch2 = temps.Acquire();
2143 movfcsr2gr(scratch);
2144 li(scratch2, Operand(mode));
2145 movgr2fcsr(scratch2);
2146 frint_d(dst, src);
2147 movgr2fcsr(scratch);
2148}
2149
2150void MacroAssembler::Floor_d(FPURegister dst, FPURegister src) {
2151 RoundDouble(dst, src, mode_floor);
2152}
2153
2154void MacroAssembler::Ceil_d(FPURegister dst, FPURegister src) {
2155 RoundDouble(dst, src, mode_ceil);
2156}
2157
2158void MacroAssembler::Trunc_d(FPURegister dst, FPURegister src) {
2159 RoundDouble(dst, src, mode_trunc);
2160}
2161
2162void MacroAssembler::Round_d(FPURegister dst, FPURegister src) {
2163 RoundDouble(dst, src, mode_round);
2164}
2165
2166void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
2167 FPURoundingMode mode) {
2168 BlockTrampolinePoolScope block_trampoline_pool(this);
2169 UseScratchRegisterScope temps(this);
2170 Register scratch = temps.Acquire();
2171 Register scratch2 = temps.Acquire();
2172 movfcsr2gr(scratch);
2173 li(scratch2, Operand(mode));
2174 movgr2fcsr(scratch2);
2175 frint_s(dst, src);
2176 movgr2fcsr(scratch);
2177}
2178
2179void MacroAssembler::Floor_s(FPURegister dst, FPURegister src) {
2180 RoundFloat(dst, src, mode_floor);
2181}
2182
2183void MacroAssembler::Ceil_s(FPURegister dst, FPURegister src) {
2184 RoundFloat(dst, src, mode_ceil);
2185}
2186
2187void MacroAssembler::Trunc_s(FPURegister dst, FPURegister src) {
2188 RoundFloat(dst, src, mode_trunc);
2189}
2190
2191void MacroAssembler::Round_s(FPURegister dst, FPURegister src) {
2192 RoundFloat(dst, src, mode_round);
2193}
2194
2195void MacroAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
2196 FPUCondition cc, CFRegister cd, bool f32) {
2197 if (f32) {
2198 fcmp_cond_s(cc, cmp1, cmp2, cd);
2199 } else {
2200 fcmp_cond_d(cc, cmp1, cmp2, cd);
2201 }
2202}
2203
2204void MacroAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
2205 CFRegister cd, bool f32) {
2206 CompareF(cmp1, cmp2, CUN, cd, f32);
2207}
2208
2209void MacroAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
2210 bcnez(cj, target);
2211}
2212
2213void MacroAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
2214 bceqz(cj, target);
2215}
2216
2217void MacroAssembler::BranchTrueF(Label* target, CFRegister cj) {
2218 // TODO(yuyin): can be optimized
2219 bool long_branch = target->is_bound()
2220 ? !is_near(target, OffsetSize::kOffset21)
2221 : is_trampoline_emitted();
2222 if (long_branch) {
2223 Label skip;
2224 BranchFalseShortF(&skip, cj);
2225 Branch(target);
2226 bind(&skip);
2227 } else {
2228 BranchTrueShortF(target, cj);
2229 }
2230}
2231
2232void MacroAssembler::BranchFalseF(Label* target, CFRegister cj) {
2233 bool long_branch = target->is_bound()
2234 ? !is_near(target, OffsetSize::kOffset21)
2235 : is_trampoline_emitted();
2236 if (long_branch) {
2237 Label skip;
2238 BranchTrueShortF(&skip, cj);
2239 Branch(target);
2240 bind(&skip);
2241 } else {
2242 BranchFalseShortF(target, cj);
2243 }
2244}
2245
2246void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2247 UseScratchRegisterScope temps(this);
2248 Register scratch = temps.Acquire();
2249 DCHECK(src_low != scratch);
2250 movfrh2gr_s(scratch, dst);
2251 movgr2fr_w(dst, src_low);
2252 movgr2frh_w(dst, scratch);
2253}
2254
2255void MacroAssembler::Move(FPURegister dst, uint32_t src) {
2256 UseScratchRegisterScope temps(this);
2257 Register scratch = temps.Acquire();
2258 li(scratch, Operand(static_cast<int32_t>(src)));
2259 movgr2fr_w(dst, scratch);
2260}
2261
2262void MacroAssembler::Move(FPURegister dst, uint64_t src) {
2263 // Handle special values first.
2264 if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
2265 fmov_d(dst, kDoubleRegZero);
2266 } else if (src == base::bit_cast<uint64_t>(-0.0) &&
2267 has_double_zero_reg_set_) {
2268 Neg_d(dst, kDoubleRegZero);
2269 } else {
2270 UseScratchRegisterScope temps(this);
2271 Register scratch = temps.Acquire();
2272 li(scratch, Operand(static_cast<int64_t>(src)));
2273 movgr2fr_d(dst, scratch);
2274 if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
2275 }
2276}
2277
2278void MacroAssembler::Movz(Register rd, Register rj, Register rk) {
2279 UseScratchRegisterScope temps(this);
2280 Register scratch = temps.Acquire();
2281 masknez(scratch, rj, rk);
2282 maskeqz(rd, rd, rk);
2283 or_(rd, rd, scratch);
2284}
2285
2286void MacroAssembler::Movn(Register rd, Register rj, Register rk) {
2287 UseScratchRegisterScope temps(this);
2288 Register scratch = temps.Acquire();
2289 maskeqz(scratch, rj, rk);
2290 masknez(rd, rd, rk);
2291 or_(rd, rd, scratch);
2292}
2293
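// Branch-free select as used by Movz above (helper name illustrative): maskeqz
// keeps rd when the condition rk is nonzero, masknez supplies rj when rk is
// zero, and the OR merges the two halves. Movn is the same with the masks
// swapped.
static inline uint64_t MovzSketch(uint64_t rd, uint64_t rj, uint64_t rk) {
  uint64_t keep_rd = (rk != 0) ? rd : 0;  // maskeqz(rd, rd, rk)
  uint64_t take_rj = (rk == 0) ? rj : 0;  // masknez(scratch, rj, rk)
  return keep_rd | take_rj;               // or_(rd, rd, scratch)
}
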
2294void MacroAssembler::LoadZeroIfConditionNotZero(Register dest,
2295 Register condition) {
2296 masknez(dest, dest, condition);
2297}
2298
2299void MacroAssembler::LoadZeroIfConditionZero(Register dest,
2300 Register condition) {
2301 maskeqz(dest, dest, condition);
2302}
2303
2304void MacroAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
2305 UseScratchRegisterScope temps(this);
2306 Register scratch = temps.Acquire();
2307 movcf2gr(scratch, cc);
2308 LoadZeroIfConditionNotZero(dest, scratch);
2309}
2310
2311void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) {
2312 UseScratchRegisterScope temps(this);
2313 Register scratch = temps.Acquire();
2314 movcf2gr(scratch, cc);
2315 LoadZeroIfConditionZero(dest, scratch);
2316}
2317
2318void MacroAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); }
2319
2320void MacroAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); }
2321
2322void MacroAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); }
2323
2324void MacroAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
2325
2326// TODO(LOONG_dev): Optimize like arm64, use simd instruction
2327void MacroAssembler::Popcnt_w(Register rd, Register rj) {
2328 ASM_CODE_COMMENT(this);
2329 // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
2330 //
2331 // A generalization of the best bit counting method to integers of
2332 // bit-widths up to 128 (parameterized by type T) is this:
2333 //
2334 // v = v - ((v >> 1) & (T)~(T)0/3); // temp
2335 // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
2336 // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
2337 // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
2338 //
2339 // There are algorithms which are faster in the cases where very few
2340 // bits are set but the algorithm here attempts to minimize the total
2341 // number of instructions executed even when a large number of bits
2342 // are set.
2343 int32_t B0 = 0x55555555; // (T)~(T)0/3
2344 int32_t B1 = 0x33333333; // (T)~(T)0/15*3
2345 int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
2346 int32_t value = 0x01010101; // (T)~(T)0/255
2347 uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
2348
2349 UseScratchRegisterScope temps(this);
2350 BlockTrampolinePoolScope block_trampoline_pool(this);
2351 Register scratch = temps.Acquire();
2352 Register scratch2 = temps.Acquire();
2353 srli_w(scratch, rj, 1);
2354 li(scratch2, B0);
2355 And(scratch, scratch, scratch2);
2356 Sub_w(scratch, rj, scratch);
2357 li(scratch2, B1);
2358 And(rd, scratch, scratch2);
2359 srli_w(scratch, scratch, 2);
2360 And(scratch, scratch, scratch2);
2361 Add_w(scratch, rd, scratch);
2362 srli_w(rd, scratch, 4);
2363 Add_w(rd, rd, scratch);
2364 li(scratch2, B2);
2365 And(rd, rd, scratch2);
2366 li(scratch, value);
2367 Mul_w(rd, rd, scratch);
2368 srli_w(rd, rd, shift);
2369}
2370
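// The SWAR reduction emitted above, restated as a portable sketch (helper name
// illustrative); Popcnt_d below is the same with 64-bit masks and a shift by 56.
static inline uint32_t Popcnt32Sketch(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // 2-bit partial sums
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit partial sums
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // 8-bit partial sums
  return (v * 0x01010101u) >> 24;                    // add bytes, take top byte
}
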
2371void MacroAssembler::Popcnt_d(Register rd, Register rj) {
2372 ASM_CODE_COMMENT(this);
2373 int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
2374 int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
2375 int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
2376 int64_t value = 0x0101010101010101l; // (T)~(T)0/255
2377 uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE
2378
2379 UseScratchRegisterScope temps(this);
2380 BlockTrampolinePoolScope block_trampoline_pool(this);
2381 Register scratch = temps.Acquire();
2382 Register scratch2 = temps.Acquire();
2383 srli_d(scratch, rj, 1);
2384 li(scratch2, B0);
2385 And(scratch, scratch, scratch2);
2386 Sub_d(scratch, rj, scratch);
2387 li(scratch2, B1);
2388 And(rd, scratch, scratch2);
2389 srli_d(scratch, scratch, 2);
2390 And(scratch, scratch, scratch2);
2391 Add_d(scratch, rd, scratch);
2392 srli_d(rd, scratch, 4);
2393 Add_d(rd, rd, scratch);
2394 li(scratch2, B2);
2395 And(rd, rd, scratch2);
2396 li(scratch, value);
2397 Mul_d(rd, rd, scratch);
2398 srli_d(rd, rd, shift);
2399}
2400
2401void MacroAssembler::ExtractBits(Register dest, Register source, Register pos,
2402 int size, bool sign_extend) {
2403 sra_d(dest, source, pos);
2404 bstrpick_d(dest, dest, size - 1, 0);
2405 if (sign_extend) {
2406 switch (size) {
2407 case 8:
2408 ext_w_b(dest, dest);
2409 break;
2410 case 16:
2411 ext_w_h(dest, dest);
2412 break;
2413 case 32:
2414 // sign-extend word
2415 slli_w(dest, dest, 0);
2416 break;
2417 default:
2418 UNREACHABLE();
2419 }
2420 }
2421}
2422
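// Sketch of ExtractBits above for 0 < size < 64 (helper name illustrative):
// shift the field down, mask it, and optionally replicate its top bit upward.
static inline int64_t ExtractBitsSketch(int64_t source, unsigned pos,
                                        unsigned size, bool sign_extend) {
  uint64_t field = static_cast<uint64_t>(source >> pos) & ((1ull << size) - 1);
  if (sign_extend && (field & (1ull << (size - 1))) != 0) {
    field |= ~0ull << size;  // sign-extend from bit (size - 1)
  }
  return static_cast<int64_t>(field);
}
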
2423void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
2424 int size) {
2425 Rotr_d(dest, dest, pos);
2426 bstrins_d(dest, source, size - 1, 0);
2427 {
2428 UseScratchRegisterScope temps(this);
2429 Register scratch = temps.Acquire();
2430 Sub_d(scratch, zero_reg, pos);
2431 Rotr_d(dest, dest, scratch);
2432 }
2433}
2434
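// Sketch of InsertBits above (helper name illustrative): rotate the target
// field down to bit 0, overwrite it (bstrins_d), and rotate back by 64 - pos,
// which is what the Sub_d/Rotr_d pair computes. Assumes 0 < size < 64 and
// pos + size <= 64.
static inline uint64_t InsertBitsSketch(uint64_t dest, uint64_t source,
                                        unsigned pos, unsigned size) {
  auto rotr = [](uint64_t v, unsigned n) {
    n &= 63;
    return n == 0 ? v : (v >> n) | (v << (64 - n));
  };
  uint64_t mask = (1ull << size) - 1;
  uint64_t rotated = rotr(dest, pos);             // field now at bits [size-1:0]
  rotated = (rotated & ~mask) | (source & mask);  // bstrins_d(dest, source, ...)
  return rotr(rotated, 64 - pos);                 // rotate back
}
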
2435void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2436 DoubleRegister double_input,
2437 Label* done) {
2438 DoubleRegister single_scratch = kScratchDoubleReg;
2439 BlockTrampolinePoolScope block_trampoline_pool(this);
2440 UseScratchRegisterScope temps(this);
2441 Register scratch = temps.Acquire();
2442 Register scratch2 = temps.Acquire();
2443
2444 ftintrz_l_d(single_scratch, double_input);
2445 movfr2gr_d(scratch2, single_scratch);
2446 li(scratch, 1L << 63);
2447 Xor(scratch, scratch, scratch2);
2448 rotri_d(scratch2, scratch, 1);
2449 movfr2gr_s(result, single_scratch);
2450 Branch(done, ne, scratch, Operand(scratch2));
2451}
2452
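// Sketch of the saturation check above (helper name illustrative): ftintrz
// saturates out-of-range inputs to INT64_MIN or INT64_MAX. XOR-ing the result
// with 1 << 63 maps those two values to all-zeros and all-ones, the only bit
// patterns unchanged by a 1-bit rotation, so a single compare decides whether
// the inline conversion can be trusted or the stub must be called.
static inline bool TruncationDidNotSaturateSketch(int64_t result) {
  uint64_t x = static_cast<uint64_t>(result) ^ (1ull << 63);
  uint64_t rotated = (x >> 1) | (x << 63);
  return rotated != x;  // true: keep the inline result; false: fall back
}
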
2453void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2454 Register result,
2455 DoubleRegister double_input,
2456 StubCallMode stub_mode) {
2457 Label done;
2458
2459 TryInlineTruncateDoubleToI(result, double_input, &done);
2460
2461 // If we fell through then inline version didn't succeed - call stub instead.
2462 Sub_d(sp, sp,
2463 Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack.
2464 St_d(ra, MemOperand(sp, kSystemPointerSize));
2465 Fst_d(double_input, MemOperand(sp, 0));
2466
2467#if V8_ENABLE_WEBASSEMBLY
2468 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2469 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
2470#else
2471 // For balance.
2472 if (false) {
2473#endif // V8_ENABLE_WEBASSEMBLY
2474 } else {
2475 CallBuiltin(Builtin::kDoubleToI);
2476 }
2477
2478 Pop(ra, result);
2479 bind(&done);
2480}
2481
2482void MacroAssembler::CompareWord(Condition cond, Register dst, Register lhs,
2483 const Operand& rhs) {
2484 switch (cond) {
2485 case eq:
2486 case ne: {
2487 if (rhs.IsImmediate()) {
2488 if (rhs.immediate() == 0) {
2489 if (cond == eq) {
2490 Sltu(dst, lhs, 1);
2491 } else {
2492 Sltu(dst, zero_reg, lhs);
2493 }
2494 } else if (is_int12(-rhs.immediate())) {
2495 Add_d(dst, lhs, Operand(-rhs.immediate()));
2496 if (cond == eq) {
2497 Sltu(dst, dst, 1);
2498 } else {
2499 Sltu(dst, zero_reg, dst);
2500 }
2501 } else {
2502 Xor(dst, lhs, rhs);
2503 if (cond == eq) {
2504 Sltu(dst, dst, 1);
2505 } else {
2506 Sltu(dst, zero_reg, dst);
2507 }
2508 }
2509 } else {
2510 Xor(dst, lhs, rhs);
2511 if (cond == eq) {
2512 Sltu(dst, dst, 1);
2513 } else {
2514 Sltu(dst, zero_reg, dst);
2515 }
2516 }
2517 break;
2518 }
2519 case lt:
2520 Slt(dst, lhs, rhs);
2521 break;
2522 case gt:
2523 Sgt(dst, lhs, rhs);
2524 break;
2525 case le:
2526 Sle(dst, lhs, rhs);
2527 break;
2528 case ge:
2529 Sge(dst, lhs, rhs);
2530 break;
2531 case lo:
2532 Sltu(dst, lhs, rhs);
2533 break;
2534 case hs:
2535 Sgeu(dst, lhs, rhs);
2536 break;
2537 case hi:
2538 Sgtu(dst, lhs, rhs);
2539 break;
2540 case ls:
2541 Sleu(dst, lhs, rhs);
2542 break;
2543 default:
2544 UNREACHABLE();
2545 }
2546}
2547
2548// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2549#define BRANCH_ARGS_CHECK(cond, rj, rk) \
2550 DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \
2551 (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg)))
2552
2553void MacroAssembler::Branch(Label* L, bool need_link) {
2554 int offset = GetOffset(L, OffsetSize::kOffset26);
2555 if (need_link) {
2556 bl(offset);
2557 } else {
2558 b(offset);
2559 }
2560}
2561
2562void MacroAssembler::Branch(Label* L, Condition cond, Register rj,
2563 const Operand& rk, bool need_link) {
2564 if (L->is_bound()) {
2565 BRANCH_ARGS_CHECK(cond, rj, rk);
2566 if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) {
2567 if (cond != cc_always) {
2568 Label skip;
2569 Condition neg_cond = NegateCondition(cond);
2570 BranchShort(&skip, neg_cond, rj, rk, need_link);
2571 Branch(L, need_link);
2572 bind(&skip);
2573 } else {
2574 Branch(L);
2575 }
2576 }
2577 } else {
2578 if (is_trampoline_emitted()) {
2579 if (cond != cc_always) {
2580 Label skip;
2581 Condition neg_cond = NegateCondition(cond);
2582 BranchShort(&skip, neg_cond, rj, rk, need_link);
2583 Branch(L, need_link);
2584 bind(&skip);
2585 } else {
2586 Branch(L);
2587 }
2588 } else {
2589 BranchShort(L, cond, rj, rk, need_link);
2590 }
2591 }
2592}
2593
2594void MacroAssembler::Branch(Label* L, Condition cond, Register rj,
2595 RootIndex index, bool need_sign_extend) {
2596 UseScratchRegisterScope temps(this);
2597 Register right = temps.Acquire();
2598 if (COMPRESS_POINTERS_BOOL) {
2599 Register left = rj;
2600 if (need_sign_extend) {
2601 left = temps.Acquire();
2602 slli_w(left, rj, 0);
2603 }
2604 LoadTaggedRoot(right, index);
2605 Branch(L, cond, left, Operand(right));
2606 } else {
2607 LoadRoot(right, index);
2608 Branch(L, cond, rj, Operand(right));
2609 }
2610}
2611
2612int32_t MacroAssembler::GetOffset(Label* L, OffsetSize bits) {
2613 return branch_offset_helper(L, bits) >> 2;
2614}
2615
2616Register MacroAssembler::GetRkAsRegisterHelper(const Operand& rk,
2617 Register scratch) {
2618 Register r2 = no_reg;
2619 if (rk.is_reg()) {
2620 r2 = rk.rm();
2621 } else {
2622 r2 = scratch;
2623 li(r2, rk);
2624 }
2625
2626 return r2;
2627}
2628
2629bool MacroAssembler::BranchShortOrFallback(Label* L, Condition cond,
2630 Register rj, const Operand& rk,
2631 bool need_link) {
2632 UseScratchRegisterScope temps(this);
2633 BlockTrampolinePoolScope block_trampoline_pool(this);
2634 Register scratch = temps.Acquire();
2635 DCHECK_NE(rj, zero_reg);
2636
2637 // Be careful to always use shifted_branch_offset only just before the
2638 // branch instruction, as the location will be remembered for patching the
2639 // target.
2640 {
2641 BlockTrampolinePoolScope block_trampoline_pool(this);
2642 int offset = 0;
2643 switch (cond) {
2644 case cc_always:
2645 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2646 offset = GetOffset(L, OffsetSize::kOffset26);
2647 if (need_link) {
2648 bl(offset);
2649 } else {
2650 b(offset);
2651 }
2652 break;
2653 case eq:
2654 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2655 // beq is used here to make the code patchable. Otherwise b should
2656 // be used which has no condition field so is not patchable.
2657 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2658 if (need_link) pcaddi(ra, 2);
2659 offset = GetOffset(L, OffsetSize::kOffset16);
2660 beq(rj, rj, offset);
2661 } else if (IsZero(rk)) {
2662 if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
2663 if (need_link) pcaddi(ra, 2);
2664 offset = GetOffset(L, OffsetSize::kOffset21);
2665 beqz(rj, offset);
2666 } else {
2667 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2668 if (need_link) pcaddi(ra, 2);
2669 // We don't want any other register but scratch clobbered.
2670 Register sc = GetRkAsRegisterHelper(rk, scratch);
2671 offset = GetOffset(L, OffsetSize::kOffset16);
2672 beq(rj, sc, offset);
2673 }
2674 break;
2675 case ne:
2676 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2677 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2678 if (need_link) pcaddi(ra, 2);
2679 // bne is used here to make the code patchable. Otherwise we
2680 // should not generate any instruction.
2681 offset = GetOffset(L, OffsetSize::kOffset16);
2682 bne(rj, rj, offset);
2683 } else if (IsZero(rk)) {
2684 if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
2685 if (need_link) pcaddi(ra, 2);
2686 offset = GetOffset(L, OffsetSize::kOffset21);
2687 bnez(rj, offset);
2688 } else {
2689 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2690 if (need_link) pcaddi(ra, 2);
2691 // We don't want any other register but scratch clobbered.
2692 Register sc = GetRkAsRegisterHelper(rk, scratch);
2693 offset = GetOffset(L, OffsetSize::kOffset16);
2694 bne(rj, sc, offset);
2695 }
2696 break;
2697
2698 // Signed comparison.
2699 case greater:
2700 // rj > rk
2701 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2702 // No code needs to be emitted.
2703 } else if (IsZero(rk)) {
2704 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2705 if (need_link) pcaddi(ra, 2);
2706 offset = GetOffset(L, OffsetSize::kOffset16);
2707 blt(zero_reg, rj, offset);
2708 } else {
2709 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2710 if (need_link) pcaddi(ra, 2);
2711 Register sc = GetRkAsRegisterHelper(rk, scratch);
2712 DCHECK(rj != sc);
2713 offset = GetOffset(L, OffsetSize::kOffset16);
2714 blt(sc, rj, offset);
2715 }
2716 break;
2717 case greater_equal:
2718 // rj >= rk
2719 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2720 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2721 if (need_link) pcaddi(ra, 2);
2722 offset = GetOffset(L, OffsetSize::kOffset26);
2723 b(offset);
2724 } else if (IsZero(rk)) {
2725 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2726 if (need_link) pcaddi(ra, 2);
2727 offset = GetOffset(L, OffsetSize::kOffset16);
2728 bge(rj, zero_reg, offset);
2729 } else {
2730 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2731 if (need_link) pcaddi(ra, 2);
2732 Register sc = GetRkAsRegisterHelper(rk, scratch);
2733 DCHECK(rj != sc);
2734 offset = GetOffset(L, OffsetSize::kOffset16);
2735 bge(rj, sc, offset);
2736 }
2737 break;
2738 case less:
2739 // rj < rk
2740 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2741 // No code needs to be emitted.
2742 } else if (IsZero(rk)) {
2743 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2744 if (need_link) pcaddi(ra, 2);
2745 offset = GetOffset(L, OffsetSize::kOffset16);
2746 blt(rj, zero_reg, offset);
2747 } else {
2748 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2749 if (need_link) pcaddi(ra, 2);
2750 Register sc = GetRkAsRegisterHelper(rk, scratch);
2751 DCHECK(rj != sc);
2752 offset = GetOffset(L, OffsetSize::kOffset16);
2753 blt(rj, sc, offset);
2754 }
2755 break;
2756 case less_equal:
2757 // rj <= rk
2758 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2759 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2760 if (need_link) pcaddi(ra, 2);
2761 offset = GetOffset(L, OffsetSize::kOffset26);
2762 b(offset);
2763 } else if (IsZero(rk)) {
2764 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2765 if (need_link) pcaddi(ra, 2);
2766 offset = GetOffset(L, OffsetSize::kOffset16);
2767 bge(zero_reg, rj, offset);
2768 } else {
2769 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2770 if (need_link) pcaddi(ra, 2);
2771 Register sc = GetRkAsRegisterHelper(rk, scratch);
2772 DCHECK(rj != sc);
2773 offset = GetOffset(L, OffsetSize::kOffset16);
2774 bge(sc, rj, offset);
2775 }
2776 break;
2777
2778 // Unsigned comparison.
2779 case Ugreater:
2780 // rj > rk
2781 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2782 // No code needs to be emitted.
2783 } else if (IsZero(rk)) {
2784 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2785 if (need_link) pcaddi(ra, 2);
2786 offset = GetOffset(L, OffsetSize::kOffset26);
2787 bnez(rj, offset);
2788 } else {
2789 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2790 if (need_link) pcaddi(ra, 2);
2791 Register sc = GetRkAsRegisterHelper(rk, scratch);
2792 DCHECK(rj != sc);
2793 offset = GetOffset(L, OffsetSize::kOffset16);
2794 bltu(sc, rj, offset);
2795 }
2796 break;
2797 case Ugreater_equal:
2798 // rj >= rk
2799 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2800 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2801 if (need_link) pcaddi(ra, 2);
2802 offset = GetOffset(L, OffsetSize::kOffset26);
2803 b(offset);
2804 } else if (IsZero(rk)) {
2805 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2806 if (need_link) pcaddi(ra, 2);
2807 offset = GetOffset(L, OffsetSize::kOffset26);
2808 b(offset);
2809 } else {
2810 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2811 if (need_link) pcaddi(ra, 2);
2812 Register sc = GetRkAsRegisterHelper(rk, scratch);
2813 DCHECK(rj != sc);
2814 offset = GetOffset(L, OffsetSize::kOffset16);
2815 bgeu(rj, sc, offset);
2816 }
2817 break;
2818 case Uless:
2819 // rj < rk
2820 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2821 // No code needs to be emitted.
2822 } else if (IsZero(rk)) {
2823 // No code needs to be emitted.
2824 } else {
2825 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2826 if (need_link) pcaddi(ra, 2);
2827 Register sc = GetRkAsRegisterHelper(rk, scratch);
2828 DCHECK(rj != sc);
2829 offset = GetOffset(L, OffsetSize::kOffset16);
2830 bltu(rj, sc, offset);
2831 }
2832 break;
2833 case Uless_equal:
2834 // rj <= rk
2835 if (rk.is_reg() && rj.code() == rk.rm().code()) {
2836 if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
2837 if (need_link) pcaddi(ra, 2);
2838 offset = GetOffset(L, OffsetSize::kOffset26);
2839 b(offset);
2840 } else if (IsZero(rk)) {
2841 if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
2842 if (need_link) pcaddi(ra, 2);
2843 beqz(rj, L);
2844 } else {
2845 if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
2846 if (need_link) pcaddi(ra, 2);
2847 Register sc = GetRkAsRegisterHelper(rk, scratch);
2848 DCHECK(rj != sc);
2849 offset = GetOffset(L, OffsetSize::kOffset16);
2850 bgeu(sc, rj, offset);
2851 }
2852 break;
2853 default:
2854 UNREACHABLE();
2855 }
2856 }
2857 return true;
2858}
2859
2860void MacroAssembler::BranchShort(Label* L, Condition cond, Register rj,
2861 const Operand& rk, bool need_link) {
2862 BRANCH_ARGS_CHECK(cond, rj, rk);
2863 bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
2864 DCHECK(result);
2865 USE(result);
2866}
2867
2868void MacroAssembler::CompareTaggedAndBranch(Label* label, Condition cond,
2869 Register r1, const Operand& r2,
2870 bool need_link) {
2871 if (COMPRESS_POINTERS_BOOL) {
2872 UseScratchRegisterScope temps(this);
2873 Register scratch0 = temps.Acquire();
2874 slli_w(scratch0, r1, 0);
2875 if (IsZero(r2)) {
2876 Branch(label, cond, scratch0, Operand(zero_reg), need_link);
2877 } else {
2878 Register scratch1 = temps.Acquire();
2879 if (r2.is_reg()) {
2880 slli_w(scratch1, r2.rm(), 0);
2881 } else {
2882 li(scratch1, r2);
2883 }
2884 Branch(label, cond, scratch0, Operand(scratch1), need_link);
2885 }
2886 } else {
2887 Branch(label, cond, r1, r2, need_link);
2888 }
2889}
2890
2891void MacroAssembler::LoadLabelRelative(Register dest, Label* target) {
2892 ASM_CODE_COMMENT(this);
2893 // pcaddi can handle a 22-bit pc offset.
2894 int32_t offset = branch_offset_helper(target, OffsetSize::kOffset20);
2895 DCHECK(is_int22(offset));
2896 pcaddi(dest, offset >> 2);
2897}
2898
2899void MacroAssembler::LoadFromConstantsTable(Register destination,
2900 int constant_index) {
2901 ASM_CODE_COMMENT(this);
2902 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
2903 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
2904 LoadTaggedField(destination,
2905 FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
2906 constant_index)));
2907}
2908
2909void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
2910 Ld_d(destination, MemOperand(kRootRegister, offset));
2911}
2912
2913void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
2914 St_d(value, MemOperand(kRootRegister, offset));
2915}
2916
2917void MacroAssembler::LoadRootRegisterOffset(Register destination,
2918 intptr_t offset) {
2919 if (offset == 0) {
2920 mov(destination, kRootRegister);
2921 } else {
2922 Add_d(destination, kRootRegister, Operand(offset));
2923 }
2924}
2925
2926MemOperand MacroAssembler::ExternalReferenceAsOperand(
2927 ExternalReference reference, Register scratch) {
2928 if (root_array_available()) {
2929 if (reference.IsIsolateFieldId()) {
2930 return MemOperand(kRootRegister, reference.offset_from_root_register());
2931 }
2932 if (options().enable_root_relative_access) {
2933 int64_t offset =
2934 RootRegisterOffsetForExternalReference(isolate(), reference);
2935 if (is_int32(offset)) {
2936 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
2937 }
2938 }
2939 if (options().isolate_independent_code) {
2940 if (IsAddressableThroughRootRegister(isolate(), reference)) {
2941 // Some external references can be efficiently loaded as an offset from
2942 // kRootRegister.
2943 intptr_t offset =
2944 RootRegisterOffsetForExternalReference(isolate(), reference);
2945 CHECK(is_int32(offset));
2946 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
2947 } else {
2948 // Otherwise, do a memory load from the external reference table.
2949 DCHECK(scratch.is_valid());
2950 Ld_d(scratch,
2951 MemOperand(kRootRegister,
2952 RootRegisterOffsetForExternalReferenceTableEntry(
2953 isolate(), reference)));
2954 return MemOperand(scratch, 0);
2955 }
2956 }
2957 }
2958 DCHECK(scratch.is_valid());
2959 li(scratch, reference);
2960 return MemOperand(scratch, 0);
2961}
2962
2963static bool IsNearCallOffset(int64_t offset) {
2964 return is_int28(offset);
2965}
2966
2967// The calculated offset is either:
2968// * the 'target' input unmodified if this is a Wasm call, or
2969// * the offset of the target from the current PC, in instructions, for any
2970// other type of call.
2971// static
2972int64_t MacroAssembler::CalculateTargetOffset(Address target,
2973 RelocInfo::Mode rmode,
2974 uint8_t* pc) {
2975 int64_t offset = static_cast<int64_t>(target);
2976 if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) {
2977 // The target of WebAssembly calls is still an index instead of an actual
2978 // address at this point, and needs to be encoded as-is.
2979 return offset;
2980 }
2981 offset -= reinterpret_cast<int64_t>(pc);
2983 return offset;
2984}
2985
2986void MacroAssembler::Jump(Register target, Condition cond, Register rj,
2987 const Operand& rk) {
2988 BlockTrampolinePoolScope block_trampoline_pool(this);
2989 if (cond == cc_always) {
2990 jirl(zero_reg, target, 0);
2991 } else {
2992 BRANCH_ARGS_CHECK(cond, rj, rk);
2993 Label skip;
2994 Branch(&skip, NegateCondition(cond), rj, rk);
2995 jirl(zero_reg, target, 0);
2996 bind(&skip);
2997 }
2998}
2999
3000void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
3001 Condition cond, Register rj, const Operand& rk) {
3002 Label skip;
3003 if (cond != cc_always) {
3004 Branch(&skip, NegateCondition(cond), rj, rk);
3005 }
3006 {
3007 BlockTrampolinePoolScope block_trampoline_pool(this);
3008 UseScratchRegisterScope temps(this);
3009 Register scratch = temps.Acquire();
3010 li(scratch, Operand(target, rmode));
3011 jirl(zero_reg, scratch, 0);
3012 bind(&skip);
3013 }
3014}
3015
3016void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
3017 Register rj, const Operand& rk) {
3018 Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
3019}
3020
3021void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
3022 Condition cond, Register rj, const Operand& rk) {
3023 DCHECK(RelocInfo::IsCodeTarget(rmode));
3024 BlockTrampolinePoolScope block_trampoline_pool(this);
3025 Label skip;
3026 if (cond != cc_always) {
3027 BranchShort(&skip, NegateCondition(cond), rj, rk);
3028 }
3029
3030 Builtin builtin = Builtin::kNoBuiltinId;
3031 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
3032 TailCallBuiltin(builtin);
3033 bind(&skip);
3034 return;
3035 }
3036
3037 int32_t target_index = AddCodeTarget(code);
3038 Jump(static_cast<Address>(target_index), rmode, cc_always, rj, rk);
3039 bind(&skip);
3040}
3041
3042void MacroAssembler::Jump(const ExternalReference& reference) {
3043 UseScratchRegisterScope temps(this);
3044 Register scratch = temps.Acquire();
3045 li(scratch, reference);
3046 Jump(scratch);
3047}
3048
3049// Note: To call gcc-compiled C code on LoongArch, you must call through t[0-8].
3050void MacroAssembler::Call(Register target, Condition cond, Register rj,
3051 const Operand& rk) {
3052 BlockTrampolinePoolScope block_trampoline_pool(this);
3053 if (cond == cc_always) {
3054 jirl(ra, target, 0);
3055 } else {
3056 BRANCH_ARGS_CHECK(cond, rj, rk);
3057 Label skip;
3058 Branch(&skip, NegateCondition(cond), rj, rk);
3059 jirl(ra, target, 0);
3060 bind(&skip);
3061 }
3063}
3064
3065void MacroAssembler::CompareTaggedRootAndBranch(const Register& obj,
3066 RootIndex index, Condition cc,
3067 Label* target) {
3068 ASM_CODE_COMMENT(this);
3069 // AssertSmiOrHeapObjectInMainCompressionCage(obj);
3070 UseScratchRegisterScope temps(this);
3071 if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
3072 CompareTaggedAndBranch(target, cc, obj, Operand(ReadOnlyRootPtr(index)));
3073 return;
3074 }
3075 // Some smi roots contain system pointer size values like stack limits.
3078 Register temp = temps.Acquire();
3079 DCHECK(!AreAliased(obj, temp));
3080 LoadRoot(temp, index);
3081 CompareTaggedAndBranch(target, cc, obj, Operand(temp));
3082}
3083
3084// Compare the object in a register to a value from the root list.
3085void MacroAssembler::CompareRootAndBranch(const Register& obj, RootIndex index,
3086 Condition cc, Label* target,
3087 ComparisonMode mode) {
3088 ASM_CODE_COMMENT(this);
3089 if (mode == ComparisonMode::kFullPointer ||
3090 !base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
3091 RootIndex::kLastStrongOrReadOnlyRoot)) {
3092 // Some smi roots contain system pointer size values like stack limits.
3093 UseScratchRegisterScope temps(this);
3094 Register temp = temps.Acquire();
3095 DCHECK(!AreAliased(obj, temp));
3096 LoadRoot(temp, index);
3097 Branch(target, cc, obj, Operand(temp));
3098 return;
3099 }
3100 CompareTaggedRootAndBranch(obj, index, cc, target);
3101}
3102
3103void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
3104 unsigned higher_limit,
3105 Label* on_in_range) {
3106 ASM_CODE_COMMENT(this);
3107 if (lower_limit != 0) {
3108 UseScratchRegisterScope temps(this);
3109 Register scratch = temps.Acquire();
3110 Sub_d(scratch, value, Operand(lower_limit));
3111 Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
3112 } else {
3113 Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
3114 }
3115}
3116
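// The single-compare range test used above, as a sketch (helper name
// illustrative): assuming lower_limit <= higher_limit, value lies in the
// inclusive range iff value - lower_limit <= higher_limit - lower_limit when
// both sides are compared as unsigned numbers, so one branch suffices.
static inline bool IsInRangeSketch(uint32_t value, uint32_t lower_limit,
                                   uint32_t higher_limit) {
  return (value - lower_limit) <= (higher_limit - lower_limit);
}
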
3117void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
3118 Register rj, const Operand& rk) {
3119 BlockTrampolinePoolScope block_trampoline_pool(this);
3120 Label skip;
3121 if (cond != cc_always) {
3122 BranchShort(&skip, NegateCondition(cond), rj, rk);
3123 }
3124 intptr_t offset_diff = target - pc_offset();
3125 if (RelocInfo::IsNoInfo(rmode) && is_int28(offset_diff)) {
3126 bl(offset_diff >> 2);
3127 } else if (RelocInfo::IsNoInfo(rmode) && is_int38(offset_diff)) {
3128 UseScratchRegisterScope temps(this);
3129 Register scratch = temps.Acquire();
3130 pcaddu18i(scratch, static_cast<int32_t>(offset_diff) >> 18);
3131 jirl(ra, scratch, (offset_diff & 0x3ffff) >> 2);
3132 } else {
3133 UseScratchRegisterScope temps(this);
3134 Register scratch = temps.Acquire();
3135 li(scratch, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
3136 Call(scratch, cc_always, rj, rk);
3137 }
3138 bind(&skip);
3139}
3140
3141void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
3142 Condition cond, Register rj, const Operand& rk) {
3143 BlockTrampolinePoolScope block_trampoline_pool(this);
3144 Builtin builtin = Builtin::kNoBuiltinId;
3145 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
3146 CallBuiltin(builtin);
3147 return;
3148 }
3149
3150 DCHECK(RelocInfo::IsCodeTarget(rmode));
3151 int32_t target_index = AddCodeTarget(code);
3152 Call(static_cast<Address>(target_index), rmode, cond, rj, rk);
3153}
3154
3155void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index,
3156 Register target) {
3157 ASM_CODE_COMMENT(this);
3158 static_assert(kSystemPointerSize == 8);
3159 static_assert(kSmiTagSize == 1);
3160 static_assert(kSmiTag == 0);
3161
3162 // The builtin_index register contains the builtin index as a Smi.
3163 SmiUntag(target, builtin_index);
3164 Alsl_d(target, target, kRootRegister, kSystemPointerSizeLog2);
3165 Ld_d(target, MemOperand(target, IsolateData::builtin_entry_table_offset()));
3166}
3167
3168void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
3169 Register destination) {
3170 Ld_d(destination, EntryFromBuiltinAsOperand(builtin));
3171}
3172
3173MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
3174 return MemOperand(kRootRegister,
3175 IsolateData::BuiltinEntrySlotOffset(builtin));
3176}
3177
3178void MacroAssembler::CallBuiltinByIndex(Register builtin_index,
3179 Register target) {
3180 ASM_CODE_COMMENT(this);
3181 LoadEntryFromBuiltinIndex(builtin_index, target);
3182 Call(target);
3183}
3184
3185void MacroAssembler::CallBuiltin(Builtin builtin) {
3186 ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
3187 UseScratchRegisterScope temps(this);
3188 Register temp = temps.Acquire();
3189 switch (options().builtin_call_jump_mode) {
3190 case BuiltinCallJumpMode::kAbsolute: {
3191 li(temp, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
3192 Call(temp);
3193 break;
3194 }
3195 case BuiltinCallJumpMode::kPCRelative: {
3196 RecordRelocInfo(RelocInfo::NEAR_BUILTIN_ENTRY);
3197 bl(static_cast<int>(builtin));
3199 break;
3200 }
3201 case BuiltinCallJumpMode::kIndirect: {
3202 LoadEntryFromBuiltin(builtin, temp);
3203 Call(temp);
3204 break;
3205 }
3206 case BuiltinCallJumpMode::kForMksnapshot: {
3207 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
3208 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
3209 int32_t code_target_index = AddCodeTarget(code);
3210 RecordRelocInfo(RelocInfo::CODE_TARGET);
3211 bl(code_target_index);
3213 } else {
3214 LoadEntryFromBuiltin(builtin, temp);
3215 Call(temp);
3216 }
3217 break;
3218 }
3219 }
3220}
3221
3222void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond,
3223 Register type, Operand range) {
3224 if (cond != cc_always) {
3225 Label done;
3226 Branch(&done, NegateCondition(cond), type, range);
3227 TailCallBuiltin(builtin);
3228 bind(&done);
3229 } else {
3230 TailCallBuiltin(builtin);
3231 }
3232}
3233
3234void MacroAssembler::TailCallBuiltin(Builtin builtin) {
3235 ASM_CODE_COMMENT_STRING(this,
3236 CommentForOffHeapTrampoline("tail call", builtin));
3237 UseScratchRegisterScope temps(this);
3238 Register temp = temps.Acquire();
3239
3240 switch (options().builtin_call_jump_mode) {
3241 case BuiltinCallJumpMode::kAbsolute: {
3242 li(temp, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
3243 Jump(temp);
3244 break;
3245 }
3246 case BuiltinCallJumpMode::kIndirect: {
3247 LoadEntryFromBuiltin(builtin, temp);
3248 Jump(temp);
3249 break;
3250 }
3251 case BuiltinCallJumpMode::kPCRelative: {
3252 RecordRelocInfo(RelocInfo::NEAR_BUILTIN_ENTRY);
3253 b(static_cast<int>(builtin));
3255 break;
3256 }
3257 case BuiltinCallJumpMode::kForMksnapshot: {
3258 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
3259 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
3260 int32_t code_target_index = AddCodeTarget(code);
3261 RecordRelocInfo(RelocInfo::CODE_TARGET);
3262 b(code_target_index);
3263 } else {
3264 LoadEntryFromBuiltin(builtin, temp);
3265 Jump(temp);
3266 }
3267 break;
3268 }
3269 }
3270}
3271
3272void MacroAssembler::StoreReturnAddressAndCall(Register target) {
3273 ASM_CODE_COMMENT(this);
3274 // This generates the final instruction sequence for calls to C functions
3275 // once an exit frame has been constructed.
3276 //
3277 // Note that this assumes the caller code (i.e. the InstructionStream object
3278 // currently being generated) is immovable or that the callee function cannot
3279 // trigger GC, since the callee function will return to it.
3280
3281 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
3282 static constexpr int kNumInstructionsToJump = 2;
3283 Label find_ra;
3284 // Adjust the value in ra to point to the correct return location, 2nd
3285 // instruction past the real call into C code (the jirl), and push it.
3286 // This is the return address of the exit frame.
3287 pcaddi(ra, kNumInstructionsToJump + 1);
3288 bind(&find_ra);
3289
3290 // This spot was reserved in EnterExitFrame.
3291 St_d(ra, MemOperand(sp, 0));
3292 // Stack is still aligned.
3293
3294 // TODO(LOONG_dev): can be jirl target? a0 -- a7?
3295 jirl(zero_reg, target, 0);
3296 // Make sure the stored 'ra' points to this position.
3297 DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
3298}
3299
3300void MacroAssembler::DropArguments(Register count) {
3301 Alsl_d(sp, count, sp, kSystemPointerSizeLog2);
3302}
3303
3304void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
3305 Register receiver) {
3306 DCHECK(!AreAliased(argc, receiver));
3307 DropArguments(argc);
3308 Push(receiver);
3309}
3310
3311void MacroAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
3312 Jump(ra, cond, rj, rk);
3313}
3314
3315void MacroAssembler::Drop(int count, Condition cond, Register reg,
3316 const Operand& op) {
3317 if (count <= 0) {
3318 return;
3319 }
3320
3321 Label skip;
3322
3323 if (cond != al) {
3324 Branch(&skip, NegateCondition(cond), reg, op);
3325 }
3326
3327 Add_d(sp, sp, Operand(count * kSystemPointerSize));
3328
3329 if (cond != al) {
3330 bind(&skip);
3331 }
3332}
3333
3334void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
3335 if (scratch == no_reg) {
3336 Xor(reg1, reg1, Operand(reg2));
3337 Xor(reg2, reg2, Operand(reg1));
3338 Xor(reg1, reg1, Operand(reg2));
3339 } else {
3340 mov(scratch, reg1);
3341 mov(reg1, reg2);
3342 mov(reg2, scratch);
3343 }
3344}
3345
3346void MacroAssembler::Call(Label* target) { Branch(target, true); }
3347
3348void MacroAssembler::Push(Tagged<Smi> smi) {
3349 UseScratchRegisterScope temps(this);
3350 Register scratch = temps.Acquire();
3351 li(scratch, Operand(smi));
3352 Push(scratch);
3353}
3354
3355void MacroAssembler::Push(Handle<HeapObject> handle) {
3356 UseScratchRegisterScope temps(this);
3357 Register scratch = temps.Acquire();
3358 li(scratch, Operand(handle));
3359 Push(scratch);
3360}
3361
3362void MacroAssembler::PushArray(Register array, Register size, Register scratch,
3363 Register scratch2, PushArrayOrder order) {
3364 DCHECK(!AreAliased(array, size, scratch, scratch2));
3365 Label loop, entry;
3366 if (order == PushArrayOrder::kReverse) {
3367 mov(scratch, zero_reg);
3368 jmp(&entry);
3369 bind(&loop);
3370 Alsl_d(scratch2, scratch, array, kSystemPointerSizeLog2);
3371 Ld_d(scratch2, MemOperand(scratch2, 0));
3372 Push(scratch2);
3373 Add_d(scratch, scratch, Operand(1));
3374 bind(&entry);
3375 Branch(&loop, less, scratch, Operand(size));
3376 } else {
3377 mov(scratch, size);
3378 jmp(&entry);
3379 bind(&loop);
3380 Alsl_d(scratch2, scratch, array, kSystemPointerSizeLog2);
3381 Ld_d(scratch2, MemOperand(scratch2, 0));
3382 Push(scratch2);
3383 bind(&entry);
3384 Add_d(scratch, scratch, Operand(-1));
3385 Branch(&loop, greater_equal, scratch, Operand(zero_reg));
3386 }
3387}
3388
3389// ---------------------------------------------------------------------------
3390// Exception handling.
3391
3392void MacroAssembler::PushStackHandler() {
3393 // Adjust this code if not the case.
3394 static_assert(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
3395 static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
3396
3397 Push(Smi::zero()); // Padding.
3398
3399 // Link the current handler as the next handler.
3400 li(t2,
3401 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
3402 Ld_d(t1, MemOperand(t2, 0));
3403 Push(t1);
3404
3405 // Set this new handler as the current one.
3406 St_d(sp, MemOperand(t2, 0));
3407}
3408
3409void MacroAssembler::PopStackHandler() {
3410 static_assert(StackHandlerConstants::kNextOffset == 0);
3411 Pop(a1);
3412 Add_d(sp, sp,
3413 Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
3414 kSystemPointerSize)));
3415 UseScratchRegisterScope temps(this);
3416 Register scratch = temps.Acquire();
3417 li(scratch,
3418 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
3419 St_d(a1, MemOperand(scratch, 0));
3420}
3421
3422void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
3423 const DoubleRegister src) {
3424 fsub_d(dst, src, kDoubleRegZero);
3425}
3426
3427// -----------------------------------------------------------------------------
3428// JavaScript invokes.
3429
3430void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
3431 ASM_CODE_COMMENT(this);
3432 DCHECK(root_array_available());
3433 intptr_t offset = kind == StackLimitKind::kRealStackLimit
3434 ? IsolateData::real_jslimit_offset()
3435 : IsolateData::jslimit_offset();
3436
3437 Ld_d(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
3438}
3439
3440void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
3441 Register scratch2,
3442 Label* stack_overflow) {
3443 ASM_CODE_COMMENT(this);
3444 // Check the stack for overflow. We are not trying to catch
3445 // interruptions (e.g. debug break and preemption) here, so the "real stack
3446 // limit" is checked.
3447
3448 LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
3449 // Make scratch1 the space we have left. The stack might already be overflowed
3450 // here which will cause scratch1 to become negative.
3451 sub_d(scratch1, sp, scratch1);
3452 // Check if the arguments will overflow the stack.
3453 slli_d(scratch2, num_args, kSystemPointerSizeLog2);
3454 // Signed comparison.
3455 Branch(stack_overflow, le, scratch1, Operand(scratch2));
3456}
3457
3458void MacroAssembler::TestCodeIsMarkedForDeoptimizationAndJump(
3459 Register code_data_container, Register scratch, Condition cond,
3460 Label* target) {
3461 Ld_wu(scratch, FieldMemOperand(code_data_container, Code::kFlagsOffset));
3462 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
3463 Branch(target, cond, scratch, Operand(zero_reg));
3464}
3465
3466Operand MacroAssembler::ClearedValue() const {
3467 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
3468}
3469
3470void MacroAssembler::InvokePrologue(Register expected_parameter_count,
3471 Register actual_parameter_count,
3472 InvokeType type) {
3473 ASM_CODE_COMMENT(this);
3474 Label regular_invoke;
3475
3476 // a0: actual arguments count
3477 // a1: function (passed through to callee)
3478 // a2: expected arguments count
3479
3480 DCHECK_EQ(actual_parameter_count, a0);
3481 DCHECK_EQ(expected_parameter_count, a2);
3482
3483 // If overapplication or if the actual argument count is equal to the
3484 // formal parameter count, no need to push extra undefined values.
3485 sub_d(expected_parameter_count, expected_parameter_count,
3486 actual_parameter_count);
3487 Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
3488
3489 Label stack_overflow;
3490 StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
3491 // Underapplication. Move the arguments already in the stack, including the
3492 // receiver and the return address.
3493 {
3494 Label copy;
3495 Register src = a6, dest = a7;
3496 mov(src, sp);
3497 slli_d(t0, expected_parameter_count, kSystemPointerSizeLog2);
3498 Sub_d(sp, sp, Operand(t0));
3499 // Update stack pointer.
3500 mov(dest, sp);
3501 mov(t0, actual_parameter_count);
3502 bind(&copy);
3503 Ld_d(t1, MemOperand(src, 0));
3504 St_d(t1, MemOperand(dest, 0));
3505 Sub_d(t0, t0, Operand(1));
3506 Add_d(src, src, Operand(kSystemPointerSize));
3507 Add_d(dest, dest, Operand(kSystemPointerSize));
3508 Branch(&copy, gt, t0, Operand(zero_reg));
3509 }
3510
3511 // Fill remaining expected arguments with undefined values.
3512 LoadRoot(t0, RootIndex::kUndefinedValue);
3513 {
3514 Label loop;
3515 bind(&loop);
3516 St_d(t0, MemOperand(a7, 0));
3517 Sub_d(expected_parameter_count, expected_parameter_count, Operand(1));
3518 Add_d(a7, a7, Operand(kSystemPointerSize));
3519 Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
3520 }
3521 b(&regular_invoke);
3522
3523 bind(&stack_overflow);
3524 {
3525 FrameScope frame(
3526 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
3527 CallRuntime(Runtime::kThrowStackOverflow);
3528 break_(0xCC);
3529 }
3530
3531 bind(&regular_invoke);
3532}
3533
3534void MacroAssembler::CallDebugOnFunctionCall(
3535 Register fun, Register new_target,
3536 Register expected_parameter_count_or_dispatch_handle,
3537 Register actual_parameter_count) {
3538 DCHECK(!AreAliased(t0, fun, new_target,
3539 expected_parameter_count_or_dispatch_handle,
3540 actual_parameter_count));
3541 // Load receiver to pass it later to DebugOnFunctionCall hook.
3542 LoadReceiver(t0);
3543 FrameScope frame(
3544 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
3545
3546 SmiTag(expected_parameter_count_or_dispatch_handle);
3547 SmiTag(actual_parameter_count);
3548 Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
3549
3550 if (new_target.is_valid()) {
3551 Push(new_target);
3552 }
3553 Push(fun, fun, t0);
3554 CallRuntime(Runtime::kDebugOnFunctionCall);
3555 Pop(fun);
3556 if (new_target.is_valid()) {
3557 Pop(new_target);
3558 }
3559
3560 Pop(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
3561 SmiUntag(actual_parameter_count);
3562 SmiUntag(expected_parameter_count_or_dispatch_handle);
3563}
3564
3565#ifdef V8_ENABLE_LEAPTIERING
3566void MacroAssembler::InvokeFunction(
3567 Register function, Register actual_parameter_count, InvokeType type,
3568 ArgumentAdaptionMode argument_adaption_mode) {
3569 ASM_CODE_COMMENT(this);
3570 // You can't call a function without a valid frame.
3571 DCHECK(type == InvokeType::kJump || has_frame());
3572
3573 // Contract with called JS functions requires that function is passed in a1.
3574 // (See FullCodeGenerator::Generate().)
3575 DCHECK_EQ(function, a1);
3576
3577 // Set up the context.
3578 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
3579
3580 InvokeFunctionCode(function, no_reg, actual_parameter_count, type,
3581 argument_adaption_mode);
3582}
3583
3584void MacroAssembler::InvokeFunctionWithNewTarget(
3585 Register function, Register new_target, Register actual_parameter_count,
3586 InvokeType type) {
3587 ASM_CODE_COMMENT(this);
3588 // You can't call a function without a valid frame.
3589 DCHECK(type == InvokeType::kJump || has_frame());
3590
3591 // Contract with called JS functions requires that function is passed in a1.
3592 // (See FullCodeGenerator::Generate().)
3593 DCHECK_EQ(function, a1);
3594
3595 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
3596
3597 InvokeFunctionCode(function, new_target, actual_parameter_count, type);
3598}
3599
3600void MacroAssembler::InvokeFunctionCode(
3601 Register function, Register new_target, Register actual_parameter_count,
3602 InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
3603 ASM_CODE_COMMENT(this);
3604 // You can't call a function without a valid frame.
3605 DCHECK(type == InvokeType::kJump || has_frame());
3606 DCHECK_EQ(function, a1);
3607 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
3608
3610 Ld_w(dispatch_handle,
3611 FieldMemOperand(function, JSFunction::kDispatchHandleOffset));
3612
3613 // On function call, call into the debugger if necessary.
3614 Label debug_hook, continue_after_hook;
3615 {
3616 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
3617 Ld_b(t0, MemOperand(t0, 0));
3618 BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
3619 }
3620 bind(&continue_after_hook);
3621
3622 // Clear the new.target register if not given.
3623 if (!new_target.is_valid()) {
3624 LoadRoot(a3, RootIndex::kUndefinedValue);
3625 }
3626
3627 Register scratch = s1;
3628 if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) {
3629 Register expected_parameter_count = a2;
3630 LoadParameterCountFromJSDispatchTable(expected_parameter_count,
3631 dispatch_handle, scratch);
3632 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
3633 }
3634
3635 // We call indirectly through the code field in the function to
3636 // allow recompilation to take effect without changing any of the
3637 // call sites.
3638 LoadEntrypointFromJSDispatchTable(kJavaScriptCallCodeStartRegister,
3639 dispatch_handle, scratch);
3640 switch (type) {
3641 case InvokeType::kCall:
3642 Call(kJavaScriptCallCodeStartRegister);
3643 break;
3644 case InvokeType::kJump:
3645 Jump(kJavaScriptCallCodeStartRegister);
3646 break;
3647 }
3648 Label done;
3649 Branch(&done);
3650
3651 // Deferred debug hook.
3652 bind(&debug_hook);
3653 CallDebugOnFunctionCall(function, new_target, dispatch_handle,
3654 actual_parameter_count);
3655 Branch(&continue_after_hook);
3656
3657 bind(&done);
3658}
3659#else
3660void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
3661 Register expected_parameter_count,
3662 Register actual_parameter_count,
3663 InvokeType type) {
3664 // You can't call a function without a valid frame.
3665 DCHECK(type == InvokeType::kJump || has_frame());
3666 DCHECK_EQ(function, a1);
3667 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
3668
3669 // On function call, call into the debugger if necessary.
3670 Label debug_hook, continue_after_hook;
3671 {
3672 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
3673 Ld_b(t0, MemOperand(t0, 0));
3674 BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
3675 }
3676 bind(&continue_after_hook);
3677
3678 // Clear the new.target register if not given.
3679 if (!new_target.is_valid()) {
3680 LoadRoot(a3, RootIndex::kUndefinedValue);
3681 }
3682
3683 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
3684
3685 // We call indirectly through the code field in the function to
3686 // allow recompilation to take effect without changing any of the
3687 // call sites.
3688 constexpr int unused_argument_count = 0;
3689 switch (type) {
3690 case InvokeType::kCall:
3691 CallJSFunction(function, unused_argument_count);
3692 break;
3693 case InvokeType::kJump:
3694 JumpJSFunction(function);
3695 break;
3696 }
3697
3698 Label done;
3699 Branch(&done);
3700
3701 // Deferred debug hook.
3702 bind(&debug_hook);
3703 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
3704 actual_parameter_count);
3705 Branch(&continue_after_hook);
3706
3707 // Continue here if InvokePrologue does handle the invocation due to
3708 // mismatched parameter counts.
3709 bind(&done);
3710}
3711
3712void MacroAssembler::InvokeFunctionWithNewTarget(
3713 Register function, Register new_target, Register actual_parameter_count,
3714 InvokeType type) {
3715 ASM_CODE_COMMENT(this);
3716 // You can't call a function without a valid frame.
3717 DCHECK(type == InvokeType::kJump || has_frame());
3718
3719 // Contract with called JS functions requires that function is passed in a1.
3720 DCHECK_EQ(function, a1);
3721 Register expected_parameter_count = a2;
3722 Register temp_reg = t0;
3723 LoadTaggedField(temp_reg,
3724 FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3725 LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3726 // The argument count is stored as uint16_t
3727 Ld_hu(expected_parameter_count,
3728 FieldMemOperand(temp_reg,
3729 SharedFunctionInfo::kFormalParameterCountOffset));
3730
3731 InvokeFunctionCode(a1, new_target, expected_parameter_count,
3732 actual_parameter_count, type);
3733}
3734
3735void MacroAssembler::InvokeFunction(Register function,
3736 Register expected_parameter_count,
3737 Register actual_parameter_count,
3738 InvokeType type) {
3739 ASM_CODE_COMMENT(this);
3740 // You can't call a function without a valid frame.
3741 DCHECK(type == InvokeType::kJump || has_frame());
3742
3743 // Contract with called JS functions requires that function is passed in a1.
3744 DCHECK_EQ(function, a1);
3745
3746 // Get the function and setup the context.
3747 LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3748
3749 InvokeFunctionCode(a1, no_reg, expected_parameter_count,
3750 actual_parameter_count, type);
3751}
3752#endif // V8_ENABLE_LEAPTIERING
3753
3754// ---------------------------------------------------------------------------
3755// Support functions.
3756
3757void MacroAssembler::GetObjectType(Register object, Register map,
3758 Register type_reg) {
3759 LoadMap(map, object);
3760 Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3761}
3762
3763void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
3764 InstanceType lower_limit,
3765 Register range) {
3766 Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3767 if (lower_limit != 0 || type_reg != range) {
3768 Sub_d(range, type_reg, Operand(lower_limit));
3769 }
3770}
3771
3772// -----------------------------------------------------------------------------
3773// Runtime calls.
3774
3775void MacroAssembler::AddOverflow_d(Register dst, Register left,
3776 const Operand& right, Register overflow) {
3777 ASM_CODE_COMMENT(this);
3778 BlockTrampolinePoolScope block_trampoline_pool(this);
3779 UseScratchRegisterScope temps(this);
3780 Register scratch = temps.Acquire();
3781 Register scratch2 = temps.Acquire();
3782 Register right_reg = no_reg;
3783 if (!right.is_reg()) {
3784 li(scratch, Operand(right));
3785 right_reg = scratch;
3786 } else {
3787 right_reg = right.rm();
3788 }
3789
3790 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
3791 overflow != scratch2);
3792 DCHECK(overflow != left && overflow != right_reg);
3793
3794 if (dst == left || dst == right_reg) {
3795 add_d(scratch2, left, right_reg);
3796 xor_(overflow, scratch2, left);
3797 xor_(scratch, scratch2, right_reg);
3798 and_(overflow, overflow, scratch);
3799 mov(dst, scratch2);
3800 } else {
3801 add_d(dst, left, right_reg);
3802 xor_(overflow, dst, left);
3803 xor_(scratch, dst, right_reg);
3804 and_(overflow, overflow, scratch);
3805 }
3806}
3807
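// Sketch of the signed-overflow test computed by AddOverflow_d above (helper
// name illustrative): the sum overflows exactly when its sign differs from the
// sign of both operands, i.e. (sum ^ left) & (sum ^ right) has bit 63 set.
// SubOverflow_d below uses the mirrored form (left ^ diff) & (left ^ right).
static inline bool AddOverflowsSketch(int64_t left, int64_t right) {
  uint64_t a = static_cast<uint64_t>(left);
  uint64_t b = static_cast<uint64_t>(right);
  uint64_t sum = a + b;
  return (((sum ^ a) & (sum ^ b)) >> 63) != 0;  // callers test overflow < 0
}
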
3808void MacroAssembler::SubOverflow_d(Register dst, Register left,
3809 const Operand& right, Register overflow) {
3810 ASM_CODE_COMMENT(this);
3811 BlockTrampolinePoolScope block_trampoline_pool(this);
3812 UseScratchRegisterScope temps(this);
3813 Register scratch = temps.Acquire();
3814 Register scratch2 = temps.Acquire();
3815 Register right_reg = no_reg;
3816 if (!right.is_reg()) {
3817 li(scratch, Operand(right));
3818 right_reg = scratch;
3819 } else {
3820 right_reg = right.rm();
3821 }
3822
3823 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
3824 overflow != scratch2);
3825 DCHECK(overflow != left && overflow != right_reg);
3826
3827 if (dst == left || dst == right_reg) {
3828 Sub_d(scratch2, left, right_reg);
3829 xor_(overflow, left, scratch2);
3830 xor_(scratch, left, right_reg);
3831 and_(overflow, overflow, scratch);
3832 mov(dst, scratch2);
3833 } else {
3834 sub_d(dst, left, right_reg);
3835 xor_(overflow, left, dst);
3836 xor_(scratch, left, right_reg);
3837 and_(overflow, overflow, scratch);
3838 }
3839}
3840
3841void MacroAssembler::MulOverflow_w(Register dst, Register left,
3842 const Operand& right, Register overflow) {
3843 ASM_CODE_COMMENT(this);
3844 BlockTrampolinePoolScope block_trampoline_pool(this);
3845 UseScratchRegisterScope temps(this);
3846 Register scratch = temps.Acquire();
3847 Register scratch2 = temps.Acquire();
3848 Register right_reg = no_reg;
3849 if (!right.is_reg()) {
3850 li(scratch, Operand(right));
3851 right_reg = scratch;
3852 } else {
3853 right_reg = right.rm();
3854 }
3855
3856 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
3857 overflow != scratch2);
3858 DCHECK(overflow != left && overflow != right_reg);
3859
3860 if (dst == left || dst == right_reg) {
3861 Mul_w(scratch2, left, right_reg);
3862 Mulh_w(overflow, left, right_reg);
3863 mov(dst, scratch2);
3864 } else {
3865 Mul_w(dst, left, right_reg);
3866 Mulh_w(overflow, left, right_reg);
3867 }
3868
3869 srai_d(scratch2, dst, 32);
3870 xor_(overflow, overflow, scratch2);
3871}
3872
3873void MacroAssembler::MulOverflow_d(Register dst, Register left,
3874 const Operand& right, Register overflow) {
3875 ASM_CODE_COMMENT(this);
3876 BlockTrampolinePoolScope block_trampoline_pool(this);
3877 UseScratchRegisterScope temps(this);
3878 Register scratch = temps.Acquire();
3879 Register scratch2 = temps.Acquire();
3880 Register right_reg = no_reg;
3881 if (!right.is_reg()) {
3882 li(scratch, Operand(right));
3883 right_reg = scratch;
3884 } else {
3885 right_reg = right.rm();
3886 }
3887
3888 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
3889 overflow != scratch2);
3890 DCHECK(overflow != left && overflow != right_reg);
3891
3892 if (dst == left || dst == right_reg) {
3893 Mul_d(scratch2, left, right_reg);
3894 Mulh_d(overflow, left, right_reg);
3895 mov(dst, scratch2);
3896 } else {
3897 Mul_d(dst, left, right_reg);
3898 Mulh_d(overflow, left, right_reg);
3899 }
3900
3901 srai_d(scratch2, dst, 63);
3902 xor_(overflow, overflow, scratch2);
3903}
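
Both multiply helpers detect overflow by comparing the true high half of the product (Mulh_w / Mulh_d) with the sign extension of the low half (the arithmetic shift by 32 or 63): the two agree exactly when the product fits. A sketch of the 64-bit case, assuming a compiler with __int128 support:

#include <cstdint>

// Mirrors MulOverflow_d: overflow happened iff the real high 64 bits of the
// product differ from the sign extension of the low 64 bits.
bool SignedMulOverflows64(int64_t left, int64_t right, int64_t* low) {
  __int128 product = static_cast<__int128>(left) * right;
  *low = static_cast<int64_t>(product);
  int64_t high = static_cast<int64_t>(product >> 64);
  return high != (*low >> 63);
}
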
3904
3905void MacroAssembler::CallRuntime(const Runtime::Function* f,
3906 int num_arguments) {
3907 ASM_CODE_COMMENT(this);
3908 // All parameters are on the stack. a0 has the return value after the call.
3909
3910 // If the expected number of arguments of the runtime function is
3911 // constant, we check that the actual number of arguments matches the
3912 // expectation.
3913 CHECK(f->nargs < 0 || f->nargs == num_arguments);
3914
3915 // TODO(1236192): Most runtime routines don't need the number of
3916 // arguments passed in because it is constant. At some point we
3917 // should remove this need and make the runtime routine entry code
3918 // smarter.
3919 PrepareCEntryArgs(num_arguments);
3921 bool switch_to_central_stack = options().is_wasm;
3922 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central_stack));
3923}
3924
3925void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
3926 ASM_CODE_COMMENT(this);
3927 const Runtime::Function* function = Runtime::FunctionForId(fid);
3928 DCHECK_EQ(1, function->result_size);
3929 if (function->nargs >= 0) {
3930 PrepareCEntryArgs(function->nargs);
3931 }
3932 JumpToExternalReference(ExternalReference::Create(fid));
3933}
3934
3935void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
3936 bool builtin_exit_frame) {
3937 PrepareCEntryFunction(builtin);
3938 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
3939}
3940
3941void MacroAssembler::LoadWeakValue(Register out, Register in,
3942 Label* target_if_cleared) {
3943 CompareTaggedAndBranch(target_if_cleared, eq, in,
3945 And(out, in, Operand(~kWeakHeapObjectMask));
3946}
3947
3948void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
3949 Register scratch1,
3950 Register scratch2) {
3951 DCHECK_GT(value, 0);
3952 if (v8_flags.native_code_counters && counter->Enabled()) {
3953 ASM_CODE_COMMENT(this);
3954 // This operation has to be exactly 32 bits wide in case the external
3955 // reference table redirects the counter to a uint32_t dummy_stats_counter_
3956 // field.
3957 li(scratch2, ExternalReference::Create(counter));
3958 Ld_w(scratch1, MemOperand(scratch2, 0));
3959 Add_w(scratch1, scratch1, Operand(value));
3960 St_w(scratch1, MemOperand(scratch2, 0));
3961 }
3962}
3963
3964void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
3965 Register scratch1,
3966 Register scratch2) {
3967 DCHECK_GT(value, 0);
3968 if (v8_flags.native_code_counters && counter->Enabled()) {
3969 ASM_CODE_COMMENT(this);
3970 // This operation has to be exactly 32 bits wide in case the external
3971 // reference table redirects the counter to a uint32_t dummy_stats_counter_
3972 // field.
3973 li(scratch2, ExternalReference::Create(counter));
3974 Ld_w(scratch1, MemOperand(scratch2, 0));
3975 Sub_w(scratch1, scratch1, Operand(value));
3976 St_w(scratch1, MemOperand(scratch2, 0));
3977 }
3978}
3979
3980// -----------------------------------------------------------------------------
3981// Debugging.
3982
3983void MacroAssembler::Trap() { stop(); }
3984void MacroAssembler::DebugBreak() { stop(); }
3985
3986void MacroAssembler::Check(Condition cc, AbortReason reason, Register rj,
3987 Operand rk) {
3988 Label L;
3989 Branch(&L, cc, rj, rk);
3990 Abort(reason);
3991 // Will not return here.
3992 bind(&L);
3993}
3994
3995void MacroAssembler::SbxCheck(Condition cc, AbortReason reason, Register rj,
3996 Operand rk) {
3997 Check(cc, reason, rj, rk);
3998}
3999
4000void MacroAssembler::Abort(AbortReason reason) {
4001 ASM_CODE_COMMENT(this);
4002 if (v8_flags.code_comments) {
4003 RecordComment("Abort message:", SourceLocation{});
4004 RecordComment(GetAbortReason(reason), SourceLocation{});
4005 }
4006
4007 // Without debug code, save the code size and just trap.
4008 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
4009 stop();
4010 return;
4011 }
4012
4013 if (should_abort_hard()) {
4014 // We don't care if we constructed a frame. Just pretend we did.
4015 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
4016 PrepareCallCFunction(1, a0);
4017 li(a0, Operand(static_cast<int>(reason)));
4018 li(a1, ExternalReference::abort_with_reason());
4019 // Use Call directly to avoid any unneeded overhead. The function won't
4020 // return anyway.
4021 Call(a1);
4022 return;
4023 }
4024
4025 Label abort_start;
4026 bind(&abort_start);
4027
4028 Move(a0, Smi::FromInt(static_cast<int>(reason)));
4029
4030 {
4031 // We don't actually want to generate a pile of code for this, so just
4032 // claim there is a stack frame, without generating one.
4033 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
4034 if (root_array_available()) {
4035 UseScratchRegisterScope temps(this);
4036 Register scratch = temps.Acquire();
4037 // Generate an indirect call via builtins entry table here in order to
4038 // ensure that the interpreter_entry_return_pc_offset is the same for
4039 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
4040 // when v8_flags.debug_code is enabled.
4041 LoadEntryFromBuiltin(Builtin::kAbort, scratch);
4042 Call(scratch);
4043 } else {
4044 CallBuiltin(Builtin::kAbort);
4045 }
4046 }
4047
4048 // Will not return here.
4050 // If the calling code cares about the exact number of
4051 // instructions generated, we insert padding here to keep the size
4052 // of the Abort macro constant.
4053 // Currently in debug mode with debug_code enabled the number of
4054 // generated instructions is 10, so we use this as a maximum value.
4055 static const int kExpectedAbortInstructions = 10;
4056 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4057 DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
4058 while (abort_instructions++ < kExpectedAbortInstructions) {
4059 nop();
4060 }
4061 }
4062}
4063
4064void MacroAssembler::LoadMap(Register destination, Register object) {
4066}
4067
4068void MacroAssembler::LoadCompressedMap(Register dst, Register object) {
4069 ASM_CODE_COMMENT(this);
4071}
4072
4073void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
4074 Register scratch, Label* fbv_undef) {
4075 Label done;
4076 // Load the feedback vector from the closure.
4077 LoadTaggedField(dst,
4078 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
4079 LoadTaggedField(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset));
4080
4081 // Check if feedback vector is valid.
4083 Ld_hu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4084 Branch(&done, eq, scratch, Operand(FEEDBACK_VECTOR_TYPE));
4085
4086 // Not valid, load undefined.
4087 LoadRoot(dst, RootIndex::kUndefinedValue);
4088 Branch(fbv_undef);
4089
4090 bind(&done);
4091}
4092
4093void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
4094 LoadMap(dst, cp);
4096 dst, FieldMemOperand(
4097 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
4099}
4100
4102 UseScratchRegisterScope temps(this);
4103 Register scratch = temps.Acquire();
4104 li(scratch, Operand(StackFrame::TypeToMarker(type)));
4105 PushCommonFrame(scratch);
4106}
4107
4109
4111 ASM_CODE_COMMENT(this);
4112 BlockTrampolinePoolScope block_trampoline_pool(this);
4113 Push(ra, fp);
4114 Move(fp, sp);
4115 if (!StackFrame::IsJavaScript(type)) {
4116 li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
4118 }
4119#if V8_ENABLE_WEBASSEMBLY
4120 if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP) {
4122 }
4123#endif // V8_ENABLE_WEBASSEMBLY
4124}
4125
4127 ASM_CODE_COMMENT(this);
4128 addi_d(sp, fp, 2 * kSystemPointerSize);
4130 Ld_d(fp, MemOperand(fp, 0 * kSystemPointerSize));
4131}
4132
4133void MacroAssembler::EnterExitFrame(Register scratch, int stack_space,
4134 StackFrame::Type frame_type) {
4135 ASM_CODE_COMMENT(this);
4136 DCHECK(frame_type == StackFrame::EXIT ||
4137 frame_type == StackFrame::BUILTIN_EXIT ||
4138 frame_type == StackFrame::API_ACCESSOR_EXIT ||
4139 frame_type == StackFrame::API_CALLBACK_EXIT);
4140
4141 using ER = ExternalReference;
4142
4143 // Set up the frame structure on the stack.
4144 static_assert(2 * kSystemPointerSize ==
4148
4149 // This is how the stack will look:
4150 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4151 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4152 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4153 // [fp - 1] - frame_type Smi
4154 // [fp - 2 (==kSPOffset)] - sp of the called function
4155 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4156 // new stack (will contain saved ra)
4157
4158 // Save registers and reserve room for saved entry sp.
4159 addi_d(sp, sp,
4162 St_d(fp, MemOperand(sp, 2 * kSystemPointerSize));
4163 li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
4164 St_d(scratch, MemOperand(sp, 1 * kSystemPointerSize));
4165
4166 // Set up new frame pointer.
4168
4169 if (v8_flags.debug_code) {
4171 }
4172
4173 // Save the frame pointer and the context in top.
4174 ER c_entry_fp_address =
4175 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
4176 St_d(fp, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
4177
4178 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
4179 St_d(cp, ExternalReferenceAsOperand(context_address, no_reg));
4180
4181 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4182
4183 // Reserve space for the return address and the stack arguments, and align
4184 // the frame in preparation for calling the runtime function.
4185 DCHECK_GE(stack_space, 0);
4186 Sub_d(sp, sp, Operand((stack_space + 1) * kSystemPointerSize));
4187 if (frame_alignment > 0) {
4188 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4189 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4190 }
4191
4192 // Set the exit frame sp value to point just before the return address
4193 // location.
4194 addi_d(scratch, sp, kSystemPointerSize);
4196}
4197
4198void MacroAssembler::LeaveExitFrame(Register scratch) {
4199 ASM_CODE_COMMENT(this);
4200 BlockTrampolinePoolScope block_trampoline_pool(this);
4201
4202 using ER = ExternalReference;
4203
4204 // Restore current context from top and clear it in debug mode.
4205 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
4206 Ld_d(cp, ExternalReferenceAsOperand(context_address, no_reg));
4207
4208 if (v8_flags.debug_code) {
4209 li(scratch, Operand(Context::kInvalidContext));
4210 St_d(scratch, ExternalReferenceAsOperand(context_address, no_reg));
4211 }
4212
4213 // Clear the top frame.
4214 ER c_entry_fp_address =
4215 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
4216 St_d(zero_reg, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
4217
4218 // Pop the arguments, restore registers, and return.
4219 mov(sp, fp); // Respect ABI stack constraint.
4222 addi_d(sp, sp, 2 * kSystemPointerSize);
4223}
4224
4226#if V8_HOST_ARCH_LOONG64
4227 // Running on the real platform. Use the alignment as mandated by the local
4228 // environment.
4229 // Note: This will break if we ever start generating snapshots on one LOONG64
4230 // platform for another LOONG64 platform with a different alignment.
4232#else // V8_HOST_ARCH_LOONG64
4233 // If we are using the simulator then we should always align to the expected
4234 // alignment. As the simulator is used to generate snapshots we do not know
4235 // if the target platform will need alignment, so this is controlled from a
4236 // flag.
4237 return v8_flags.sim_stack_alignment;
4238#endif // V8_HOST_ARCH_LOONG64
4239}
4240
4241void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
4242 if (SmiValuesAre32Bits()) {
4243 Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset())));
4244 } else {
4247 Ld_w(dst, src);
4248 } else {
4249 Ld_d(dst, src);
4250 }
4251 SmiUntag(dst);
4252 }
4253}
4254
4255void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
4256 DCHECK_EQ(0, kSmiTag);
4257 UseScratchRegisterScope temps(this);
4258 Register scratch = temps.Acquire();
4259 andi(scratch, value, kSmiTagMask);
4260 Branch(smi_label, eq, scratch, Operand(zero_reg));
4261}
4262
4263void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
4264 DCHECK_EQ(0, kSmiTag);
4265 UseScratchRegisterScope temps(this);
4266 Register scratch = temps.Acquire();
4267 andi(scratch, value, kSmiTagMask);
4268 Branch(not_smi_label, ne, scratch, Operand(zero_reg));
4269}
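
Both Smi branches rely on kSmiTag being zero: a tagged word is a Smi exactly when its low tag bit is clear, so a single andi plus a branch on zero/non-zero suffices. A tiny sketch (the mask value used here is the usual encoding but is an assumption):

#include <cstdint>

// What JumpIfSmi/JumpIfNotSmi test before branching.
bool IsSmiWord(uint64_t tagged_word) {
  constexpr uint64_t kSmiTagMaskSketch = 1;  // assumed kSmiTagMask
  return (tagged_word & kSmiTagMaskSketch) == 0;
}
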
4270
4272 Register object,
4273 InstanceType instance_type,
4274 Register scratch) {
4275 DCHECK(cc == eq || cc == ne);
4276 UseScratchRegisterScope temps(this);
4277 if (scratch == no_reg) {
4278 scratch = temps.Acquire();
4279 }
4281 if (std::optional<RootIndex> expected =
4283 Tagged_t ptr = ReadOnlyRootPtr(*expected);
4284 LoadCompressedMap(scratch, object);
4285 Branch(target, cc, scratch, Operand(ptr));
4286 return;
4287 }
4288 }
4289 GetObjectType(object, scratch, scratch);
4290 Branch(target, cc, scratch, Operand(instance_type));
4291}
4292
4293void MacroAssembler::JumpIfJSAnyIsNotPrimitive(Register heap_object,
4294 Register scratch, Label* target,
4295 Label::Distance distance,
4296 Condition cc) {
4297 CHECK(cc == Condition::kUnsignedLessThan ||
4298 cc == Condition::kUnsignedGreaterThanEqual);
4300#ifdef DEBUG
4301 Label ok;
4302 LoadMap(scratch, heap_object);
4303 GetInstanceTypeRange(scratch, scratch, FIRST_JS_RECEIVER_TYPE, scratch);
4304 Branch(&ok, Condition::kUnsignedLessThanEqual, scratch,
4305 Operand(LAST_JS_RECEIVER_TYPE - FIRST_JS_RECEIVER_TYPE));
4306
4307 LoadMap(scratch, heap_object);
4308 GetInstanceTypeRange(scratch, scratch, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
4309 scratch);
4310 Branch(&ok, Condition::kUnsignedLessThanEqual, scratch,
4311 Operand(LAST_PRIMITIVE_HEAP_OBJECT_TYPE -
4312 FIRST_PRIMITIVE_HEAP_OBJECT_TYPE));
4313
4314 Abort(AbortReason::kInvalidReceiver);
4315 bind(&ok);
4316#endif // DEBUG
4317
4318 // All primitive objects' maps are allocated at the start of the read-only
4319 // heap. Thus JS_RECEIVERs must have maps with larger (compressed)
4320 // addresses.
4321 LoadCompressedMap(scratch, heap_object);
4322 Branch(target, cc, scratch,
4324 } else {
4325 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
4326 GetObjectType(heap_object, scratch, scratch);
4327 Branch(target, cc, scratch, Operand(FIRST_JS_RECEIVER_TYPE));
4328 }
4329}
4330
4331#ifdef V8_ENABLE_DEBUG_CODE
4332
4333void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs,
4334 Operand rk) {
4335 if (v8_flags.debug_code) Check(cc, reason, rs, rk);
4336}
4337
4338void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
4339 Register tmp, AbortReason abort_reason) {
4340 if (!v8_flags.debug_code) return;
4341
4342 ASM_CODE_COMMENT(this);
4343 DCHECK(!AreAliased(object, map_tmp, tmp));
4344 Label ok;
4345
4346 JumpIfSmi(object, &ok);
4347
4348 GetObjectType(object, map_tmp, tmp);
4349
4350 Branch(&ok, kUnsignedLessThanEqual, tmp, Operand(LAST_NAME_TYPE));
4351
4352 Branch(&ok, kUnsignedGreaterThanEqual, tmp, Operand(FIRST_JS_RECEIVER_TYPE));
4353
4354 Branch(&ok, kEqual, map_tmp, RootIndex::kHeapNumberMap);
4355
4356 Branch(&ok, kEqual, map_tmp, RootIndex::kBigIntMap);
4357
4358 Branch(&ok, kEqual, object, RootIndex::kUndefinedValue);
4359
4360 Branch(&ok, kEqual, object, RootIndex::kTrueValue);
4361
4362 Branch(&ok, kEqual, object, RootIndex::kFalseValue);
4363
4364 Branch(&ok, kEqual, object, RootIndex::kNullValue);
4365
4366 Abort(abort_reason);
4367 bind(&ok);
4368}
4369
4370void MacroAssembler::AssertNotSmi(Register object) {
4371 if (!v8_flags.debug_code) return;
4372 ASM_CODE_COMMENT(this);
4373 static_assert(kSmiTag == 0);
4374 UseScratchRegisterScope temps(this);
4375 Register scratch = temps.Acquire();
4376 andi(scratch, object, kSmiTagMask);
4377 Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
4378}
4379
4380void MacroAssembler::AssertSmi(Register object) {
4381 if (!v8_flags.debug_code) return;
4382 ASM_CODE_COMMENT(this);
4383 static_assert(kSmiTag == 0);
4384 UseScratchRegisterScope temps(this);
4385 Register scratch = temps.Acquire();
4386 andi(scratch, object, kSmiTagMask);
4387 Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
4388}
4389
4391 if (!v8_flags.debug_code) return;
4392 ASM_CODE_COMMENT(this);
4393 const int frame_alignment = ActivationFrameAlignment();
4394 const int frame_alignment_mask = frame_alignment - 1;
4395
4396 if (frame_alignment > kSystemPointerSize) {
4397 Label alignment_as_expected;
4398 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4399 {
4400 UseScratchRegisterScope temps(this);
4401 Register scratch = temps.Acquire();
4402 andi(scratch, sp, frame_alignment_mask);
4403 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
4404 }
4405 // Don't use Check here, as it would call Runtime_Abort and re-enter here.
4406 stop();
4407 bind(&alignment_as_expected);
4408 }
4409}
4410
4411void MacroAssembler::AssertConstructor(Register object) {
4412 if (!v8_flags.debug_code) return;
4413 ASM_CODE_COMMENT(this);
4414 BlockTrampolinePoolScope block_trampoline_pool(this);
4415 UseScratchRegisterScope temps(this);
4416 Register scratch = temps.Acquire();
4417 static_assert(kSmiTag == 0);
4418 SmiTst(object, scratch);
4419 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, scratch,
4420 Operand(zero_reg));
4421
4422 LoadMap(scratch, object);
4423 Ld_bu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
4424 And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
4425 Check(ne, AbortReason::kOperandIsNotAConstructor, scratch, Operand(zero_reg));
4426}
4427
4428void MacroAssembler::AssertFunction(Register object) {
4429 if (!v8_flags.debug_code) return;
4430 ASM_CODE_COMMENT(this);
4431 BlockTrampolinePoolScope block_trampoline_pool(this);
4432 UseScratchRegisterScope temps(this);
4433 Register scratch = temps.Acquire();
4434 static_assert(kSmiTag == 0);
4435 SmiTst(object, scratch);
4436 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, scratch,
4437 Operand(zero_reg));
4438 Push(object);
4439 LoadMap(object, object);
4440 GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, scratch);
4441 Check(ls, AbortReason::kOperandIsNotAFunction, scratch,
4442 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
4443 Pop(object);
4444}
4445
4446void MacroAssembler::AssertCallableFunction(Register object) {
4447 if (!v8_flags.debug_code) return;
4448 ASM_CODE_COMMENT(this);
4449 BlockTrampolinePoolScope block_trampoline_pool(this);
4450 UseScratchRegisterScope temps(this);
4451 Register scratch = temps.Acquire();
4452 static_assert(kSmiTag == 0);
4453 SmiTst(object, scratch);
4454 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, scratch,
4455 Operand(zero_reg));
4456 Push(object);
4457 LoadMap(object, object);
4459 scratch);
4460 Check(ls, AbortReason::kOperandIsNotACallableFunction, scratch,
4463 Pop(object);
4464}
4465
4466void MacroAssembler::AssertBoundFunction(Register object) {
4467 if (!v8_flags.debug_code) return;
4468 ASM_CODE_COMMENT(this);
4469 BlockTrampolinePoolScope block_trampoline_pool(this);
4470 UseScratchRegisterScope temps(this);
4471 Register scratch = temps.Acquire();
4472 static_assert(kSmiTag == 0);
4473 SmiTst(object, scratch);
4474 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, scratch,
4475 Operand(zero_reg));
4476 GetObjectType(object, scratch, scratch);
4477 Check(eq, AbortReason::kOperandIsNotABoundFunction, scratch,
4478 Operand(JS_BOUND_FUNCTION_TYPE));
4479}
4480
4481void MacroAssembler::AssertGeneratorObject(Register object) {
4482 if (!v8_flags.debug_code) return;
4483 ASM_CODE_COMMENT(this);
4484 BlockTrampolinePoolScope block_trampoline_pool(this);
4485 UseScratchRegisterScope temps(this);
4486 Register scratch = temps.Acquire();
4487 static_assert(kSmiTag == 0);
4488 SmiTst(object, scratch);
4489 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, scratch,
4490 Operand(zero_reg));
4491 GetObjectType(object, scratch, scratch);
4492 Sub_d(scratch, scratch, Operand(FIRST_JS_GENERATOR_OBJECT_TYPE));
4493 Check(
4494 ls, AbortReason::kOperandIsNotAGeneratorObject, scratch,
4495 Operand(LAST_JS_GENERATOR_OBJECT_TYPE - FIRST_JS_GENERATOR_OBJECT_TYPE));
4496}
4497
4499 if (v8_flags.debug_code) Abort(reason);
4500}
4501
4503 Register scratch) {
4504 if (!v8_flags.debug_code) return;
4505 ASM_CODE_COMMENT(this);
4506 Label done_checking;
4507 AssertNotSmi(object);
4508 LoadRoot(scratch, RootIndex::kUndefinedValue);
4509 Branch(&done_checking, eq, object, Operand(scratch));
4510 GetObjectType(object, scratch, scratch);
4511 Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
4512 Operand(ALLOCATION_SITE_TYPE));
4513 bind(&done_checking);
4514}
4515
4516#endif // V8_ENABLE_DEBUG_CODE
4517
4518void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
4519 FPURegister src2, Label* out_of_line) {
4520 ASM_CODE_COMMENT(this);
4521 if (src1 == src2) {
4522 Move_s(dst, src1);
4523 return;
4524 }
4525
4526 // Check if one of operands is NaN.
4527 CompareIsNanF32(src1, src2);
4528 BranchTrueF(out_of_line);
4529
4530 fmax_s(dst, src1, src2);
4531}
4532
4533void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
4534 FPURegister src2) {
4535 fadd_s(dst, src1, src2);
4536}
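
The out-of-line handlers simply add the two operands: the fast path is only left when at least one input is NaN, and an IEEE-754 addition involving a NaN yields a NaN, which is the result the Max/Min helpers must produce in that case. A sketch of the overall shape (plain C++; it glosses over the ±0 handling done by the in-line fmax_s/fmin_s path):

#include <cmath>

float Float32MaxSketch(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) return a + b;  // out-of-line: NaN result
  return std::fmax(a, b);                            // in-line fmax_s path
}
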
4537
4538void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
4539 FPURegister src2, Label* out_of_line) {
4540 ASM_CODE_COMMENT(this);
4541 if (src1 == src2) {
4542 Move_s(dst, src1);
4543 return;
4544 }
4545
4546 // Check if one of operands is NaN.
4547 CompareIsNanF32(src1, src2);
4548 BranchTrueF(out_of_line);
4549
4550 fmin_s(dst, src1, src2);
4551}
4552
4553void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
4554 FPURegister src2) {
4555 fadd_s(dst, src1, src2);
4556}
4557
4558void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
4559 FPURegister src2, Label* out_of_line) {
4560 ASM_CODE_COMMENT(this);
4561 if (src1 == src2) {
4562 Move_d(dst, src1);
4563 return;
4564 }
4565
4566 // Check if one of operands is NaN.
4567 CompareIsNanF64(src1, src2);
4568 BranchTrueF(out_of_line);
4569
4570 fmax_d(dst, src1, src2);
4571}
4572
4573void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
4574 FPURegister src2) {
4575 fadd_d(dst, src1, src2);
4576}
4577
4578void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
4579 FPURegister src2, Label* out_of_line) {
4580 ASM_CODE_COMMENT(this);
4581 if (src1 == src2) {
4582 Move_d(dst, src1);
4583 return;
4584 }
4585
4586 // Check if one of operands is NaN.
4587 CompareIsNanF64(src1, src2);
4588 BranchTrueF(out_of_line);
4589
4590 fmin_d(dst, src1, src2);
4591}
4592
4593void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
4594 FPURegister src2) {
4595 fadd_d(dst, src1, src2);
4596}
4597
4598int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4599 int num_double_arguments) {
4600 int stack_passed_words = 0;
4601
4602 // Up to eight simple arguments are passed in registers a0..a7.
4603 if (num_reg_arguments > kRegisterPassedArguments) {
4604 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4605 }
4606 if (num_double_arguments > kFPRegisterPassedArguments) {
4607 int num_count = num_double_arguments - kFPRegisterPassedArguments;
4608 if (num_reg_arguments >= kRegisterPassedArguments) {
4609 stack_passed_words += num_count;
4610 } else if (num_count > kRegisterPassedArguments - num_reg_arguments) {
4611 stack_passed_words +=
4612 num_count - (kRegisterPassedArguments - num_reg_arguments);
4613 }
4614 }
4615 return stack_passed_words;
4616}
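
As the counting above suggests, GP arguments beyond the eight register slots always spill to the stack, while excess doubles are first absorbed by any unused GP argument registers and only the remainder spills. A self-contained copy of that logic with a couple of worked cases (the 8/8 register counts are taken from the constants used above):

int StackPassedWordsSketch(int gp_args, int fp_args) {
  constexpr int kGP = 8, kFP = 8;
  int words = gp_args > kGP ? gp_args - kGP : 0;
  int extra_fp = fp_args > kFP ? fp_args - kFP : 0;
  int free_gp = gp_args < kGP ? kGP - gp_args : 0;
  // Excess doubles use spare GP argument registers before touching the stack.
  if (extra_fp > free_gp) words += extra_fp - free_gp;
  return words;
}
// StackPassedWordsSketch(2, 10) == 0  (2 extra doubles fit in 6 spare GP regs)
// StackPassedWordsSketch(7, 11) == 2  (3 extra doubles, only 1 spare GP reg)
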
4617
4618void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4619 int num_double_arguments,
4620 Register scratch) {
4621 ASM_CODE_COMMENT(this);
4622 int frame_alignment = ActivationFrameAlignment();
4623
4624 // Up to eight simple arguments are passed in a0..a7; no argument slots.
4625 // Remaining arguments are pushed on the stack.
4626 int stack_passed_arguments =
4627 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
4628 if (frame_alignment > kSystemPointerSize) {
4629 // Make the stack end at the alignment boundary and make room for the
4630 // stack-passed arguments and the original value of sp.
4631 mov(scratch, sp);
4632 Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize));
4633 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4634 bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
4635 St_d(scratch, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
4636 } else {
4637 Sub_d(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
4638 }
4639}
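
When the ABI demands more than pointer alignment, the function reserves the argument slots plus one extra word, clears the low bits of sp (the bstrins_d is equivalent to sp &= -frame_alignment), and stashes the original sp in that extra word so the epilogue can restore it. A pointer-arithmetic sketch, assuming 8-byte stack slots:

#include <cstdint>

uint64_t AlignedCallSp(uint64_t sp, int stack_words, int frame_alignment) {
  uint64_t new_sp = sp - (stack_words + 1) * 8;           // args + saved sp slot
  new_sp &= ~static_cast<uint64_t>(frame_alignment - 1);  // bstrins_d(..., 0)
  // The original sp is stored at new_sp + stack_words * 8 so that
  // CallCFunctionHelper can reload it after the call.
  return new_sp;
}
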
4640
4641void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4642 Register scratch) {
4643 PrepareCallCFunction(num_reg_arguments, 0, scratch);
4644}
4645
4646int MacroAssembler::CallCFunction(ExternalReference function,
4647 int num_reg_arguments,
4648 int num_double_arguments,
4649 SetIsolateDataSlots set_isolate_data_slots,
4650 Label* return_location) {
4651 ASM_CODE_COMMENT(this);
4652 BlockTrampolinePoolScope block_trampoline_pool(this);
4653 UseScratchRegisterScope temps(this);
4654 Register scratch = temps.Acquire();
4655 li(scratch, function);
4656 return CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments,
4657 set_isolate_data_slots, return_location);
4658}
4659
4660int MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
4661 int num_double_arguments,
4662 SetIsolateDataSlots set_isolate_data_slots,
4663 Label* return_location) {
4664 ASM_CODE_COMMENT(this);
4665 return CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
4666 set_isolate_data_slots, return_location);
4667}
4668
4669int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
4670 SetIsolateDataSlots set_isolate_data_slots,
4671 Label* return_location) {
4672 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
4673 return_location);
4674}
4675
4676int MacroAssembler::CallCFunction(Register function, int num_arguments,
4677 SetIsolateDataSlots set_isolate_data_slots,
4678 Label* return_location) {
4679 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
4680 return_location);
4681}
4682
4684 Register function, int num_reg_arguments, int num_double_arguments,
4685 SetIsolateDataSlots set_isolate_data_slots, Label* return_location) {
4686 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
4687 DCHECK(has_frame());
4688
4689 Label get_pc;
4690 UseScratchRegisterScope temps(this);
4691 // We're doing a C call, which means non-parameter caller-saved registers
4692 // (a0-a7, t0-t8) will be clobbered and so are available to use as scratches.
4693 // In the worst-case scenario, we'll need 2 scratch registers. We pick 3
4694 // registers minus the `function` register, in case `function` aliases with
4695 // any of the registers.
4696 temps.Include({t0, t1, t2, function});
4697 temps.Exclude(function);
4698
4699 // Make sure that the stack is aligned before calling a C function unless
4700 // running in the simulator. The simulator has its own alignment check which
4701 // provides more information.
4702#if V8_HOST_ARCH_LOONG64
4703 if (v8_flags.debug_code) {
4704 int frame_alignment = base::OS::ActivationFrameAlignment();
4705 int frame_alignment_mask = frame_alignment - 1;
4706 if (frame_alignment > kSystemPointerSize) {
4707 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4708 Label alignment_as_expected;
4709 {
4710 UseScratchRegisterScope temps(this);
4711 Register scratch = temps.Acquire();
4712 And(scratch, sp, Operand(frame_alignment_mask));
4713 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
4714 }
4715 // Don't use Check here, as it will call Runtime_Abort possibly
4716 // re-entering here.
4717 stop();
4718 bind(&alignment_as_expected);
4719 }
4720 }
4721#endif // V8_HOST_ARCH_LOONG64
4722
4723 // Just call directly. The function called cannot cause a GC, or
4724 // allow preemption, so the return address in the link register
4725 // stays correct.
4726 {
4727 BlockTrampolinePoolScope block_trampoline_pool(this);
4728 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
4729 // Save the frame pointer and PC so that the stack layout remains
4730 // iterable, even without an ExitFrame which normally exists between JS
4731 // and C frames.
4732 UseScratchRegisterScope temps(this);
4733 Register pc_scratch = temps.Acquire();
4734 DCHECK(!AreAliased(pc_scratch, function));
4736
4737 LoadLabelRelative(pc_scratch, &get_pc);
4738
4739 St_d(pc_scratch,
4740 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
4741 St_d(fp, ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
4742 }
4743
4744 Call(function);
4745 int call_pc_offset = pc_offset();
4746 bind(&get_pc);
4747 if (return_location) bind(return_location);
4748
4749 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
4750 // We don't unset the PC; the FP is the source of truth.
4751 St_d(zero_reg,
4752 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
4753 }
4754
4755 int stack_passed_arguments =
4756 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
4757
4759 Ld_d(sp, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
4760 } else {
4761 Add_d(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
4762 }
4763
4765
4766 return call_pc_offset;
4767 }
4768}
4769
4770#undef BRANCH_ARGS_CHECK
4771
4772void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc,
4773 Label* condition_met) {
4774 ASM_CODE_COMMENT(this);
4775 UseScratchRegisterScope temps(this);
4776 Register scratch = temps.Acquire();
4777 And(scratch, object, Operand(~MemoryChunk::GetAlignmentMaskForAssembler()));
4778 Ld_d(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()));
4779 And(scratch, scratch, Operand(mask));
4780 Branch(condition_met, cc, scratch, Operand(zero_reg));
4781}
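
CheckPageFlag masks the object address down to the start of its MemoryChunk, loads the chunk's flags word, and branches on the requested bits. A rough sketch; the alignment and flags offset below are placeholders, not the real MemoryChunk constants:

#include <cstdint>

bool PageFlagSetSketch(uintptr_t object, uintptr_t mask) {
  constexpr uintptr_t kChunkAlignmentSketch = 256 * 1024;  // placeholder
  constexpr uintptr_t kFlagsOffsetSketch = 8;              // placeholder
  uintptr_t chunk = object & ~(kChunkAlignmentSketch - 1);
  uintptr_t flags =
      *reinterpret_cast<const uintptr_t*>(chunk + kFlagsOffsetSketch);
  return (flags & mask) != 0;
}
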
4782
4783Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4784 Register reg4, Register reg5,
4785 Register reg6) {
4786 RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
4787
4788 const RegisterConfiguration* config = RegisterConfiguration::Default();
4789 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
4790 int code = config->GetAllocatableGeneralCode(i);
4791 Register candidate = Register::from_code(code);
4792 if (regs.has(candidate)) continue;
4793 return candidate;
4794 }
4795 UNREACHABLE();
4796}
4797
4798void MacroAssembler::ComputeCodeStartAddress(Register dst) {
4799 // TODO(LOONG_dev): range check, add Pcadd macro function?
4800 pcaddi(dst, -pc_offset() >> 2);
4801}
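
pcaddi adds its signed immediate, scaled by the 4-byte instruction size, to the current pc; passing -pc_offset() >> 2 therefore lands on the first instruction emitted for this code object. The arithmetic, spelled out as a sketch:

#include <cstdint>

uint64_t CodeStartSketch(uint64_t current_pc, int pc_offset_bytes) {
  int imm = -pc_offset_bytes >> 2;                       // what the macro passes
  return current_pc + (static_cast<int64_t>(imm) << 2);  // what pcaddi computes
}
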
4802
4803// Check if the code object is marked for deoptimization. If it is, jump to
4804// the CompileLazyDeoptimizedCode builtin. In order to do this we need
4805// to:
4806// 1. read from memory the word that contains that bit, which can be found in
4807// the flags in the referenced {Code} object;
4808// 2. test kMarkedForDeoptimizationBit in those flags; and
4809// 3. jump to the builtin if the bit is set.
4810//
4811// Note: With leaptiering we simply assert the code is not deoptimized.
4813 UseScratchRegisterScope temps(this);
4814 Register scratch = temps.Acquire();
4815 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
4816 int offset =
4817 InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
4820 Ld_wu(scratch, FieldMemOperand(scratch, Code::kFlagsOffset));
4821 }
4822#ifdef V8_ENABLE_LEAPTIERING
4823 if (v8_flags.debug_code) {
4824 Label not_deoptimized;
4825 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
4826 Branch(&not_deoptimized, eq, scratch, Operand(zero_reg));
4827 Abort(AbortReason::kInvalidDeoptimizedCode);
4828 bind(&not_deoptimized);
4829 }
4830#else
4831 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
4832 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, scratch,
4833 Operand(zero_reg));
4834#endif
4835}
4836
4837void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
4838 DeoptimizeKind kind, Label* ret,
4839 Label*) {
4840 ASM_CODE_COMMENT(this);
4841 BlockTrampolinePoolScope block_trampoline_pool(this);
4842 UseScratchRegisterScope temps(this);
4843 Register scratch = temps.Acquire();
4844 Ld_d(scratch,
4846 Call(scratch);
4850}
4851
4853 Register code_object,
4854 CodeEntrypointTag tag) {
4855 ASM_CODE_COMMENT(this);
4856#ifdef V8_ENABLE_SANDBOX
4857 LoadCodeEntrypointViaCodePointer(
4859 FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), tag);
4860#else
4862 FieldMemOperand(code_object, Code::kInstructionStartOffset));
4863#endif
4864}
4865
4866void MacroAssembler::CallCodeObject(Register code_object,
4867 CodeEntrypointTag tag) {
4868 ASM_CODE_COMMENT(this);
4869 LoadCodeInstructionStart(code_object, code_object, tag);
4870 Call(code_object);
4871}
4872
4873void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag,
4874 JumpMode jump_mode) {
4875 // TODO(saelo): can we avoid using this for JavaScript functions
4876 // (kJSEntrypointTag) and instead use a variant that ensures that the caller
4877 // and callee agree on the signature (i.e. parameter count)?
4878 ASM_CODE_COMMENT(this);
4879 DCHECK_EQ(JumpMode::kJump, jump_mode);
4880 LoadCodeInstructionStart(code_object, code_object, tag);
4881 Jump(code_object);
4882}
4883
4884void MacroAssembler::CallJSFunction(Register function_object,
4885 uint16_t argument_count) {
4887#ifdef V8_ENABLE_LEAPTIERING
4890 Register scratch = s2;
4891
4892 Ld_w(dispatch_handle,
4893 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
4894 LoadEntrypointAndParameterCountFromJSDispatchTable(code, parameter_count,
4895 dispatch_handle, scratch);
4896
4897 // Force a safe crash if the parameter count doesn't match.
4898 SbxCheck(le, AbortReason::kJSSignatureMismatch, parameter_count,
4899 Operand(argument_count));
4900 Call(code);
4901#else
4903 LoadTaggedField(code,
4904 FieldMemOperand(function_object, JSFunction::kCodeOffset));
4906#endif
4907}
4908
4909#if V8_ENABLE_LEAPTIERING
4910void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
4911 uint16_t argument_count) {
4913 Register scratch = s1;
4915 Operand(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
4916 // WARNING: This entrypoint load is only safe because we are storing a
4917 // RelocInfo for the dispatch handle in the li above (thus keeping the
4918 // dispatch entry alive) _and_ because the entrypoints are not compactable
4919 // (thus meaning that the calculation in the entrypoint load is not
4920 // invalidated by a compaction).
4921 // TODO(leszeks): Make this less of a footgun.
4922 static_assert(!JSDispatchTable::kSupportsCompaction);
4923 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
4924 CHECK_EQ(argument_count,
4925 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
4926 dispatch_handle));
4927 Call(code);
4928}
4929#endif
4930
4931void MacroAssembler::JumpJSFunction(Register function_object,
4932 JumpMode jump_mode) {
4934#ifdef V8_ENABLE_LEAPTIERING
4935 // This implementation is not currently used because callers usually need
4936 // to load both entry point and parameter count and then do something with
4937 // the latter before the actual call.
4938 UNREACHABLE();
4939#else
4941 LoadTaggedField(code,
4942 FieldMemOperand(function_object, JSFunction::kCodeOffset));
4943 JumpCodeObject(code, kJSEntrypointTag, jump_mode);
4944#endif
4945}
4946
4947#ifdef V8_ENABLE_WEBASSEMBLY
4948
4949void MacroAssembler::ResolveWasmCodePointer(Register target,
4950 uint64_t signature_hash) {
4951 ASM_CODE_COMMENT(this);
4952 ExternalReference global_jump_table =
4953 ExternalReference::wasm_code_pointer_table();
4954 UseScratchRegisterScope temps(this);
4955 Register scratch = temps.Acquire();
4956 li(scratch, global_jump_table);
4957#ifdef V8_ENABLE_SANDBOX
4958 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 16);
4959 Alsl_d(target, target, scratch, 4);
4960 Ld_d(scratch,
4961 MemOperand(target, wasm::WasmCodePointerTable::kOffsetOfSignatureHash));
4962 bool has_second_tmp = temps.hasAvailable();
4963 Register signature_hash_register = has_second_tmp ? temps.Acquire() : target;
4964 if (!has_second_tmp) {
4965 Push(signature_hash_register);
4966 }
4967 li(signature_hash_register, Operand(signature_hash));
4968 SbxCheck(Condition::kEqual, AbortReason::kWasmSignatureMismatch, scratch,
4969 Operand(signature_hash_register));
4970 if (!has_second_tmp) {
4971 Pop(signature_hash_register);
4972 }
4973#else
4974 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 8);
4975 Alsl_d(target, target, scratch, 3);
4976#endif
4977
4978 Ld_d(target, MemOperand(target, 0));
4979}
4980
4981void MacroAssembler::CallWasmCodePointer(Register target,
4982 uint64_t signature_hash,
4983 CallJumpMode call_jump_mode) {
4984 ResolveWasmCodePointer(target, signature_hash);
4985 if (call_jump_mode == CallJumpMode::kTailCall) {
4986 Jump(target);
4987 } else {
4988 Call(target);
4989 }
4990}
4991
4992void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) {
4993 ExternalReference global_jump_table =
4994 ExternalReference::wasm_code_pointer_table();
4995 UseScratchRegisterScope temps(this);
4996 Register scratch = temps.Acquire();
4997 li(scratch, global_jump_table);
4998 constexpr unsigned int kEntrySizeLog2 =
4999 std::bit_width(sizeof(wasm::WasmCodePointerTableEntry)) - 1;
5000 Alsl_d(target, target, scratch, kEntrySizeLog2);
5001 Ld_d(target, MemOperand(target, 0));
5002
5003 Call(target);
5004}
5005
5006void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
5007 static_assert(sizeof(WasmCodePointer) == 4);
5008 Ld_w(dst, src);
5009}
5010
5011#endif
5012
5013namespace {
5014
5015#ifndef V8_ENABLE_LEAPTIERING
5016// Only used when leaptiering is disabled.
5017void TailCallOptimizedCodeSlot(MacroAssembler* masm,
5018 Register optimized_code_entry) {
5019 // ----------- S t a t e -------------
5020 // -- a0 : actual argument count
5021 // -- a3 : new target (preserved for callee if needed, and caller)
5022 // -- a1 : target function (preserved for callee if needed, and caller)
5023 // -----------------------------------
5024 DCHECK(!AreAliased(optimized_code_entry, a1, a3));
5025
5026 Label heal_optimized_code_slot;
5027
5028 // If the optimized code is cleared, go to runtime to update the optimization
5029 // marker field.
5030 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
5031 &heal_optimized_code_slot);
5032
5033 // The entry references a CodeWrapper object. Unwrap it now.
5035 optimized_code_entry,
5036 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset));
5037
5038 // Check if the optimized code is marked for deopt. If it is, call the
5039 // runtime to clear it.
5040 __ TestCodeIsMarkedForDeoptimizationAndJump(optimized_code_entry, a6, ne,
5041 &heal_optimized_code_slot);
5042
5044 // Optimized code is good; get it into the closure and link the closure into
5044 // the optimized functions list, then tail call the optimized code.
5045 // The feedback vector is no longer used, so reuse it as a scratch
5046 // register.
5047 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, a1);
5048
5049 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
5050 __ LoadCodeInstructionStart(a2, optimized_code_entry, kJSEntrypointTag);
5051 __ Jump(a2);
5052
5053 // Optimized code slot contains deoptimized code or code is cleared and
5054 // optimized code marker isn't updated. Evict the code, update the marker
5055 // and re-enter the closure's code.
5056 __ bind(&heal_optimized_code_slot);
5057 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
5058}
5059#endif // V8_ENABLE_LEAPTIERING
5060
5061} // namespace
5062
5063#ifdef V8_ENABLE_DEBUG_CODE
5064void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
5065 if (v8_flags.debug_code) {
5066 GetObjectType(object, scratch, scratch);
5067 Assert(eq, AbortReason::kExpectedFeedbackCell, scratch,
5068 Operand(FEEDBACK_CELL_TYPE));
5069 }
5070}
5071void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
5072 if (v8_flags.debug_code) {
5073 GetObjectType(object, scratch, scratch);
5074 Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
5075 Operand(FEEDBACK_VECTOR_TYPE));
5076 }
5077}
5078#endif // V8_ENABLE_DEBUG_CODE
5079
5081 Register optimized_code, Register closure) {
5082 ASM_CODE_COMMENT(this);
5083 DCHECK(!AreAliased(optimized_code, closure));
5084
5085#ifdef V8_ENABLE_LEAPTIERING
5086 UNREACHABLE();
5087#else
5088 // Store code entry in the closure.
5089 StoreCodePointerField(optimized_code,
5090 FieldMemOperand(closure, JSFunction::kCodeOffset));
5091 RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
5094#endif // V8_ENABLE_LEAPTIERING
5095}
5096
5098 Runtime::FunctionId function_id) {
5099 ASM_CODE_COMMENT(this);
5100 // ----------- S t a t e -------------
5101 // -- a0 : actual argument count (preserved for callee)
5102 // -- a1 : target function (preserved for callee)
5103 // -- a3 : new target (preserved for callee)
5104 // -- a4 : dispatch handle (preserved for callee)
5105 // -----------------------------------
5106 {
5107 FrameScope scope(this, StackFrame::INTERNAL);
5108 // Push a copy of the target function, the new target, the actual
5109 // argument count, and the dispatch handle.
5110 // Push function as parameter to the runtime call.
5114#ifdef V8_ENABLE_LEAPTIERING
5115 // No need to SmiTag since dispatch handles always look like Smis.
5116 static_assert(kJSDispatchHandleShift > 0);
5118#endif
5119 // Function is also the parameter to the runtime call.
5121
5122 CallRuntime(function_id, 1);
5124
5125 // Restore target function, new target, actual argument count and dispatch
5126 // handle.
5127#ifdef V8_ENABLE_LEAPTIERING
5129#endif
5133 }
5134
5135 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
5136 Jump(a2);
5137}
5138
5139#ifndef V8_ENABLE_LEAPTIERING
5140
5141// Read off the flags in the feedback vector and check if there
5142// is optimized code or a tiering state that needs to be processed.
5144 Register flags, Register feedback_vector, CodeKind current_code_kind,
5145 Label* flags_need_processing) {
5146 ASM_CODE_COMMENT(this);
5147 Register scratch = t2;
5148 DCHECK(!AreAliased(t2, flags, feedback_vector));
5149 DCHECK(CodeKindCanTierUp(current_code_kind));
5150 uint32_t flag_mask =
5152 Ld_hu(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
5153 And(scratch, flags, Operand(flag_mask));
5154 Branch(flags_need_processing, ne, scratch, Operand(zero_reg));
5155}
5156
5158 Register flags, Register feedback_vector) {
5159 ASM_CODE_COMMENT(this);
5160 DCHECK(!AreAliased(flags, feedback_vector));
5161 Label maybe_has_optimized_code, maybe_needs_logging;
5162 // Check if optimized code marker is available.
5163 {
5164 UseScratchRegisterScope temps(this);
5165 Register scratch = temps.Acquire();
5166 And(scratch, flags,
5168 Branch(&maybe_needs_logging, eq, scratch, Operand(zero_reg));
5169 }
5170
5171 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
5172
5173 bind(&maybe_needs_logging);
5174 {
5175 UseScratchRegisterScope temps(this);
5176 Register scratch = temps.Acquire();
5177 And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask));
5178 Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
5179 }
5180
5181 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
5182
5183 bind(&maybe_has_optimized_code);
5184 Register optimized_code_entry = flags;
5185 LoadTaggedField(optimized_code_entry,
5186 FieldMemOperand(feedback_vector,
5187 FeedbackVector::kMaybeOptimizedCodeOffset));
5188
5189 TailCallOptimizedCodeSlot(this, optimized_code_entry);
5190}
5191
5192#endif // !V8_ENABLE_LEAPTIERING
5193
5195 const MemOperand& field_operand) {
5197 DecompressTagged(destination, field_operand);
5198 } else {
5199 Ld_d(destination, field_operand);
5200 }
5201}
5202
5204 const MemOperand& field_operand) {
5206 DecompressTaggedSigned(destination, field_operand);
5207 } else {
5208 Ld_d(destination, field_operand);
5209 }
5210}
5211
5212void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
5213 SmiUntag(dst, src);
5214}
5215
5216void MacroAssembler::StoreTaggedField(Register src, const MemOperand& dst) {
5218 St_w(src, dst);
5219 } else {
5220 St_d(src, dst);
5221 }
5222}
5223
5225 const MemOperand& dst) {
5226 UseScratchRegisterScope temps(this);
5227 Register scratch = temps.Acquire();
5228 Add_d(scratch, dst.base(), dst.offset());
5230 amswap_db_w(zero_reg, src, scratch);
5231 } else {
5232 amswap_db_d(zero_reg, src, scratch);
5233 }
5234}
5235
5237 const MemOperand& src) {
5238 ASM_CODE_COMMENT(this);
5239 Ld_wu(dst, src);
5240 if (v8_flags.slow_debug_code) {
5241 // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
5242 Add_d(dst, dst, ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
5243 }
5244}
5245
5246void MacroAssembler::DecompressTagged(Register dst, const MemOperand& src) {
5247 ASM_CODE_COMMENT(this);
5248 Ld_wu(dst, src);
5249 Add_d(dst, kPtrComprCageBaseRegister, dst);
5250}
5251
5252void MacroAssembler::DecompressTagged(Register dst, Register src) {
5253 ASM_CODE_COMMENT(this);
5254 Bstrpick_d(dst, src, 31, 0);
5255 Add_d(dst, kPtrComprCageBaseRegister, Operand(dst));
5256}
5257
5258void MacroAssembler::DecompressTagged(Register dst, Tagged_t immediate) {
5259 ASM_CODE_COMMENT(this);
5260 Add_d(dst, kPtrComprCageBaseRegister, static_cast<int32_t>(immediate));
5261}
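
With pointer compression, a tagged field holds only the low 32 bits of the pointer; decompression zero-extends that value and adds the cage base kept in kPtrComprCageBaseRegister. A one-line sketch of the operation:

#include <cstdint>

uint64_t DecompressTaggedSketch(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);  // Bstrpick_d + Add_d
}
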
5262
5264 const MemOperand& field_operand) {
5265#if V8_ENABLE_SANDBOX
5266 ASM_CODE_COMMENT(this);
5267 UseScratchRegisterScope temps(this);
5268 Register scratch = temps.Acquire();
5269 Ld_wu(destination, field_operand);
5270 Ld_d(scratch,
5271 MemOperand(kRootRegister, IsolateData::trusted_cage_base_offset()));
5272 Or(destination, destination, scratch);
5273#else
5274 UNREACHABLE();
5275#endif // V8_ENABLE_SANDBOX
5276}
5277
5279 const MemOperand& src) {
5280 ASM_CODE_COMMENT(this);
5281 Ld_wu(dst, src);
5282 dbar(0);
5283 if (v8_flags.slow_debug_code) {
5284 // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
5285 Add_d(dst, dst, ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
5286 }
5287}
5288
5290 const MemOperand& src) {
5291 ASM_CODE_COMMENT(this);
5292 Ld_wu(dst, src);
5293 dbar(0);
5294 Add_d(dst, kPtrComprCageBaseRegister, dst);
5295}
5296
5297// Calls an API function. Allocates a HandleScope, extracts the returned value
5298// from the handle, and propagates exceptions. Clobbers C argument registers
5299// and C caller-saved registers. Restores context. On return removes
5300// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
5301// (GCed; this includes the JS call arguments space and the additional space
5302// allocated for the fast call).
5303void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
5304 Register function_address,
5305 ExternalReference thunk_ref, Register thunk_arg,
5306 int slots_to_drop_on_return,
5307 MemOperand* argc_operand,
5308 MemOperand return_value_operand) {
5309 using ER = ExternalReference;
5310
5311 Isolate* isolate = masm->isolate();
5313 ER::handle_scope_next_address(isolate), no_reg);
5315 ER::handle_scope_limit_address(isolate), no_reg);
5317 ER::handle_scope_level_address(isolate), no_reg);
5318
5319 Register return_value = a0;
5320 Register scratch = a4;
5321 Register scratch2 = a5;
5322
5323 // Allocate HandleScope in callee-saved registers.
5324 // We will need to restore the HandleScope after the call to the API function;
5325 // by allocating it in callee-saved registers it is preserved across the C call.
5326 Register prev_next_address_reg = s0;
5327 Register prev_limit_reg = s1;
5328 Register prev_level_reg = s2;
5329
5330 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
5331 // this function must not corrupt them (return_value overlaps with
5332 // kCArgRegs[0] but that's ok because we start using it only after the C
5333 // call).
5334 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
5335 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5336 // function_address and thunk_arg might overlap, but this function must not
5337 // corrupt them until the call is made (i.e. overlap with return_value is
5338 // fine).
5339 DCHECK(!AreAliased(function_address, // incoming parameters
5340 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5341 DCHECK(!AreAliased(thunk_arg, // incoming parameters
5342 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5343 {
5345 "Allocate HandleScope in callee-save registers.");
5346 __ Ld_d(prev_next_address_reg, next_mem_op);
5347 __ Ld_d(prev_limit_reg, limit_mem_op);
5348 __ Ld_w(prev_level_reg, level_mem_op);
5349 __ Add_w(scratch, prev_level_reg, Operand(1));
5350 __ St_w(scratch, level_mem_op);
5351 }
5352
5353 Label profiler_or_side_effects_check_enabled, done_api_call;
5354 if (with_profiling) {
5355 __ RecordComment("Check if profiler or side effects check is enabled");
5356 __ Ld_b(scratch,
5357 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
5358 __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,
5359 Operand(zero_reg));
5360#ifdef V8_RUNTIME_CALL_STATS
5361 __ RecordComment("Check if RCS is enabled");
5362 __ li(scratch, ER::address_of_runtime_stats_flag());
5363 __ Ld_w(scratch, MemOperand(scratch, 0));
5364 __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,
5365 Operand(zero_reg));
5366#endif // V8_RUNTIME_CALL_STATS
5367 }
5368
5369 __ RecordComment("Call the api function directly.");
5370 __ StoreReturnAddressAndCall(function_address);
5371 __ bind(&done_api_call);
5372
5373 Label propagate_exception;
5374 Label delete_allocated_handles;
5375 Label leave_exit_frame;
5376
5377 __ RecordComment("Load the value from ReturnValue");
5378 __ Ld_d(return_value, return_value_operand);
5379
5380 {
5382 masm,
5383 "No more valid handles (the result handle was the last one)."
5384 "Restore previous handle scope.");
5385 __ St_d(prev_next_address_reg, next_mem_op);
5386 if (v8_flags.debug_code) {
5387 __ Ld_w(scratch, level_mem_op);
5388 __ Sub_w(scratch, scratch, Operand(1));
5389 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, scratch,
5390 Operand(prev_level_reg));
5391 }
5392 __ St_w(prev_level_reg, level_mem_op);
5393 __ Ld_d(scratch, limit_mem_op);
5394 __ Branch(&delete_allocated_handles, ne, prev_limit_reg, Operand(scratch));
5395 }
5396
5397 __ RecordComment("Leave the API exit frame.");
5398 __ bind(&leave_exit_frame);
5399
5400 Register argc_reg = prev_limit_reg;
5401 if (argc_operand != nullptr) {
5402 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
5403 __ Ld_d(argc_reg, *argc_operand);
5404 }
5405
5406 __ LeaveExitFrame(scratch);
5407
5408 {
5410 "Check if the function scheduled an exception.");
5411 __ LoadRoot(scratch, RootIndex::kTheHoleValue);
5413 ER::exception_address(isolate), no_reg));
5414 __ Branch(&propagate_exception, ne, scratch, Operand(scratch2));
5415 }
5416
5417 __ AssertJSAny(return_value, scratch, scratch2,
5418 AbortReason::kAPICallReturnedInvalidObject);
5419
5420 if (argc_operand == nullptr) {
5421 DCHECK_NE(slots_to_drop_on_return, 0);
5422 __ Add_d(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
5423 } else {
5424 // {argc_operand} was loaded into {argc_reg} above.
5425 if (slots_to_drop_on_return != 0) {
5426 __ Add_d(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
5427 }
5428 __ Alsl_d(sp, argc_reg, sp, kSystemPointerSizeLog2);
5429 }
5430
5431 __ Ret();
5432
5433 if (with_profiling) {
5434 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
5435 __ bind(&profiler_or_side_effects_check_enabled);
5436 // Additional parameter is the address of the actual callback function.
5437 if (thunk_arg.is_valid()) {
5438 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
5439 IsolateFieldId::kApiCallbackThunkArgument);
5440 __ St_d(thunk_arg, thunk_arg_mem_op);
5441 }
5442 __ li(scratch, thunk_ref);
5444 __ Branch(&done_api_call);
5445 }
5446
5447 __ RecordComment("An exception was thrown. Propagate it.");
5448 __ bind(&propagate_exception);
5449 __ TailCallRuntime(Runtime::kPropagateException);
5450
5451 {
5453 masm, "HandleScope limit has changed. Delete allocated extensions.");
5454 __ bind(&delete_allocated_handles);
5455 __ St_d(prev_limit_reg, limit_mem_op);
5456 // Save the return value in a callee-saved register.
5457 Register saved_result = prev_limit_reg;
5458 __ mov(saved_result, a0);
5459 __ PrepareCallCFunction(1, prev_level_reg);
5460 __ li(kCArgRegs[0], ER::isolate_address());
5461 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
5462 __ mov(kCArgRegs[0], saved_result);
5463 __ jmp(&leave_exit_frame);
5464 }
5465}
5466
5467} // namespace internal
5468} // namespace v8
5469
5470#undef __
5471
5472#endif // V8_TARGET_ARCH_LOONG64
void xori(Register rd, Register rj, int32_t ui12)
void mulh_wu(Register rd, Register rj, Register rk)
friend class UseScratchRegisterScope
bool MustUseReg(RelocInfo::Mode rmode)
void pcaddu18i(Register rd, int32_t si20)
void srai_d(Register rd, Register rj, int32_t ui6)
void ld_h(Register rd, Register rj, int32_t si12)
void amswap_db_d(Register rd, Register rk, Register rj)
void add_w(Register rd, Register rj, Register rk)
void dbar(int32_t hint)
void sub_w(Register rd, Register rj, Register rk)
void nor(Register rd, Register rj, Register rk)
void mod_w(Register rd, Register rj, Register rk)
void bltu(Register rj, Register rd, int32_t offset)
void ld_bu(Register rd, Register rj, int32_t si12)
void ftintrne_w_d(FPURegister fd, FPURegister fj)
void shift(Operand dst, Immediate shift_amount, int subcode, int size)
void sc(Register rd, const MemOperand &rs)
void or_(Register dst, int32_t imm32)
void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk)
void sra_d(Register rd, Register rj, Register rk)
void xor_(Register dst, int32_t imm32)
void andi(Register rd, Register rj, int32_t ui12)
void st_b(Register rd, Register rj, int32_t si12)
void rotr_d(Register rd, Register rj, Register rk)
void st_h(Register rd, Register rj, int32_t si12)
void sc_d(Register rd, Register rj, int32_t si14)
void stx_d(Register rd, Register rj, Register rk)
void movgr2fcsr(Register rj, FPUControlRegister fcsr=FCSR0)
void ftintrm_l_d(FPURegister fd, FPURegister fj)
void frint_s(FPURegister fd, FPURegister fj)
void sltu(Register rd, Register rj, Register rk)
void ld_b(Register rd, Register rj, int32_t si12)
void amswap_db_w(Register rd, Register rk, Register rj)
void sc_w(Register rd, Register rj, int32_t si14)
void fstx_s(FPURegister fd, Register rj, Register rk)
void ffint_s_l(FPURegister fd, FPURegister fj)
void add_d(Register rd, Register rj, Register rk)
void addi_d(Register rd, Register rj, int32_t si12)
void andn(Register dst, Register src1, Register src2)
void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk)
void fneg_d(FPURegister fd, FPURegister fj)
int32_t branch_offset_helper(Label *L, OffsetSize bits)
void ll_w(Register rd, Register rj, int32_t si14)
void fldx_d(FPURegister fd, Register rj, Register rk)
void ll_d(Register rd, Register rj, int32_t si14)
void sltui(Register rd, Register rj, int32_t si12)
void bge(Register rj, Register rd, int32_t offset)
void ctz_w(Register rd, Register rj)
void fst_s(Operand adr)
void ffint_d_l(FPURegister fd, FPURegister fj)
void ori(Register rd, Register rj, int32_t ui12)
void fst_d(Operand adr)
void mulh_w(Register rd, Register rj, Register rk)
void lu32i_d(Register rd, int32_t si20)
void ldx_b(Register rd, Register rj, Register rk)
void ftintrz_l_d(FPURegister fd, FPURegister fj)
void maskeqz(Register rd, Register rj, Register rk)
void addi_w(Register rd, Register rj, int32_t si12)
void rotr_w(Register rd, Register rj, Register rk)
void stptr_d(Register rd, Register rj, int32_t si14)
void AdjustBaseAndOffset(MemOperand *src)
void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk)
void stx_w(Register rd, Register rj, Register rk)
void ftintrne_l_d(FPURegister fd, FPURegister fj)
void slt(Register rd, Register rj, Register rk)
void ldx_hu(Register rd, Register rj, Register rk)
void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk)
void mulh_du(Register rd, Register rj, Register rk)
void ldx_bu(Register rd, Register rj, Register rk)
void bceqz(CFRegister cj, int32_t si21)
void clz_w(Register rd, Register rj)
void ext_w_h(Register rd, Register rj)
void movgr2frh_w(FPURegister fd, Register rj)
void fstx_d(FPURegister fd, Register rj, Register rk)
void slli_d(Register rd, Register rj, int32_t ui6)
void st_d(Register rd, Register rj, int32_t si12)
void ldx_h(Register rd, Register rj, Register rk)
void bnez(Register rj, int32_t offset)
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
void ldx_w(Register rd, Register rj, Register rk)
int SizeOfCodeGeneratedSince(Label *label)
Instruction * pc() const
void mod_wu(Register rd, Register rj, Register rk)
void fld_s(Operand adr)
void orn(const VRegister &vd, const VRegister &vn, const VRegister &vm)
void srli_d(Register rd, Register rj, int32_t ui6)
void beq(Register rj, Register rd, int32_t offset)
void div_d(Register rd, Register rj, Register rk)
void sub_d(Register rd, Register rj, Register rk)
void div_du(Register rd, Register rj, Register rk)
void pcaddi(Register rd, int32_t si20)
static constexpr Builtin RecordWrite(SaveFPRegsMode fp_mode)
V8_EXPORT_PRIVATE Handle< Code > code_handle(Builtin builtin)
Definition builtins.cc:154
static constexpr Builtin RuntimeCEntry(int result_size, bool switch_to_central_stack=false)
static constexpr Builtin EphemeronKeyBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin IndirectPointerBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin CEntry(int result_size, ArgvMode argv_mode, bool builtin_exit_frame=false, bool switch_to_central_stack=false)
static const int kMarkedForDeoptimizationBit
Definition code.h:456
static const int kInvalidContext
Definition contexts.h:578
static V8_INLINE constexpr int SlotOffset(int index)
Definition contexts.h:516
static V8_EXPORT_PRIVATE const int kEagerDeoptExitSize
static V8_EXPORT_PRIVATE const int kLazyDeoptExitSize
static constexpr int kCallerSPDisplacement
static V8_EXPORT_PRIVATE ExternalReference address_of_code_pointer_table_base_address()
static ExternalReference Create(const SCTableReference &table_ref)
static constexpr uint32_t kFlagsTieringStateIsAnyRequested
static constexpr uint32_t FlagMaskForNeedsProcessingCheckFrom(CodeKind code_kind)
static constexpr int kHeaderSize
static constexpr int kMapOffset
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static const int kExternalPointerTableBasePointerOffset
static constexpr int BuiltinEntrySlotOffset(Builtin id)
static constexpr int real_jslimit_offset()
static IsolateGroup * current()
Builtins * builtins()
Definition isolate.h:1443
Address BuiltinEntry(Builtin builtin)
static bool IsAddressableThroughRootRegister(Isolate *isolate, const ExternalReference &reference)
V8_INLINE std::string CommentForOffHeapTrampoline(const char *prefix, Builtin builtin)
static int32_t RootRegisterOffsetForExternalReferenceTableEntry(Isolate *isolate, const ExternalReference &reference)
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index)
Tagged_t ReadOnlyRootPtr(RootIndex index)
void IndirectLoadConstant(Register destination, Handle< HeapObject > object)
static intptr_t RootRegisterOffsetForExternalReference(Isolate *isolate, const ExternalReference &reference)
void IndirectLoadExternalReference(Register destination, ExternalReference reference)
void Ld_b(Register rd, const MemOperand &rj)
void LoadStackLimit(Register destination, StackLimitKind kind)
void Ftintrp_w_d(FPURegister fd, FPURegister fj)
void GetObjectType(Register function, Register map, Register type_reg)
void Call(Register target, Condition cond=al)
void CallJSFunction(Register function_object, uint16_t argument_count)
void CallDebugOnFunctionCall(Register fun, Register new_target, Register expected_parameter_count, Register actual_parameter_count)
void LiLower32BitHelper(Register rd, Operand j)
void Round_d(FPURegister fd, FPURegister fj)
void St_w(Register rd, const MemOperand &rj)
void AddOverflow_d(Register dst, Register left, const Operand &right, Register overflow)
void ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend=false)
void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void MultiPopFPU(DoubleRegList regs)
void Ftintrm_l_d(FPURegister fd, FPURegister fj)
void DecompressTaggedSigned(const Register &destination, const MemOperand &field_operand)
void Drop(int count, Condition cond=al)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void MultiPushFPU(DoubleRegList regs)
void Round_s(FPURegister fd, FPURegister fj)
void Neg(const Register &rd, const Operand &operand)
void Floor_d(FPURegister fd, FPURegister fj)
void Fst_d(FPURegister fj, const MemOperand &dst)
void mov(Register rd, Register rj)
void SmiUntag(Register reg, SBit s=LeaveCC)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void Neg_s(FPURegister fd, FPURegister fj)
void AssertNotSmi(Register object, AbortReason reason=AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE
void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTagRange tag_range, Register isolate_root=Register::no_reg())
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void Sc_d(Register rd, const MemOperand &rj)
void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void TestCodeIsMarkedForDeoptimizationAndJump(Register code_data_container, Register scratch, Condition cond, Label *target)
void PushStandardFrame(Register function_reg)
void BranchFalseF(Label *target, CFRegister cc=FCC0)
void LoadZeroIfNotFPUCondition(Register dest, CFRegister=FCC0)
void Move(Register dst, Tagged< Smi > smi)
void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, bool f32=true)
void SmiTst(Register value)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void li_optimized(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void AtomicDecompressTaggedSigned(const Register &destination, const Register &base, const Register &index, const Register &temp)
void Clz_d(Register rd, Register rj)
void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw)
void StoreReturnAddressAndCall(Register target)
void Ftintrp_l_d(FPURegister fd, FPURegister fj)
void LoadZeroIfConditionZero(Register dest, Register condition)
void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void StackOverflowCheck(Register num_args, Register scratch, Label *stack_overflow)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
int32_t GetOffset(Label *L, OffsetSize bits)
void LoadTrustedPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void MulOverflow_w(Register dst, Register left, const Operand &right, Register overflow)
void Movz(Register rd, Register rj, Register rk)
void LoadRootRelative(Register destination, int32_t offset) final
void JumpIfSmi(Register value, Label *smi_label)
void Ld_d(Register rd, const MemOperand &rj)
void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch, Register result=no_reg)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
void BranchShort(Label *label, Condition cond, Register r1, const Operand &r2, bool need_link=false)
bool BranchShortOrFallback(Label *L, Condition cond, Register rj, const Operand &rk, bool need_link)
void CallCodeObject(Register code_object)
void LoadSandboxedPointerField(Register destination, MemOperand field_operand)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa)
int CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void Ll_d(Register rd, const MemOperand &rj)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void Ld_bu(Register rd, const MemOperand &rj)
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag=kDefaultCodeEntrypointTag)
void BranchFalseShortF(Label *target, CFRegister cc=FCC0)
void CompareTaggedRootAndBranch(const Register &with, RootIndex index, Condition cc, Label *target)
void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa)
void AtomicDecompressTagged(const Register &destination, const Register &base, const Register &index, const Register &temp)
void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch, Register result=no_reg)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Movn(Register rd, Register rj, Register rk)
void BranchTrueF(Label *target, CFRegister cc=FCC0)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void Fld_d(FPURegister fd, const MemOperand &src)
void Ll_w(Register rd, const MemOperand &rj)
void DecodeSandboxedPointer(Register value)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void Move_d(FPURegister dst, FPURegister src)
void JumpIfJSAnyIsNotPrimitive(Register heap_object, Register scratch, Label *target, Label::Distance distance=Label::kFar, Condition condition=Condition::kUnsignedGreaterThanEqual)
void CompareTaggedAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode)
void Ftintrne_l_d(FPURegister fd, FPURegister fj)
void SmiTag(Register reg, SBit s=LeaveCC)
void SbxCheck(Condition cc, AbortReason reason)
void Ld_h(Register rd, const MemOperand &rj)
void Ctz_w(Register rd, Register rj)
void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch)
void PushArray(Register array, Register size, Register scratch, PushArrayOrder order=PushArrayOrder::kNormal)
void Ffint_s_uw(FPURegister fd, FPURegister fj)
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void Clz_w(Register rd, Register rj)
void CompareWord(Condition cond, Register dst, Register lhs, const Operand &rhs)
void Fst_s(FPURegister fj, const MemOperand &dst)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void PushCommonFrame(Register marker_reg=no_reg)
void LoadIndirectPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void Trunc_d(FPURegister fd, FPURegister fj)
void CallIndirectPointerBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode, IndirectPointerTag tag)
void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode)
int LeaveFrame(StackFrame::Type type)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void Ftintrz_w_d(FPURegister fd, FPURegister fj)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static int InstrCountForLi64Bit(int64_t value)
void Ffint_d_ul(FPURegister fd, FPURegister fj)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
void Ftintrm_w_d(FPURegister fd, FPURegister fj)
void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void St_d(Register rd, const MemOperand &rj)
void Ceil_s(FPURegister fd, FPURegister fj)
void InsertBits(Register dest, Register source, Register pos, int size)
void StoreCodePointerField(Register value, MemOperand dst_field_operand)
void Ffint_d_uw(FPURegister fd, FPURegister fj)
void ByteSwap(Register dest, Register src, int operand_size)
void Trunc_s(FPURegister fd, FPURegister fj)
void Sc_w(Register rd, const MemOperand &rj)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void FmoveLow(Register dst_low, FPURegister src)
void Jump(Register target, Condition cond=al)
void LoadRoot(Register destination, RootIndex index) final
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void DecompressProtected(const Register &destination, const MemOperand &field_operand)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Ftintrz_l_d(FPURegister fd, FPURegister fj)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void JumpCodeObject(Register code_object, JumpMode jump_mode=JumpMode::kJump)
void Ftintrne_w_d(FPURegister fd, FPURegister fj)
void LoadZeroIfConditionNotZero(Register dest, Register condition)
void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw)
void Orn(const Register &rd, const Register &rn, const Operand &operand)
void EmitDecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void LoadFromConstantsTable(Register destination, int constant_index) final
void LoadProtectedPointerField(Register destination, MemOperand field_operand)
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void PrepareCEntryFunction(const ExternalReference &ref)
void MaybeSaveRegisters(RegList registers)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void LoadTaggedRoot(Register destination, RootIndex index)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void StoreIndirectPointerField(Register value, MemOperand dst_field_operand)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
Register GetRkAsRegisterHelper(const Operand &rk, Register scratch)
void Neg_d(FPURegister fd, FPURegister fk)
void MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
void LoadCompressedMap(Register dst, Register object)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void CallBuiltin(Builtin builtin, Condition cond=al)
void Ceil_d(FPURegister fd, FPURegister fj)
void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void LoadCodePointerField(Register destination, MemOperand field_operand)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void Xor(Register dst, Register src)
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void Check(Condition cond, AbortReason reason)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void Or(Register dst, Register src)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void Ffint_s_ul(FPURegister fd, FPURegister fj)
void StoreTrustedPointerField(Register value, MemOperand dst_field_operand)
static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, uint8_t *pc)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void StoreSandboxedPointerField(Register value, MemOperand dst_field_operand)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void MulOverflow_d(Register dst, Register left, const Operand &right, Register overflow)
static bool IsNearCallOffset(int64_t offset)
void LoadLabelRelative(Register dst, Label *target)
void Ctz_d(Register rd, Register rj)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void CompareRootAndBranch(const Register &obj, RootIndex index, Condition cc, Label *target, ComparisonMode mode=ComparisonMode::kDefault)
void Popcnt_d(Register rd, Register rj)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Branch(Label *label, bool need_link=false)
void GetInstanceTypeRange(Register map, Register type_reg, InstanceType lower_limit, Register range)
void Ld_hu(Register rd, const MemOperand &rj)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch)
void LoadZeroIfFPUCondition(Register dest, CFRegister=FCC0)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2, CFRegister cd=FCC0)
void Move_s(FPURegister dst, FPURegister src)
void BranchTrueShortF(Label *target, CFRegister cc=FCC0)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Popcnt_w(Register rd, Register rj)
void Fld_s(FPURegister fd, const MemOperand &src)
void St_b(Register rd, const MemOperand &rj)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void SmiUntagField(Register dst, const MemOperand &src)
void StubPrologue(StackFrame::Type type)
void St_h(Register rd, const MemOperand &rj)
void DecompressTagged(const Register &destination, const MemOperand &field_operand)
void StoreRootRelative(int32_t offset, Register value) final
void LoadTaggedSignedField(const Register &destination, const MemOperand &field_operand)
void LoadMap(Register destination, Register object)
void AtomicStoreTaggedField(const Register &value, const Register &dst_base, const Register &dst_index, const Register &temp)
void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd=FCC0)
void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE
void TailCallRuntime(Runtime::FunctionId fid)
void SubOverflow_d(Register dst, Register left, const Operand &right, Register overflow)
void Swap(Register srcdst0, Register srcdst1)
void Floor_s(FPURegister fd, FPURegister fj)
void LoadNativeContextSlot(Register dst, int index)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd, bool f32=true)
void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch)
void Ld_wu(Register rd, const MemOperand &rj)
void Ld_w(Register rd, const MemOperand &rj)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static constexpr intptr_t GetAlignmentMaskForAssembler()
static constexpr FPURegister from_code(int8_t code)
static const RegisterConfiguration * Default()
static constexpr Register from_code(int code)
static constexpr bool IsWasmCanonicalSigId(Mode mode)
Definition reloc-info.h:217
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
Definition reloc-info.h:206
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsJSDispatchHandle(Mode mode)
Definition reloc-info.h:254
static constexpr bool IsWasmCodePointerTableEntry(Mode mode)
Definition reloc-info.h:220
static constexpr bool IsFullEmbeddedObject(Mode mode)
Definition reloc-info.h:203
static constexpr bool IsNoInfo(Mode mode)
Definition reloc-info.h:257
static constexpr bool IsReadOnly(RootIndex root_index)
Definition roots.h:623
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static SlotDescriptor ForCodePointerSlot()
Definition assembler.h:311
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr int kFixedFrameSizeFromFp
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
bool is_empty
Definition sweeper.cc:229
DirectHandle< Object > new_target
Definition execution.cc:75
Label label
int32_t offset
TNode< Object > target
TNode< Object > receiver
ZoneVector< RpoNumber > & result
LiftoffRegister reg
uint32_t const mask
#define SmiWordOffset(offset)
#define BRANCH_ARGS_CHECK(cond, rs, rt)
SmiCheck
ComparisonMode
ArgumentAdaptionMode
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
InstructionOperand destination
int int32_t
Definition unicode.cc:40
signed short int16_t
Definition unicode.cc:38
constexpr unsigned CountTrailingZeros32(uint32_t value)
Definition bits.h:161
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr unsigned CountLeadingZeros32(uint32_t value)
Definition bits.h:122
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
V8_INLINE constexpr std::optional< RootIndex > UniqueMapOfInstanceType(InstanceType type)
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr int kCodePointerTableEntrySizeLog2
constexpr uint64_t kExternalPointerTagShift
RegListBase< DoubleRegister > DoubleRegList
Definition reglist-arm.h:15
constexpr int kTaggedSize
Definition globals.h:542
constexpr int kFPRegisterPassedArguments
DwVfpRegister DoubleRegister
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
constexpr DoubleRegister kScratchDoubleReg
constexpr uint64_t kExternalPointerPayloadMask
const int kSmiTagSize
Definition v8-internal.h:87
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr ExternalPointerTagRange kAnyExternalPointerTagRange(kFirstExternalPointerTag, kLastExternalPointerTag)
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
constexpr int kCodePointerTableEntryCodeObjectOffset
static int InstrCountForLiLower32Bit(int64_t value)
constexpr int kTrustedPointerTableEntrySizeLog2
const Address kWeakHeapObjectMask
Definition globals.h:967
constexpr int B0
constexpr Register kJavaScriptCallArgCountRegister
Address Tagged_t
Definition globals.h:547
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
Definition globals.h:557
TagRange< ExternalPointerTag > ExternalPointerTagRange
constexpr Register kScratchReg
static const int kRegisterPassedArguments
Flag flags[]
Definition flags.cc:3797
constexpr int L
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
Definition globals.h:410
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kDebugZapValue
Definition globals.h:1015
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
const DoubleRegList kCallerSavedFPU
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
constexpr uint32_t kTrustedPointerHandleShift
constexpr uint32_t kCodePointerHandleShift
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
const RegList kJSCallerSaved
Definition reglist-arm.h:23
Register ToRegister(int num)
constexpr bool SmiValuesAre32Bits()
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
constexpr Register kPtrComprCageBaseRegister
const intptr_t kSmiTagMask
Definition v8-internal.h:88
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
constexpr uint8_t kInstrSize
const int kSmiTag
Definition v8-internal.h:86
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register cp
constexpr uint64_t kTrustedPointerTableMarkBit
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
Definition globals.h:407
constexpr Register kJavaScriptCallDispatchHandleRegister
constexpr uint32_t kCodePointerHandleMarker
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
constexpr Register kJavaScriptCallNewTargetRegister
constexpr uint64_t kExternalPointerShiftedTagMask
static V8_INLINE constexpr bool ExternalPointerCanBeEmpty(ExternalPointerTagRange tag_range)
constexpr int kNumRegisters
static bool IsZero(const Operand &rt)
Local< T > Handle
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define USE(...)
Definition macros.h:293
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define V8_STATIC_ROOTS_BOOL
Definition v8config.h:1001