v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-riscv.cc
1// Copyright 2021 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h> // For LONG_MIN, LONG_MAX.
6
7#include <optional>
8
9#include "src/base/bits.h"
19#include "src/debug/debug.h"
26#include "src/runtime/runtime.h"
29
30// Satisfy cpplint check, but don't include platform-specific header. It is
31// included recursively via macro-assembler.h.
32#if 0
34#endif
35
36namespace v8 {
37namespace internal {
38
39static inline bool IsZero(const Operand& rt) {
40 if (rt.is_reg()) {
41 return rt.rm() == zero_reg;
42 } else {
43 return rt.immediate() == 0;
44 }
45}
46
48 Register exclusion1,
49 Register exclusion2,
50 Register exclusion3) const {
51 int bytes = 0;
52
53 RegList exclusions = {exclusion1, exclusion2, exclusion3};
54 RegList list = kJSCallerSaved - exclusions;
55 bytes += list.Count() * kSystemPointerSize;
56
57 if (fp_mode == SaveFPRegsMode::kSave) {
58 bytes += kCallerSavedFPU.Count() * kDoubleSize;
59 }
60
61 return bytes;
62}
63
65 Register exclusion2, Register exclusion3) {
66 int bytes = 0;
67
68 RegList exclusions = {exclusion1, exclusion2, exclusion3};
69 RegList list = kJSCallerSaved - exclusions;
70 MultiPush(list);
71 bytes += list.Count() * kSystemPointerSize;
72
73 if (fp_mode == SaveFPRegsMode::kSave) {
75 bytes += kCallerSavedFPU.Count() * kDoubleSize;
76 }
77
78 return bytes;
79}
80
82 Register exclusion2, Register exclusion3) {
83 int bytes = 0;
84 if (fp_mode == SaveFPRegsMode::kSave) {
86 bytes += kCallerSavedFPU.Count() * kDoubleSize;
87 }
88
89 RegList exclusions = {exclusion1, exclusion2, exclusion3};
90 RegList list = kJSCallerSaved - exclusions;
91 MultiPop(list);
92 bytes += list.Count() * kSystemPointerSize;
93
94 return bytes;
95}
96
97#define __ ACCESS_MASM(masm)
98namespace {
99#ifndef V8_ENABLE_LEAPTIERING
100// Only used when leaptiering is disabled.
101static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
102 Register optimized_code_entry,
103 Register scratch1, Register scratch2) {
104 // ----------- S t a t e -------------
105 // -- a0 : actual argument count
106 // -- a3 : new target (preserved for callee if needed, and caller)
107 // -- a1 : target function (preserved for callee if needed, and caller)
108 // -----------------------------------
109 ASM_CODE_COMMENT(masm);
110 DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
111
112 Label heal_optimized_code_slot;
113
114 // If the optimized code is cleared, go to runtime to update the optimization
115 // marker field.
116 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
117 &heal_optimized_code_slot);
118
119 // The entry references a CodeWrapper object. Unwrap it now.
120 __ LoadCodePointerField(
121 optimized_code_entry,
122 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset));
123
124 // Check if the optimized code is marked for deopt. If it is, call the
125 // runtime to clear it.
126 __ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch1,
127 &heal_optimized_code_slot);
128
129 // Optimized code is good, get it into the closure and link the closure into
130 // the optimized functions list, then tail call the optimized code.
131 // The feedback vector is no longer used, so reuse it as a scratch
132 // register.
133 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, a1);
134
135 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
136 __ LoadCodeInstructionStart(a2, optimized_code_entry, kJSEntrypointTag);
137 __ Jump(a2);
138
139 // Optimized code slot contains deoptimized code or code is cleared and
140 // optimized code marker isn't updated. Evict the code, update the marker
141 // and re-enter the closure's code.
142 __ bind(&heal_optimized_code_slot);
143 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
144}
145#endif // V8_ENABLE_LEAPTIERING
146
147} // namespace
148#ifdef V8_ENABLE_DEBUG_CODE
149void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
150 if (v8_flags.debug_code) {
151 GetObjectType(object, scratch, scratch);
152 Assert(eq, AbortReason::kExpectedFeedbackCell, scratch,
153 Operand(FEEDBACK_CELL_TYPE));
154 }
155}
156void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
157 if (v8_flags.debug_code) {
158 GetObjectType(object, scratch, scratch);
159 Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
160 Operand(FEEDBACK_VECTOR_TYPE));
161 }
162}
164 if (v8_flags.debug_code) Abort(reason);
165}
166#endif // V8_ENABLE_DEBUG_CODE
167
169 Register optimized_code, Register closure) {
170 ASM_CODE_COMMENT(this);
171 DCHECK(!AreAliased(optimized_code, closure));
172#ifdef V8_ENABLE_LEAPTIERING
173 UNREACHABLE();
174#else
175 StoreCodePointerField(optimized_code,
176 FieldMemOperand(closure, JSFunction::kCodeOffset));
177 RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
180#endif // V8_ENABLE_LEAPTIERING
181}
182
184 Runtime::FunctionId function_id) {
185 // ----------- S t a t e -------------
186 // -- a0 : actual argument count
187 // -- a1 : target function (preserved for callee)
188 // -- a3 : new target (preserved for callee)
189 // -----------------------------------
190 {
191 FrameScope scope(this, StackFrame::INTERNAL);
192 // Push a copy of the target function, the new target and the actual
193 // argument count.
194 // Push function as parameter to the runtime call.
198
199 CallRuntime(function_id, 1);
200 // Use the return value before restoring a0
202 // Restore target function, new target and actual argument count.
206 }
207
208 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
209 Jump(a2);
210}
211
212#ifndef V8_ENABLE_LEAPTIERING
213// Read off the flags in the feedback vector and check if there
214// is optimized code or a tiering state that needs to be processed.
216 Register flags, Register feedback_vector, CodeKind current_code_kind,
217 Label* flags_need_processing) {
218 ASM_CODE_COMMENT(this);
219 DCHECK(!AreAliased(flags, feedback_vector));
220 DCHECK(CodeKindCanTierUp(current_code_kind));
221 UseScratchRegisterScope temps(this);
222 Register scratch = temps.Acquire();
223 Lhu(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
224 uint32_t flag_mask =
226 And(scratch, flags, Operand(flag_mask));
227 Branch(flags_need_processing, ne, scratch, Operand(zero_reg));
228}
229
231 Register flags, Register feedback_vector) {
232 ASM_CODE_COMMENT(this);
233 DCHECK(!AreAliased(flags, feedback_vector));
234 Label maybe_has_optimized_code, maybe_needs_logging;
235 // Check if optimized code is available.
236 {
237 UseScratchRegisterScope temps(this);
238 temps.Include(t0, t1);
239 Register scratch = temps.Acquire();
240 And(scratch, flags,
242 Branch(&maybe_needs_logging, eq, scratch, Operand(zero_reg),
244 }
245 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
246
247 bind(&maybe_needs_logging);
248 {
249 UseScratchRegisterScope temps(this);
250 temps.Include(t0, t1);
251 Register scratch = temps.Acquire();
252 And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask));
253 Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
255 }
256
257 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
258
259 bind(&maybe_has_optimized_code);
260 Register optimized_code_entry = flags;
261 LoadTaggedField(optimized_code_entry,
262 FieldMemOperand(feedback_vector,
263 FeedbackVector::kMaybeOptimizedCodeOffset));
264 TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
265 temps.Acquire());
266}
267#endif // V8_ENABLE_LEAPTIERING
268
272
274#if V8_TARGET_ARCH_RISCV64
276 is_int12(ReadOnlyRootPtr(index))) {
278 return;
279 }
280#endif
281 // Many roots have addresses that are too large to fit into addition immediate
282 // operands. Evidence suggests that the extra instruction for decompression
283 // costs us more than the load.
284 LoadWord(destination,
286}
287
290 is_int12(ReadOnlyRootPtr(index))) {
291 li(destination, (int32_t)ReadOnlyRootPtr(index));
292 return;
293 }
294 LoadWord(destination,
296}
298 RootIndex index) {
299#ifdef V8_TARGET_ARCH_RISCV64
302#else
303 LoadWord(destination,
305#endif
306}
307
309 if (marker_reg.is_valid()) {
310 Push(ra, fp, marker_reg);
311 AddWord(fp, sp, Operand(kSystemPointerSize));
312 } else {
313 Push(ra, fp);
314 Mv(fp, sp);
315 }
316}
317
320 if (function_reg.is_valid()) {
321 Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
323 } else {
326 }
327 AddWord(fp, sp, Operand(offset));
328}
329
331 // The registers are pushed starting with the highest encoding,
332 // which means that lowest encodings are closest to the stack pointer.
333 return kSafepointRegisterStackIndexMap[reg_code];
334}
335
336// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
337// The register 'object' contains a heap object pointer. The heap object
338// tag is shifted away.
340 Register value, RAStatus ra_status,
341 SaveFPRegsMode save_fp,
342 SmiCheck smi_check,
343 ReadOnlyCheck ro_check,
344 SlotDescriptor slot) {
345 DCHECK(!AreAliased(object, value));
346 // First, check if a write barrier is even needed. The tests below
347 // catch stores of smis and read-only objects, as well as stores into the
348 // young generation.
349 Label done;
350
351 // #if V8_STATIC_ROOTS_BOOL
352 // if (ro_check == ReadOnlyCheck::kInline) {
353 // // Quick check for Read-only and small Smi values.
354 // static_assert(StaticReadOnlyRoot::kLastAllocatedRoot <
355 // kRegularPageSize); JumpIfUnsignedLessThan(value, kRegularPageSize,
356 // &done);
357 // }
358 // #endif // V8_STATIC_ROOTS_BOOL
359
360 // Skip the barrier if writing a smi.
361 if (smi_check == SmiCheck::kInline) {
362 JumpIfSmi(value, &done);
363 }
364
365 // Although the object register is tagged, the offset is relative to the start
366 // of the object, so offset must be a multiple of kTaggedSize.
368
369 if (v8_flags.slow_debug_code) {
370 Label ok;
371 UseScratchRegisterScope temps(this);
372 Register scratch = temps.Acquire();
373 DCHECK(!AreAliased(object, value, scratch));
374 AddWord(scratch, object, offset - kHeapObjectTag);
375 And(scratch, scratch, Operand(kTaggedSize - 1));
376 BranchShort(&ok, eq, scratch, Operand(zero_reg));
377 Abort(AbortReason::kUnalignedCellInWriteBarrier);
378 bind(&ok);
379 }
380
381 RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status,
382 save_fp, SmiCheck::kOmit, ReadOnlyCheck::kOmit, slot);
383
384 bind(&done);
385}
386
388 MemOperand field_operand,
389 IndirectPointerTag tag) {
390#ifdef V8_ENABLE_SANDBOX
391 LoadIndirectPointerField(destination, field_operand, tag);
392#else
393 LoadTaggedField(destination, field_operand);
394#endif
395}
396
398 MemOperand dst_field_operand) {
399#ifdef V8_ENABLE_SANDBOX
400 StoreIndirectPointerField(value, dst_field_operand);
401#else
402 StoreTaggedField(value, dst_field_operand);
403#endif
404}
405
406#ifdef V8_ENABLE_SANDBOX
407void MacroAssembler::ResolveIndirectPointerHandle(Register destination,
409 IndirectPointerTag tag) {
410 ASM_CODE_COMMENT(this);
411 // The tag implies which pointer table to use.
412 if (tag == kUnknownIndirectPointerTag) {
413 // In this case we have to rely on the handle marking to determine which
414 // pointer table to use.
415 Label is_trusted_pointer_handle, done;
418 Branch(&is_trusted_pointer_handle, eq, destination, Operand(zero_reg));
419 ResolveCodePointerHandle(destination, handle);
420 Branch(&done);
421 bind(&is_trusted_pointer_handle);
422 ResolveTrustedPointerHandle(destination, handle,
424 bind(&done);
425 } else if (tag == kCodeIndirectPointerTag) {
426 ResolveCodePointerHandle(destination, handle);
427 } else {
428 ResolveTrustedPointerHandle(destination, handle, tag);
429 }
430}
431
432void MacroAssembler::ResolveTrustedPointerHandle(Register destination,
433 Register handle,
434 IndirectPointerTag tag) {
435 ASM_CODE_COMMENT(this);
436 DCHECK_NE(tag, kCodeIndirectPointerTag);
438
439 Register table = destination;
441 LoadWord(table, MemOperand{kRootRegister,
442 IsolateData::trusted_pointer_table_offset()});
446 LoadWord(destination, MemOperand(destination, 0));
447 // The LSB is used as marking bit by the trusted pointer table, so here we
448 // have to set it using a bitwise OR as it may or may not be set.
449 // Untag the pointer and remove the marking bit in one operation.
450 Register tag_reg = handle;
451 li(tag_reg, Operand(~(tag | kTrustedPointerTableMarkBit)));
452 and_(destination, destination, tag_reg);
453}
454
455void MacroAssembler::ResolveCodePointerHandle(Register destination,
456 Register handle) {
457 ASM_CODE_COMMENT(this);
459
460 Register table = destination;
461 LoadCodePointerTableBase(table);
464 LoadWord(destination,
466 // The LSB is used as marking bit by the code pointer table, so here we have
467 // to set it using a bitwise OR as it may or may not be set.
469}
470
471void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,
472 MemOperand field_operand,
473 CodeEntrypointTag tag) {
475 ASM_CODE_COMMENT(this);
476 UseScratchRegisterScope temps(this);
477 Register scratch = temps.Acquire();
478 LoadCodePointerTableBase(scratch);
479 Lwu(destination, field_operand);
482 AddWord(scratch, scratch, destination);
483 LoadWord(destination, MemOperand(scratch, 0));
484 if (tag != 0) {
485 li(scratch, Operand(tag));
486 xor_(destination, destination, scratch);
487 }
488}
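// A hedged reading of the final xor_ above: entrypoints in the code pointer
// table appear to be stored XOR-ed with their CodeEntrypointTag, so XOR-ing
// again with the expected tag recovers the real address, while a mismatched
// tag leaves an unusable value. Sketch (names are illustrative):
//   stored      = entrypoint ^ tag
//   destination = stored ^ tag        == entrypoint   (tags match)
//   destination = stored ^ other_tag  != entrypoint   (mismatch)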
489
490void MacroAssembler::LoadCodePointerTableBase(Register destination) {
491#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
492 if (!options().isolate_independent_code && isolate()) {
493 // Embed the code pointer table address into the code.
495 ExternalReference::code_pointer_table_base_address(isolate()));
496 } else {
497 // Force indirect load via root register as a workaround for
498 // isolate-independent code (for example, for Wasm).
499 LoadWord(
503 destination));
504 }
505#else
506 // Embed the code pointer table address into the code.
507 li(destination, ExternalReference::global_code_pointer_table_base_address());
508#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
509}
510#endif // V8_ENABLE_SANDBOX
511
513 MemOperand field_operand,
515 Register isolate_root) {
516 DCHECK(!AreAliased(destination, isolate_root));
517 ASM_CODE_COMMENT(this);
518#ifdef V8_ENABLE_SANDBOX
521 UseScratchRegisterScope temps(this);
522 Register external_table = temps.Acquire();
523 if (isolate_root == no_reg) {
525 isolate_root = kRootRegister;
526 }
527 LoadWord(external_table,
528 MemOperand(isolate_root,
529 IsolateData::external_pointer_table_offset() +
531 Lwu(destination, field_operand);
532 srli(destination, destination, kExternalPointerIndexShift);
533 slli(destination, destination, kExternalPointerTableEntrySizeLog2);
534 AddWord(external_table, external_table, destination);
535 LoadWord(destination, MemOperand(external_table, 0));
536 temps.Include(external_table);
537 external_table = no_reg;
539#else
540 LoadWord(destination, field_operand);
541#endif // V8_ENABLE_SANDBOX
542}
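// Illustrative sketch of the handle decoding above (a reading of the visible
// code under the sandbox layout implied by the constants): a 32-bit external
// pointer handle is turned into a byte offset into the external pointer table
// by dropping the low kExternalPointerIndexShift bits and rescaling to the
// table's entry size:
//   offset      = (handle >> kExternalPointerIndexShift)
//                 << kExternalPointerTableEntrySizeLog2;
//   destination = *(table_base + offset);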
543
544#ifdef V8_TARGET_ARCH_RISCV64
546 MemOperand field_operand,
547 IndirectPointerTag tag) {
548#ifdef V8_ENABLE_SANDBOX
549 ASM_CODE_COMMENT(this);
550 UseScratchRegisterScope temps(this);
551 Register handle = t6;
553 Lwu(handle, field_operand);
554
555 ResolveIndirectPointerHandle(destination, handle, tag);
556#else
557 UNREACHABLE();
558#endif // V8_ENABLE_SANDBOX
559}
560
562 MemOperand dst_field_operand,
563 Trapper&& trapper) {
564#ifdef V8_ENABLE_SANDBOX
565 UseScratchRegisterScope temps(this);
566 Register scratch = temps.Acquire();
567 Lw(scratch,
568 FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset));
569 Sw(scratch, dst_field_operand, std::forward<Trapper>(trapper));
570#else
571 UNREACHABLE();
572#endif // V8_ENABLE_SANDBOX
573}
574#endif // V8_TARGET_ARCH_RISCV64
575
580
585
601
620
623 SaveFPRegsMode fp_mode,
624 StubCallMode mode) {
625 ASM_CODE_COMMENT(this);
628
630 Register slot_address_parameter =
632
633 MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
634
635 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
636
638}
639
641 SaveFPRegsMode fp_mode,
642 StubCallMode mode) {
643 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
644 // need to be caller saved.
647 if (mode == StubCallMode::kCallWasmRuntimeStub) {
648 auto wasm_target =
649 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
650 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
651 } else {
653 }
654}
655
657 Register object, Operand offset) {
658 ASM_CODE_COMMENT(this);
659 DCHECK_NE(dst_object, dst_slot);
660 // If `offset` is a register, it cannot overlap with `object`.
661 DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object);
662
663 // If the slot register does not overlap with the object register, we can
664 // overwrite it.
665 if (dst_slot != object) {
666 AddWord(dst_slot, object, offset);
667 mv(dst_object, object);
668 return;
669 }
670
671 DCHECK_EQ(dst_slot, object);
672
673 // If the destination object register does not overlap with the offset
674 // register, we can overwrite it.
675 if (offset.IsImmediate() || (offset.rm() != dst_object)) {
676 mv(dst_object, dst_slot);
677 AddWord(dst_slot, dst_slot, offset);
678 return;
679 }
680
681 DCHECK_EQ(dst_object, offset.rm());
682
683 // We only have `dst_slot` and `dst_object` left as distinct registers so we
684 // have to swap them. We write this as an add+sub sequence to avoid using a
685 // scratch register.
686 AddWord(dst_slot, dst_slot, dst_object);
687 SubWord(dst_object, dst_slot, dst_object);
688}
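// Worked trace of the add+sub sequence above, assuming the entry conditions
// checked by the DCHECKs (dst_slot == object, dst_object == offset.rm()):
//   AddWord: dst_slot   = object + offset
//   SubWord: dst_object = (object + offset) - offset = object
// i.e. both required results are produced without a scratch register.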
689
690// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
691// The register 'object' contains a heap object pointer. The heap object
692// tag is shifted away.
694 Register value, RAStatus ra_status,
695 SaveFPRegsMode fp_mode, SmiCheck smi_check,
696 ReadOnlyCheck ro_check, SlotDescriptor slot) {
697 DCHECK(!AreAliased(object, value));
698
699 if (v8_flags.slow_debug_code) {
700 UseScratchRegisterScope temps(this);
701 Register temp = temps.Acquire();
702 DCHECK(!AreAliased(object, value, temp));
703 AddWord(temp, object, offset);
704#ifdef V8_TARGET_ARCH_RISCV64
705 if (slot.contains_indirect_pointer()) {
707 slot.indirect_pointer_tag());
708 } else {
710 LoadTaggedField(temp, MemOperand(temp, 0));
711 }
712#else
713 LoadTaggedField(temp, MemOperand(temp));
714#endif
715 Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,
716 Operand(value));
717 }
718
719 if (v8_flags.disable_write_barriers) {
720 return;
721 }
722
723 // First, check if a write barrier is even needed. The tests below
724 // catch stores of smis and read-only objects, as well as stores into the
725 // young generation.
726 Label done;
727 // #if V8_STATIC_ROOTS_BOOL
728 // if (ro_check == ReadOnlyCheck::kInline) {
729 // // Quick check for Read-only and small Smi values.
730 // static_assert(StaticReadOnlyRoot::kLastAllocatedRoot <
731 // kRegularPageSize); JumpIfUnsignedLessThan(value, kRegularPageSize,
732 // &done);
733 // }
734 // #endif // V8_STATIC_ROOTS_BOOL
735
736 if (smi_check == SmiCheck::kInline) {
737 DCHECK_EQ(0, kSmiTag);
738 JumpIfSmi(value, &done);
739 }
740
742 eq, // In RISC-V, it uses cc for a comparison with 0, so if
743 // no bits are set, and cc is eq, it will branch to done
744 &done);
745
747 eq, // In RISC-V, it uses cc for a comparison with 0, so if
748 // no bits are set, and cc is eq, it will branch to done
749 &done);
750 // Record the actual write.
751 if (ra_status == kRAHasNotBeenSaved) {
752 push(ra);
753 }
755 DCHECK(!AreAliased(object, slot_address, value));
756 // TODO(cbruni): Turn offset into int.
757 if (slot.contains_direct_pointer()) {
758 DCHECK(offset.IsImmediate());
759 AddWord(slot_address, object, offset);
760 CallRecordWriteStub(object, slot_address, fp_mode,
762 } else {
764 CallIndirectPointerBarrier(object, offset, fp_mode,
765 slot.indirect_pointer_tag());
766 }
767 if (ra_status == kRAHasNotBeenSaved) {
768 pop(ra);
769 }
770 if (v8_flags.slow_debug_code) li(slot_address, Operand(kZapValue));
771
772 bind(&done);
773}
774
775// ---------------------------------------------------------------------------
776// Instruction macros.
777#if V8_TARGET_ARCH_RISCV64
779 ASM_CODE_COMMENT(this);
780#ifdef V8_ENABLE_SANDBOX
781 srli(value, value, kSandboxedPointerShift);
782 AddWord(value, value, kPtrComprCageBaseRegister);
783#else
784 UNREACHABLE();
785#endif
786}
787
789 const MemOperand& field_operand,
790 Trapper&& trapper) {
791#ifdef V8_ENABLE_SANDBOX
792 ASM_CODE_COMMENT(this);
793 LoadWord(destination, field_operand, std::forward<Trapper>(trapper));
795#else
796 UNREACHABLE();
797#endif
798}
799
801 Register value, const MemOperand& dst_field_operand, Trapper&& trapper) {
802#ifdef V8_ENABLE_SANDBOX
803 ASM_CODE_COMMENT(this);
804 UseScratchRegisterScope temps(this);
805 Register scratch = temps.Acquire();
806 SubWord(scratch, value, kPtrComprCageBaseRegister);
807 slli(scratch, scratch, kSandboxedPointerShift);
808 StoreWord(scratch, dst_field_operand, std::forward<Trapper>(trapper));
809#else
810 UNREACHABLE();
811#endif
812}
813
814void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
815 if (rt.is_reg()) {
816 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
817 ((rd.code() & 0b11000) == 0b01000) &&
818 ((rt.rm().code() & 0b11000) == 0b01000)) {
819 c_addw(rd, rt.rm());
820 } else {
821 addw(rd, rs, rt.rm());
822 }
823 } else {
824 if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
825 (rd.code() == rs.code()) && (rd != zero_reg) &&
826 !MustUseReg(rt.rmode())) {
827 c_addiw(rd, static_cast<int8_t>(rt.immediate()));
828 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
829 addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
830 } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
831 (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
832 addiw(rd, rs, rt.immediate() / 2);
833 addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
834 } else {
835 // li handles the relocation.
836 UseScratchRegisterScope temps(this);
837 Register scratch = temps.Acquire();
838 Li(scratch, rt.immediate());
839 addw(rd, rs, scratch);
840 }
841 }
842}
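// Minimal sketch of the immediate handling in Add32 above (register names
// a0/a1 are hypothetical). I-type immediates are limited to [-2048, 2047],
// so an immediate such as 3000 is split into two halves that both fit:
//   addiw a0, a1, 1500   // 3000 / 2
//   addiw a0, a0, 1500   // 3000 - 1500
// Immediates outside the split range fall back to Li into a scratch register
// followed by addw.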
843
844void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
845 if (rt.is_reg()) {
846 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
847 ((rd.code() & 0b11000) == 0b01000) &&
848 ((rt.rm().code() & 0b11000) == 0b01000)) {
849 c_subw(rd, rt.rm());
850 } else {
851 subw(rd, rs, rt.rm());
852 }
853 } else {
854 DCHECK(is_int32(rt.immediate()));
855 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
856 (rd != zero_reg) && is_int6(-rt.immediate()) &&
857 !MustUseReg(rt.rmode())) {
858 c_addiw(
859 rd,
860 static_cast<int8_t>(
861 -rt.immediate())); // No c_subiw instr, use c_addiw(x, y, -imm).
862 } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
863 addiw(rd, rs,
864 static_cast<int32_t>(
865 -rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
866 } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
867 (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
868 addiw(rd, rs, -rt.immediate() / 2);
869 addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
870 } else {
871 UseScratchRegisterScope temps(this);
872 Register scratch = temps.Acquire();
873 if (-rt.immediate() >> 12 == 0 && !MustUseReg(rt.rmode())) {
874 // Use load -imm and addw when loading -imm generates one instruction.
875 Li(scratch, -rt.immediate());
876 addw(rd, rs, scratch);
877 } else {
878 // li handles the relocation.
879 Li(scratch, rt.immediate());
880 subw(rd, rs, scratch);
881 }
882 }
883 }
884}
885
886void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
887 Add64(rd, rs, rt);
888}
889
890void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
891 Sub64(rd, rs, rt);
892}
893
894void MacroAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
895 if (rt.is_reg()) {
896 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
897 ((rd.code() & 0b11000) == 0b01000) &&
898 ((rt.rm().code() & 0b11000) == 0b01000)) {
899 c_sub(rd, rt.rm());
900 } else {
901 sub(rd, rs, rt.rm());
902 }
903 } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
904 (rd != zero_reg) && is_int6(-rt.immediate()) &&
905 (rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
906 c_addi(rd,
907 static_cast<int8_t>(
908 -rt.immediate())); // No c_subi instr, use c_addi(x, y, -imm).
909
910 } else if (v8_flags.riscv_c_extension && is_int10(-rt.immediate()) &&
911 (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
912 (rd.code() == rs.code()) && (rd == sp) &&
913 !MustUseReg(rt.rmode())) {
914 c_addi16sp(static_cast<int16_t>(-rt.immediate()));
915 } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
916 addi(rd, rs,
917 static_cast<int32_t>(
918 -rt.immediate())); // No subi instr, use addi(x, y, -imm).
919 } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
920 (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
921 addi(rd, rs, -rt.immediate() / 2);
922 addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
923 } else {
924 int li_count = InstrCountForLi64Bit(rt.immediate());
925 int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
926 if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
927 // Use load -imm and add when loading -imm generates one instruction.
928 DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
929 UseScratchRegisterScope temps(this);
930 Register scratch = temps.Acquire();
931 li(scratch, Operand(-rt.immediate()));
932 add(rd, rs, scratch);
933 } else {
934 // li handles the relocation.
935 UseScratchRegisterScope temps(this);
936 Register scratch = temps.Acquire();
937 li(scratch, rt);
938 sub(rd, rs, scratch);
939 }
940 }
941}
942
943void MacroAssembler::Add64(Register rd, Register rs, const Operand& rt) {
944 if (rt.is_reg()) {
945 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
946 (rt.rm() != zero_reg) && (rs != zero_reg)) {
947 c_add(rd, rt.rm());
948 } else {
949 add(rd, rs, rt.rm());
950 }
951 } else {
952 if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
953 (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
954 !MustUseReg(rt.rmode())) {
955 c_addi(rd, static_cast<int8_t>(rt.immediate()));
956 } else if (v8_flags.riscv_c_extension && is_int10(rt.immediate()) &&
957 (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
958 (rd.code() == rs.code()) && (rd == sp) &&
959 !MustUseReg(rt.rmode())) {
960 c_addi16sp(static_cast<int16_t>(rt.immediate()));
961 } else if (v8_flags.riscv_c_extension &&
962 ((rd.code() & 0b11000) == 0b01000) && (rs == sp) &&
963 is_uint10(rt.immediate()) && (rt.immediate() != 0) &&
964 !MustUseReg(rt.rmode())) {
965 c_addi4spn(rd, static_cast<uint16_t>(rt.immediate()));
966 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
967 addi(rd, rs, static_cast<int32_t>(rt.immediate()));
968 } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
969 (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
970 addi(rd, rs, rt.immediate() / 2);
971 addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
972 } else {
973 // li handles the relocation.
974 UseScratchRegisterScope temps(this);
975 Register scratch = temps.Acquire();
976 BlockTrampolinePoolScope block_trampoline_pool(this);
977 li(scratch, rt);
978 add(rd, rs, scratch);
979 }
980 }
981}
982
983void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
984 if (rt.is_reg()) {
985 mulw(rd, rs, rt.rm());
986 } else {
987 // li handles the relocation.
988 UseScratchRegisterScope temps(this);
989 Register scratch = temps.Acquire();
990 Li(scratch, rt.immediate());
991 mulw(rd, rs, scratch);
992 }
993}
994
995void MacroAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
996 if (rt.is_reg()) {
997 mul(rd, rs, rt.rm());
998 } else {
999 // li handles the relocation.
1000 UseScratchRegisterScope temps(this);
1001 Register scratch = temps.Acquire();
1002 Li(scratch, rt.immediate());
1003 mul(rd, rs, scratch);
1004 }
1005 srai(rd, rd, 32);
1006}
1007
1008void MacroAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
1009 Register rsz, Register rtz) {
1010 slli(rsz, rs, 32);
1011 if (rt.is_reg()) {
1012 slli(rtz, rt.rm(), 32);
1013 } else {
1014 Li(rtz, rt.immediate() << 32);
1015 }
1016 mulhu(rd, rsz, rtz);
1017 srai(rd, rd, 32);
1018}
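// Why Mulhu32 shifts both operands left by 32 first: with rsz = rs << 32 and
// rtz = rt << 32, the 128-bit product rsz * rtz equals (rs * rt) << 64, so
// mulhu leaves the full unsigned 32x32 product in rd, and srai(rd, rd, 32)
// keeps its upper 32 bits, sign-extended as RV64 expects for 32-bit results.
// Illustrative values: rs = 0xFFFFFFFF, rt = 2 gives the product 0x1FFFFFFFE,
// whose upper 32 bits are 1.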
1019
1020void MacroAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
1021 if (rt.is_reg()) {
1022 mul(rd, rs, rt.rm());
1023 } else {
1024 // li handles the relocation.
1025 UseScratchRegisterScope temps(this);
1026 Register scratch = temps.Acquire();
1027 Li(scratch, rt.immediate());
1028 mul(rd, rs, scratch);
1029 }
1030}
1031
1032void MacroAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
1033 if (rt.is_reg()) {
1034 mulh(rd, rs, rt.rm());
1035 } else {
1036 // li handles the relocation.
1037 UseScratchRegisterScope temps(this);
1038 Register scratch = temps.Acquire();
1039 Li(scratch, rt.immediate());
1040 mulh(rd, rs, scratch);
1041 }
1042}
1043
1044void MacroAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) {
1045 if (rt.is_reg()) {
1046 mulhu(rd, rs, rt.rm());
1047 } else {
1048 // li handles the relocation.
1049 UseScratchRegisterScope temps(this);
1050 Register scratch = temps.Acquire();
1051 Li(scratch, rt.immediate());
1052 mulhu(rd, rs, scratch);
1053 }
1054}
1055
1056void MacroAssembler::Div32(Register res, Register rs, const Operand& rt) {
1057 if (rt.is_reg()) {
1058 divw(res, rs, rt.rm());
1059 } else {
1060 // li handles the relocation.
1061 UseScratchRegisterScope temps(this);
1062 Register scratch = temps.Acquire();
1063 Li(scratch, rt.immediate());
1064 divw(res, rs, scratch);
1065 }
1066}
1067
1068void MacroAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
1069 if (rt.is_reg()) {
1070 remw(rd, rs, rt.rm());
1071 } else {
1072 // li handles the relocation.
1073 UseScratchRegisterScope temps(this);
1074 Register scratch = temps.Acquire();
1075 Li(scratch, rt.immediate());
1076 remw(rd, rs, scratch);
1077 }
1078}
1079
1080void MacroAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
1081 if (rt.is_reg()) {
1082 remuw(rd, rs, rt.rm());
1083 } else {
1084 // li handles the relocation.
1085 UseScratchRegisterScope temps(this);
1086 Register scratch = temps.Acquire();
1087 Li(scratch, rt.immediate());
1088 remuw(rd, rs, scratch);
1089 }
1090}
1091
1092void MacroAssembler::Div64(Register rd, Register rs, const Operand& rt) {
1093 if (rt.is_reg()) {
1094 div(rd, rs, rt.rm());
1095 } else {
1096 // li handles the relocation.
1097 UseScratchRegisterScope temps(this);
1098 Register scratch = temps.Acquire();
1099 Li(scratch, rt.immediate());
1100 div(rd, rs, scratch);
1101 }
1102}
1103
1104void MacroAssembler::Divu32(Register res, Register rs, const Operand& rt) {
1105 if (rt.is_reg()) {
1106 divuw(res, rs, rt.rm());
1107 } else {
1108 // li handles the relocation.
1109 UseScratchRegisterScope temps(this);
1110 Register scratch = temps.Acquire();
1111 Li(scratch, rt.immediate());
1112 divuw(res, rs, scratch);
1113 }
1114}
1115
1116void MacroAssembler::Divu64(Register res, Register rs, const Operand& rt) {
1117 if (rt.is_reg()) {
1118 divu(res, rs, rt.rm());
1119 } else {
1120 // li handles the relocation.
1121 UseScratchRegisterScope temps(this);
1122 Register scratch = temps.Acquire();
1123 Li(scratch, rt.immediate());
1124 divu(res, rs, scratch);
1125 }
1126}
1127
1128void MacroAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
1129 if (rt.is_reg()) {
1130 rem(rd, rs, rt.rm());
1131 } else {
1132 // li handles the relocation.
1133 UseScratchRegisterScope temps(this);
1134 Register scratch = temps.Acquire();
1135 Li(scratch, rt.immediate());
1136 rem(rd, rs, scratch);
1137 }
1138}
1139
1140void MacroAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
1141 if (rt.is_reg()) {
1142 remu(rd, rs, rt.rm());
1143 } else {
1144 // li handles the relocation.
1145 UseScratchRegisterScope temps(this);
1146 Register scratch = temps.Acquire();
1147 Li(scratch, rt.immediate());
1148 remu(rd, rs, scratch);
1149 }
1150}
1151#elif V8_TARGET_ARCH_RISCV32
1152void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) {
1153 Add32(rd, rs, rt);
1154}
1155
1156void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) {
1157 if (rt.is_reg()) {
1158 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1159 (rt.rm() != zero_reg) && (rs != zero_reg)) {
1160 c_add(rd, rt.rm());
1161 } else {
1162 add(rd, rs, rt.rm());
1163 }
1164 } else {
1165 if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
1166 (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
1167 !MustUseReg(rt.rmode())) {
1168 c_addi(rd, static_cast<int8_t>(rt.immediate()));
1169 } else if (v8_flags.riscv_c_extension && is_int10(rt.immediate()) &&
1170 (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
1171 (rd.code() == rs.code()) && (rd == sp) &&
1172 !MustUseReg(rt.rmode())) {
1173 c_addi16sp(static_cast<int16_t>(rt.immediate()));
1174 } else if (v8_flags.riscv_c_extension &&
1175 ((rd.code() & 0b11000) == 0b01000) && (rs == sp) &&
1176 is_uint10(rt.immediate()) && (rt.immediate() != 0) &&
1177 !MustUseReg(rt.rmode())) {
1178 c_addi4spn(rd, static_cast<uint16_t>(rt.immediate()));
1179 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
1180 addi(rd, rs, static_cast<int32_t>(rt.immediate()));
1181 } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
1182 (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
1183 addi(rd, rs, rt.immediate() / 2);
1184 addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
1185 } else {
1186 // li handles the relocation.
1187 UseScratchRegisterScope temps(this);
1188 Register scratch = temps.Acquire();
1189 BlockTrampolinePoolScope block_trampoline_pool(this);
1190 li(scratch, rt);
1191 add(rd, rs, scratch);
1192 }
1193 }
1194}
1195
1196void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) {
1197 Sub32(rd, rs, rt);
1198}
1199
1200void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
1201 if (rt.is_reg()) {
1202 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1203 ((rd.code() & 0b11000) == 0b01000) &&
1204 ((rt.rm().code() & 0b11000) == 0b01000)) {
1205 c_sub(rd, rt.rm());
1206 } else {
1207 sub(rd, rs, rt.rm());
1208 }
1209 } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1210 (rd != zero_reg) && is_int6(-rt.immediate()) &&
1211 (rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
1212 c_addi(rd,
1213 static_cast<int8_t>(
1214 -rt.immediate())); // No c_subi instr, use c_addi(x, y, -imm).
1215
1216 } else if (v8_flags.riscv_c_extension && is_int10(-rt.immediate()) &&
1217 (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
1218 (rd.code() == rs.code()) && (rd == sp) &&
1219 !MustUseReg(rt.rmode())) {
1220 c_addi16sp(static_cast<int16_t>(-rt.immediate()));
1221 } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
1222 addi(rd, rs,
1223 static_cast<int32_t>(
1224 -rt.immediate())); // No subi instr, use addi(x, y, -imm).
1225 } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
1226 (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
1227 addi(rd, rs, -rt.immediate() / 2);
1228 addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
1229 } else {
1230 // RV32G todo: imm64 or imm32 here
1231 int li_count = InstrCountForLi64Bit(rt.immediate());
1232 int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
1233 if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
1234 // Use load -imm and add when loading -imm generates one instruction.
1235 DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
1236 UseScratchRegisterScope temps(this);
1237 Register scratch = temps.Acquire();
1238 li(scratch, Operand(-rt.immediate()));
1239 add(rd, rs, scratch);
1240 } else {
1241 // li handles the relocation.
1242 UseScratchRegisterScope temps(this);
1243 Register scratch = temps.Acquire();
1244 li(scratch, rt);
1245 sub(rd, rs, scratch);
1246 }
1247 }
1248}
1249
1250void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
1251 Mul(rd, rs, rt);
1252}
1253
1254void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
1255 if (rt.is_reg()) {
1256 mul(rd, rs, rt.rm());
1257 } else {
1258 // li handles the relocation.
1259 UseScratchRegisterScope temps(this);
1260 Register scratch = temps.Acquire();
1261 Li(scratch, rt.immediate());
1262 mul(rd, rs, scratch);
1263 }
1264}
1265
1266void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
1267 if (rt.is_reg()) {
1268 mulh(rd, rs, rt.rm());
1269 } else {
1270 // li handles the relocation.
1271 UseScratchRegisterScope temps(this);
1272 Register scratch = temps.Acquire();
1273 Li(scratch, rt.immediate());
1274 mulh(rd, rs, scratch);
1275 }
1276}
1277
1278void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt,
1279 Register rsz, Register rtz) {
1280 if (rt.is_reg()) {
1281 mulhu(rd, rs, rt.rm());
1282 } else {
1283 // li handles the relocation.
1284 UseScratchRegisterScope temps(this);
1285 Register scratch = temps.Acquire();
1286 Li(scratch, rt.immediate());
1287 mulhu(rd, rs, scratch);
1288 }
1289}
1290
1291void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
1292 if (rt.is_reg()) {
1293 div(res, rs, rt.rm());
1294 } else {
1295 // li handles the relocation.
1296 UseScratchRegisterScope temps(this);
1297 Register scratch = temps.Acquire();
1298 Li(scratch, rt.immediate());
1299 div(res, rs, scratch);
1300 }
1301}
1302
1303void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
1304 if (rt.is_reg()) {
1305 rem(rd, rs, rt.rm());
1306 } else {
1307 // li handles the relocation.
1308 UseScratchRegisterScope temps(this);
1309 Register scratch = temps.Acquire();
1310 Li(scratch, rt.immediate());
1311 rem(rd, rs, scratch);
1312 }
1313}
1314
1315void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
1316 if (rt.is_reg()) {
1317 remu(rd, rs, rt.rm());
1318 } else {
1319 // li handles the relocation.
1320 UseScratchRegisterScope temps(this);
1321 Register scratch = temps.Acquire();
1322 Li(scratch, rt.immediate());
1323 remu(rd, rs, scratch);
1324 }
1325}
1326
1327void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
1328 if (rt.is_reg()) {
1329 divu(res, rs, rt.rm());
1330 } else {
1331 // li handles the relocation.
1332 UseScratchRegisterScope temps(this);
1333 Register scratch = temps.Acquire();
1334 Li(scratch, rt.immediate());
1335 divu(res, rs, scratch);
1336 }
1337}
1338
1339#endif
1340
1342 if (rt.is_reg()) {
1343 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1344 ((rd.code() & 0b11000) == 0b01000) &&
1345 ((rt.rm().code() & 0b11000) == 0b01000)) {
1346 c_and(rd, rt.rm());
1347 } else {
1348 and_(rd, rs, rt.rm());
1349 }
1350 } else {
1351 if (v8_flags.riscv_c_extension && is_int6(rt.immediate()) &&
1352 !MustUseReg(rt.rmode()) && (rd.code() == rs.code()) &&
1353 ((rd.code() & 0b11000) == 0b01000)) {
1354 c_andi(rd, static_cast<int8_t>(rt.immediate()));
1355 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
1356 andi(rd, rs, static_cast<int32_t>(rt.immediate()));
1357 } else {
1358 // li handles the relocation.
1359 UseScratchRegisterScope temps(this);
1360 Register scratch = temps.Acquire();
1361 Li(scratch, rt.immediate());
1362 and_(rd, rs, scratch);
1363 }
1364 }
1365}
1366
1368 if (rt.is_reg()) {
1369 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1370 ((rd.code() & 0b11000) == 0b01000) &&
1371 ((rt.rm().code() & 0b11000) == 0b01000)) {
1372 c_or(rd, rt.rm());
1373 } else {
1374 or_(rd, rs, rt.rm());
1375 }
1376 } else {
1377 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
1378 ori(rd, rs, static_cast<int32_t>(rt.immediate()));
1379 } else {
1380 // li handles the relocation.
1381 UseScratchRegisterScope temps(this);
1382 Register scratch = temps.Acquire();
1383 Li(scratch, rt.immediate());
1384 or_(rd, rs, scratch);
1385 }
1386 }
1387}
1388
1390 if (rt.is_reg()) {
1391 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1392 ((rd.code() & 0b11000) == 0b01000) &&
1393 ((rt.rm().code() & 0b11000) == 0b01000)) {
1394 c_xor(rd, rt.rm());
1395 } else {
1396 xor_(rd, rs, rt.rm());
1397 }
1398 } else {
1399 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
1400 xori(rd, rs, static_cast<int32_t>(rt.immediate()));
1401 } else {
1402 // li handles the relocation.
1403 UseScratchRegisterScope temps(this);
1404 Register scratch = temps.Acquire();
1405 Li(scratch, rt.immediate());
1406 xor_(rd, rs, scratch);
1407 }
1408 }
1409}
1410
1411void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1412 if (rt.is_reg()) {
1413 or_(rd, rs, rt.rm());
1414 not_(rd, rd);
1415 } else {
1416 Or(rd, rs, rt);
1417 not_(rd, rd);
1418 }
1419}
1420
1421void MacroAssembler::Neg(Register rs, const Operand& rt) {
1422 DCHECK(rt.is_reg());
1423 neg(rs, rt.rm());
1424}
1425
1426void MacroAssembler::Seqz(Register rd, const Operand& rt) {
1427 if (rt.is_reg()) {
1428 seqz(rd, rt.rm());
1429 } else {
1430 li(rd, rt.immediate() == 0);
1431 }
1432}
1433
1434void MacroAssembler::Snez(Register rd, const Operand& rt) {
1435 if (rt.is_reg()) {
1436 snez(rd, rt.rm());
1437 } else {
1438 li(rd, rt.immediate() != 0);
1439 }
1440}
1441
1442void MacroAssembler::Seq(Register rd, Register rs, const Operand& rt) {
1443 if (rs == zero_reg) {
1444 Seqz(rd, rt);
1445 } else if (IsZero(rt)) {
1446 seqz(rd, rs);
1447 } else {
1448 SubWord(rd, rs, rt);
1449 seqz(rd, rd);
1450 }
1451}
1452
1453void MacroAssembler::Sne(Register rd, Register rs, const Operand& rt) {
1454 if (rs == zero_reg) {
1455 Snez(rd, rt);
1456 } else if (IsZero(rt)) {
1457 snez(rd, rs);
1458 } else {
1459 SubWord(rd, rs, rt);
1460 snez(rd, rd);
1461 }
1462}
1463
1464void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1465 if (rt.is_reg()) {
1466 slt(rd, rs, rt.rm());
1467 } else {
1468 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
1469 slti(rd, rs, static_cast<int32_t>(rt.immediate()));
1470 } else {
1471 // li handles the relocation.
1472 UseScratchRegisterScope temps(this);
1473 Register scratch = temps.Acquire();
1474 BlockTrampolinePoolScope block_trampoline_pool(this);
1475 Li(scratch, rt.immediate());
1476 slt(rd, rs, scratch);
1477 }
1478 }
1479}
1480
1481void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1482 if (rt.is_reg()) {
1483 sltu(rd, rs, rt.rm());
1484 } else {
1485 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
1486 sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
1487 } else {
1488 // li handles the relocation.
1489 UseScratchRegisterScope temps(this);
1490 Register scratch = temps.Acquire();
1491 BlockTrampolinePoolScope block_trampoline_pool(this);
1492 Li(scratch, rt.immediate());
1493 sltu(rd, rs, scratch);
1494 }
1495 }
1496}
1497
1498void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) {
1499 if (rt.is_reg()) {
1500 slt(rd, rt.rm(), rs);
1501 } else {
1502 // li handles the relocation.
1503 UseScratchRegisterScope temps(this);
1504 Register scratch = temps.Acquire();
1505 BlockTrampolinePoolScope block_trampoline_pool(this);
1506 Li(scratch, rt.immediate());
1507 slt(rd, scratch, rs);
1508 }
1509 xori(rd, rd, 1);
1510}
1511
1512void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
1513 if (rt.is_reg()) {
1514 sltu(rd, rt.rm(), rs);
1515 } else {
1516 // li handles the relocation.
1517 UseScratchRegisterScope temps(this);
1518 Register scratch = temps.Acquire();
1519 BlockTrampolinePoolScope block_trampoline_pool(this);
1520 Li(scratch, rt.immediate());
1521 sltu(rd, scratch, rs);
1522 }
1523 xori(rd, rd, 1);
1524}
1525
1526void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) {
1527 Slt(rd, rs, rt);
1528 xori(rd, rd, 1);
1529}
1530
1531void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
1532 Sltu(rd, rs, rt);
1533 xori(rd, rd, 1);
1534}
1535
1536void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
1537 if (rt.is_reg()) {
1538 slt(rd, rt.rm(), rs);
1539 } else {
1540 // li handles the relocation.
1541 UseScratchRegisterScope temps(this);
1542 Register scratch = temps.Acquire();
1543 BlockTrampolinePoolScope block_trampoline_pool(this);
1544 Li(scratch, rt.immediate());
1545 slt(rd, scratch, rs);
1546 }
1547}
1548
1549void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
1550 if (rt.is_reg()) {
1551 sltu(rd, rt.rm(), rs);
1552 } else {
1553 // li handles the relocation.
1554 UseScratchRegisterScope temps(this);
1555 Register scratch = temps.Acquire();
1556 BlockTrampolinePoolScope block_trampoline_pool(this);
1557 Li(scratch, rt.immediate());
1558 sltu(rd, scratch, rs);
1559 }
1560}
1561
1562#if V8_TARGET_ARCH_RISCV64
1563void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
1564 if (rt.is_reg()) {
1565 sllw(rd, rs, rt.rm());
1566 } else {
1567 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1568 slliw(rd, rs, shamt);
1569 }
1570}
1571
1572void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
1573 if (rt.is_reg()) {
1574 sraw(rd, rs, rt.rm());
1575 } else {
1576 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1577 sraiw(rd, rs, shamt);
1578 }
1579}
1580
1581void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
1582 if (rt.is_reg()) {
1583 srlw(rd, rs, rt.rm());
1584 } else {
1585 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1586 srliw(rd, rs, shamt);
1587 }
1588}
1589
1590void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
1591 Sra64(rd, rs, rt);
1592}
1593
1594void MacroAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
1595 if (rt.is_reg()) {
1596 sra(rd, rs, rt.rm());
1597 } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1598 ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
1599 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1600 c_srai(rd, shamt);
1601 } else {
1602 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1603 srai(rd, rs, shamt);
1604 }
1605}
1606
1607void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
1608 Srl64(rd, rs, rt);
1609}
1610
1611void MacroAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
1612 if (rt.is_reg()) {
1613 srl(rd, rs, rt.rm());
1614 } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1615 ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
1616 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1617 c_srli(rd, shamt);
1618 } else {
1619 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1620 srli(rd, rs, shamt);
1621 }
1622}
1623
1624void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
1625 Sll64(rd, rs, rt);
1626}
1627
1628void MacroAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
1629 if (rt.is_reg()) {
1630 sll(rd, rs, rt.rm());
1631 } else {
1632 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1633 if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) &&
1634 (rd != zero_reg) && (shamt != 0) && is_uint6(shamt)) {
1635 c_slli(rd, shamt);
1636 } else {
1637 slli(rd, rs, shamt);
1638 }
1639 }
1640}
1641
1642void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1643 if (CpuFeatures::IsSupported(ZBB)) {
1644 if (rt.is_reg()) {
1645 rorw(rd, rs, rt.rm());
1646 } else {
1647 int64_t ror_value = rt.immediate() % 32;
1648 if (ror_value < 0) {
1649 ror_value += 32;
1650 }
1651 roriw(rd, rs, ror_value);
1652 }
1653 return;
1654 }
1655 UseScratchRegisterScope temps(this);
1656 Register scratch = temps.Acquire();
1657 BlockTrampolinePoolScope block_trampoline_pool(this);
1658 if (rt.is_reg()) {
1659 negw(scratch, rt.rm());
1660 sllw(scratch, rs, scratch);
1661 srlw(rd, rs, rt.rm());
1662 or_(rd, scratch, rd);
1663 sext_w(rd, rd);
1664 } else {
1665 int64_t ror_value = rt.immediate() % 32;
1666 if (ror_value == 0) {
1667 Mv(rd, rs);
1668 return;
1669 } else if (ror_value < 0) {
1670 ror_value += 32;
1671 }
1672 srliw(scratch, rs, ror_value);
1673 slliw(rd, rs, 32 - ror_value);
1674 or_(rd, scratch, rd);
1675 sext_w(rd, rd);
1676 }
1677}
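// Without Zbb, Ror above synthesizes a 32-bit rotate-right from shifts:
// rd = (rs >> r) | (rs << (32 - r)), then sign-extended. A sketch for the
// immediate path with hypothetical rs = 0x80000001 and r = 1:
//   srliw scratch, rs, 1    // 0x40000000
//   slliw rd, rs, 31        // 0x80000000
//   or    rd, scratch, rd   // 0xC0000000 (rotate-right by 1)
//   sext.w rd, rd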
1678
1679void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1680 if (CpuFeatures::IsSupported(ZBB)) {
1681 if (rt.is_reg()) {
1682 ror(rd, rs, rt.rm());
1683 } else {
1684 int64_t dror_value = rt.immediate() % 64;
1685 if (dror_value < 0) {
1686 dror_value += 64;
1687 }
1688 rori(rd, rs, dror_value);
1689 }
1690 return;
1691 }
1692 UseScratchRegisterScope temps(this);
1693 Register scratch = temps.Acquire();
1694 BlockTrampolinePoolScope block_trampoline_pool(this);
1695 if (rt.is_reg()) {
1696 negw(scratch, rt.rm());
1697 sll(scratch, rs, scratch);
1698 srl(rd, rs, rt.rm());
1699 or_(rd, scratch, rd);
1700 } else {
1701 int64_t dror_value = rt.immediate() % 64;
1702 if (dror_value == 0) {
1703 Mv(rd, rs);
1704 return;
1705 } else if (dror_value < 0) {
1706 dror_value += 64;
1707 }
1708 srli(scratch, rs, dror_value);
1709 slli(rd, rs, 64 - dror_value);
1710 or_(rd, scratch, rd);
1711 }
1712}
1713#elif V8_TARGET_ARCH_RISCV32
1714void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) {
1715 Sll32(rd, rs, rt);
1716}
1717
1718void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
1719 if (rt.is_reg()) {
1720 sll(rd, rs, rt.rm());
1721 } else {
1722 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1723 slli(rd, rs, shamt);
1724 }
1725}
1726
1727void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) {
1728 Sra32(rd, rs, rt);
1729}
1730
1731void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
1732 if (rt.is_reg()) {
1733 sra(rd, rs, rt.rm());
1734 } else {
1735 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1736 srai(rd, rs, shamt);
1737 }
1738}
1739
1740void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) {
1741 Srl32(rd, rs, rt);
1742}
1743
1744void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
1745 if (rt.is_reg()) {
1746 srl(rd, rs, rt.rm());
1747 } else {
1748 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
1749 srli(rd, rs, shamt);
1750 }
1751}
1752
1753void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1754 if (CpuFeatures::IsSupported(ZBB)) {
1755 if (rt.is_reg()) {
1756 ror(rd, rs, rt.rm());
1757 } else {
1758 int32_t ror_value = rt.immediate() % 32;
1759 if (ror_value < 0) {
1760 ror_value += 32;
1761 }
1762 rori(rd, rs, ror_value);
1763 }
1764 return;
1765 }
1766 UseScratchRegisterScope temps(this);
1767 Register scratch = temps.Acquire();
1768 BlockTrampolinePoolScope block_trampoline_pool(this);
1769 if (rt.is_reg()) {
1770 neg(scratch, rt.rm());
1771 sll(scratch, rs, scratch);
1772 srl(rd, rs, rt.rm());
1773 or_(rd, scratch, rd);
1774 } else {
1775 int32_t ror_value = rt.immediate() % 32;
1776 if (ror_value == 0) {
1777 Mv(rd, rs);
1778 return;
1779 } else if (ror_value < 0) {
1780 ror_value += 32;
1781 }
1782 srli(scratch, rs, ror_value);
1783 slli(rd, rs, 32 - ror_value);
1784 or_(rd, scratch, rd);
1785 }
1786}
1787#endif
1788
1789void MacroAssembler::Li(Register rd, intptr_t imm) {
1790 if (v8_flags.riscv_c_extension && (rd != zero_reg) && is_int6(imm)) {
1791 c_li(rd, imm);
1792 } else {
1793 RV_li(rd, imm);
1794 }
1795}
1796
1797void MacroAssembler::Mv(Register rd, const Operand& rt) {
1798 if (v8_flags.riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {
1799 c_mv(rd, rt.rm());
1800 } else {
1801 mv(rd, rt.rm());
1802 }
1803}
1804
1806 uint8_t sa) {
1807 DCHECK(sa >= 1 && sa <= 31);
1808 if (CpuFeatures::IsSupported(ZBA)) {
1809 switch (sa) {
1810 case 1:
1811 sh1add(rd, rs, rt);
1812 return;
1813 case 2:
1814 sh2add(rd, rs, rt);
1815 return;
1816 case 3:
1817 sh3add(rd, rs, rt);
1818 return;
1819 default:
1820 break;
1821 }
1822 }
1823 UseScratchRegisterScope temps(this);
1824 Register tmp = rd == rt ? temps.Acquire() : rd;
1825 DCHECK(tmp != rt);
1826 slli(tmp, rs, sa);
1827 AddWord(rd, rt, tmp);
1828 return;
1829}
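// The scaled-address helper above computes rd = rt + (rs << sa). With Zba,
// shift amounts 1-3 map directly onto sh1add/sh2add/sh3add, e.g. indexing an
// array of 8-byte elements (registers are hypothetical):
//   sh3add a0, a1, a2       // a0 = a2 + (a1 << 3)
// Without Zba it falls back to slli into a temporary followed by AddWord.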
1830
1831// ------------Pseudo-instructions-------------
1832// Change endianness
1833
1834template <int NBYTES>
1836 Register tmp2) {
1837 DCHECK(tmp1 != tmp2);
1838 DCHECK((rs != tmp1) && (rs != tmp2));
1839 DCHECK((rd != tmp1) && (rd != tmp2));
1840
1841 // ByteMask - the maximum value that a single byte can hold
1842 constexpr int ByteMask = (1 << kBitsPerByte) - 1;
1843 // tmp1 = rs[0]; take least byte
1844 // tmp1 = tmp1 << kBitsPerByte;
1845 // for (nbyte = 1; nbyte < NBYTES - 1; nbyte++) {
1846 // tmp2 = rs[nbyte]; take nth byte
1847 // tmp1 = (tmp2 | tmp1) << kBitsPerByte; add nth source byte to tmp1
1848 // }
1849 // rd[0] = rs[NBYTES-1]; take upper byte
1850 // rd[NBYTES-1 : 1] = tmp1[NBYTES-1 : 1]; fill other bytes
1851 andi(tmp1, rs, ByteMask);
1852 slli(tmp1, tmp1, kBitsPerByte);
1853 for (int nbyte = 1; nbyte < NBYTES - 1; nbyte++) {
1854 srli(tmp2, rs, nbyte * kBitsPerByte);
1855 andi(tmp2, tmp2, ByteMask);
1856 or_(tmp1, tmp1, tmp2);
1857 slli(tmp1, tmp1, kBitsPerByte);
1858 }
1859 srli(rd, rs, (NBYTES - 1) * kBitsPerByte);
1860 andi(rd, rd, ByteMask);
1861 or_(rd, tmp1, rd);
1862}
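// Worked trace of ReverseBytesHelper for NBYTES == 4 with the illustrative
// value rs = 0x11223344:
//   tmp1 = 0x44 << 8                     = 0x00004400
//   nbyte 1: tmp1 = (tmp1 | 0x33) << 8   = 0x00443300
//   nbyte 2: tmp1 = (tmp1 | 0x22) << 8   = 0x44332200
//   rd   = (rs >> 24) & 0xFF             = 0x00000011
//   rd   = tmp1 | rd                     = 0x44332211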
1863
1864#if V8_TARGET_ARCH_RISCV64
1865void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size,
1866 Register scratch) {
1867 DCHECK(operand_size == 4 || operand_size == 8);
1868 if (CpuFeatures::IsSupported(ZBB)) {
1869 rev8(rd, rs);
1870 if (operand_size == 4) {
1871 srai(rd, rd, 32);
1872 }
1873 return;
1874 }
1875 UseScratchRegisterScope temps(this);
1876 temps.Include(t4, t6);
1877 Register x0 = temps.Acquire();
1878 Register x1 = temps.Acquire();
1879 DCHECK(!AreAliased(rs, rd, x0, x1, scratch));
1880 BlockTrampolinePoolScope block_trampoline_pool(this);
1881 if (operand_size == 4) {
1882 DCHECK((rd != t6) && (rs != t6));
1883 if (scratch == no_reg) {
1884 ReverseBytesHelper<8>(rd, rs, x0, x1);
1885 srai(rd, rd, 32);
1886 } else {
1887 // uint32_t x1 = 0x00FF00FF;
1888 // x0 = (x0 << 16 | x0 >> 16);
1889 // x0 = (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8));
1890 Register x2 = scratch;
1891 li(x1, 0x00FF00FF);
1892 slliw(x0, rs, 16);
1893 srliw(rd, rs, 16);
1894 or_(x0, rd, x0); // x0 <- x0 << 16 | x0 >> 16
1895 and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF
1896 slliw(x2, x2, 8); // x2 <- (x0 & x1) << 8
1897 slliw(x1, x1, 8); // x1 <- 0xFF00FF00
1898 and_(rd, x0, x1); // x0 & 0xFF00FF00
1899 srliw(rd, rd, 8);
1900 or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
1901 }
1902 } else {
1903 DCHECK((rd != t6) && (rs != t6));
1904 if (scratch == no_reg) {
1905 ReverseBytesHelper<8>(rd, rs, x0, x1);
1906 } else {
1907 // uint64_t x1 = 0x0000FFFF0000FFFFl;
1908 // uint64_t x1 = 0x00FF00FF00FF00FFl;
1909 // x0 = (x0 << 32 | x0 >> 32);
1910 // x0 = (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
1911 // x0 = (x0 & x1) << 8 | (x0 & (x1 << 8)) >> 8;
1912 Register x2 = scratch;
1913 li(x1, 0x0000FFFF0000FFFFl);
1914 slli(x0, rs, 32);
1915 srli(rd, rs, 32);
1916 or_(x0, rd, x0); // x0 <- x0 << 32 | x0 >> 32
1917 and_(x2, x0, x1); // x2 <- x0 & 0x0000FFFF0000FFFF
1918 slli(x2, x2, 16); // x2 <- (x0 & 0x0000FFFF0000FFFF) << 16
1919 slli(x1, x1, 16); // x1 <- 0xFFFF0000FFFF0000
1920 and_(rd, x0, x1); // rd <- x0 & 0xFFFF0000FFFF0000
1921 srli(rd, rd, 16); // rd <- x0 & (x1 << 16)) >> 16
1922 or_(x0, rd, x2); // (x0 & x1) << 16 | (x0 & (x1 << 16)) >> 16;
1923 li(x1, 0x00FF00FF00FF00FFl);
1924 and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF00FF00FF
1925 slli(x2, x2, 8); // x2 <- (x0 & x1) << 8
1926 slli(x1, x1, 8); // x1 <- 0xFF00FF00FF00FF00
1927 and_(rd, x0, x1);
1928 srli(rd, rd, 8); // rd <- (x0 & (x1 << 8)) >> 8
1929 or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
1930 }
1931 }
1932}
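// Worked trace of the masked-shift path in ByteSwap above (scratch provided,
// operand_size == 4) with the illustrative value rs = 0x11223344:
//   x0 = (rs << 16) | (rs >> 16)         = 0x33441122
//   x2 = (x0 & 0x00FF00FF) << 8          = 0x44002200
//   rd = (x0 & 0xFF00FF00) >> 8          = 0x00330011
//   rd = rd | x2                         = 0x44332211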
1933
1934#elif V8_TARGET_ARCH_RISCV32
1935void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size,
1936 Register scratch) {
1937 if (CpuFeatures::IsSupported(ZBB)) {
1938 rev8(rd, rs);
1939 return;
1940 }
1941 DCHECK_NE(scratch, rs);
1942 DCHECK_NE(scratch, rd);
1943 UseScratchRegisterScope temps(this);
1944 BlockTrampolinePoolScope block_trampoline_pool(this);
1945 DCHECK((rd != t6) && (rs != t6));
1946 Register x0 = temps.Acquire();
1947 Register x1 = temps.Acquire();
1948 if (scratch == no_reg) {
1949 ReverseBytesHelper<4>(rd, rs, x0, x1);
1950 } else {
1951 // uint32_t x1 = 0x00FF00FF;
1952 // x0 = (x0 << 16 | x0 >> 16);
1953 // x0 = (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8));
1954 Register x2 = scratch;
1955 li(x1, 0x00FF00FF);
1956 slli(x0, rs, 16);
1957 srli(rd, rs, 16);
1958 or_(x0, rd, x0); // x0 <- x0 << 16 | x0 >> 16
1959 and_(x2, x0, x1); // x2 <- x0 & 0x00FF00FF
1960 slli(x2, x2, 8); // x2 <- (x0 & x1) << 8
1961 slli(x1, x1, 8); // x1 <- 0xFF00FF00
1962 and_(rd, x0, x1); // x0 & 0xFF00FF00
1963 srli(rd, rd, 8);
1964 or_(rd, rd, x2); // (((x0 & x1) << 8) | ((x0 & (x1 << 8)) >> 8))
1965 }
1966}
1967#endif
1968
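// LoadNBytes assembles an unaligned NBYTES-wide load one byte at a time: the
// most significant byte is loaded first (sign- or zero-extended according to
// LOAD_SIGNED) and the remaining bytes are OR-ed in below it, so only one
// scratch register is needed in addition to the destination.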
1969template <int NBYTES, bool LOAD_SIGNED>
1970void MacroAssembler::LoadNBytes(Register rd, const MemOperand& rs,
1971 Register scratch) {
1972 DCHECK(rd != rs.rm() && rd != scratch);
1973 DCHECK_LE(NBYTES, 8);
1974
1975 // load the most significant byte
1976 if (LOAD_SIGNED) {
1977 lb(rd, rs.rm(), rs.offset() + (NBYTES - 1));
1978 } else {
1979 lbu(rd, rs.rm(), rs.offset() + (NBYTES - 1));
1980 }
1981
1982 // load remaining (nbytes-1) bytes from higher to lower
1983 slli(rd, rd, 8 * (NBYTES - 1));
1984 for (int i = (NBYTES - 2); i >= 0; i--) {
1985 lbu(scratch, rs.rm(), rs.offset() + i);
1986 if (i) slli(scratch, scratch, i * 8);
1987 or_(rd, rd, scratch);
1988 }
1989}
1990
1991template <int NBYTES, bool LOAD_SIGNED>
1992void MacroAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs,
1993 Register scratch0,
1994 Register scratch1) {
1995 // This function loads NBYTES bytes from memory at rs and writes the result into rs.rm()
1996 DCHECK(rs.rm() != scratch0 && rs.rm() != scratch1 && scratch0 != scratch1);
1997 DCHECK_LE(NBYTES, 8);
1998
1999 // load the most significant byte
2000 if (LOAD_SIGNED) {
2001 lb(scratch0, rs.rm(), rs.offset() + (NBYTES - 1));
2002 } else {
2003 lbu(scratch0, rs.rm(), rs.offset() + (NBYTES - 1));
2004 }
2005
2006 // load remaining (nbytes-1) bytes from higher to lower
2007 slli(scratch0, scratch0, 8 * (NBYTES - 1));
2008 for (int i = (NBYTES - 2); i >= 0; i--) {
2009 lbu(scratch1, rs.rm(), rs.offset() + i);
2010 if (i) {
2011 slli(scratch1, scratch1, i * 8);
2012 or_(scratch0, scratch0, scratch1);
2013 } else {
2014 // write to rs.rm() when processing the last byte
2015 or_(rs.rm(), scratch0, scratch1);
2016 }
2017 }
2018}
2019
2020template <int NBYTES, bool IS_SIGNED>
2021void MacroAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
2022 BlockTrampolinePoolScope block_trampoline_pool(this);
2023 UseScratchRegisterScope temps(this);
2024
2025 if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
2026 // Adjust offset for two accesses and check if offset + 3 fits into int12.
2027 MemOperand source = rs;
2028 Register scratch_base = temps.Acquire();
2029 DCHECK(scratch_base != rs.rm());
2030 AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
2031 NBYTES - 1);
2032
2033 // Since source.rm() is scratch_base, assume rd != source.rm()
2034 DCHECK(rd != source.rm());
2035 Register scratch_other = temps.Acquire();
2036 LoadNBytes<NBYTES, IS_SIGNED>(rd, source, scratch_other);
2037 } else {
2038 // no need to adjust base-and-offset
2039 if (rd != rs.rm()) {
2040 Register scratch = temps.Acquire();
2041 LoadNBytes<NBYTES, IS_SIGNED>(rd, rs, scratch);
2042 } else { // rd == rs.rm()
2043 Register scratch = temps.Acquire();
2044 Register scratch2 = temps.Acquire();
2045 LoadNBytesOverwritingBaseReg<NBYTES, IS_SIGNED>(rs, scratch, scratch2);
2046 }
2047 }
2048}
2049
2050#if V8_TARGET_ARCH_RISCV64
2051template <int NBYTES>
2052void MacroAssembler::UnalignedFLoadHelper(FPURegister frd,
2053 const MemOperand& rs) {
2054 DCHECK(NBYTES == 4 || NBYTES == 8);
2055 BlockTrampolinePoolScope block_trampoline_pool(this);
2056 MemOperand source = rs;
2057 UseScratchRegisterScope temps(this);
2058 Register scratch_base = temps.Acquire();
2060 // Adjust offset for two accesses and check if offset + 3 fits into int12.
2061 DCHECK(scratch_base != rs.rm());
2063 NBYTES - 1);
2064 }
2065 temps.Include(t4, t6);
2066 Register scratch = temps.Acquire();
2067 Register scratch_other = temps.Acquire();
2068 DCHECK(scratch != rs.rm() && scratch_other != scratch &&
2069 scratch_other != rs.rm());
2070 LoadNBytes<NBYTES, true>(scratch, source, scratch_other);
2071 if (NBYTES == 4)
2072 fmv_w_x(frd, scratch);
2073 else
2074 fmv_d_x(frd, scratch);
2075}
2076#elif V8_TARGET_ARCH_RISCV32
2077template <int NBYTES>
2078void MacroAssembler::UnalignedFLoadHelper(FPURegister frd,
2079 const MemOperand& rs) {
2080 DCHECK_EQ(NBYTES, 4);
2081 BlockTrampolinePoolScope block_trampoline_pool(this);
2082 MemOperand source = rs;
2083 UseScratchRegisterScope temps(this);
2084 Register scratch_base = temps.Acquire();
2086 // Adjust offset for two accesses and check if offset + 3 fits into int12.
2087 DCHECK(scratch_base != rs.rm());
2089 NBYTES - 1);
2090 }
2091 temps.Include(t4, t6);
2092 Register scratch = temps.Acquire();
2093 Register scratch_other = temps.Acquire();
2094 DCHECK(scratch != rs.rm() && scratch_other != scratch &&
2095 scratch_other != rs.rm());
2096 LoadNBytes<NBYTES, true>(scratch, source, scratch_other);
2097 fmv_w_x(frd, scratch);
2098}
2099
2100void MacroAssembler::UnalignedDoubleHelper(FPURegister frd,
2101 const MemOperand& rs) {
2102 BlockTrampolinePoolScope block_trampoline_pool(this);
2103 MemOperand source = rs;
2104 UseScratchRegisterScope temps(this);
2105 Register scratch_base = temps.Acquire();
2107 // Adjust offset for two accesses and check if offset + 3 fits into int12.
2108 DCHECK(scratch_base != rs.rm());
2110 8 - 1);
2111 }
2112 temps.Include(t4, t6);
2113 Register scratch = temps.Acquire();
2114 Register scratch_other = temps.Acquire();
2115 DCHECK(scratch != rs.rm() && scratch_other != scratch &&
2116 scratch_other != rs.rm());
2117 LoadNBytes<4, true>(scratch, source, scratch_other);
2118 SubWord(sp, sp, 8);
2119 Sw(scratch, MemOperand(sp, 0));
2120 source.set_offset(source.offset() + 4);
2121 LoadNBytes<4, true>(scratch, source, scratch_other);
2122 Sw(scratch, MemOperand(sp, 4));
2123 LoadDouble(frd, MemOperand(sp, 0));
2124 AddWord(sp, sp, 8);
2125}
2126#endif
2127
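// UnalignedStoreHelper mirrors the unaligned loads above: the low byte of rd
// is stored first and each remaining byte is obtained by shifting rd right by
// a multiple of 8 bits into scratch_other, so no alignment of the target
// address is assumed.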
2128template <int NBYTES>
2129void MacroAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
2130 Register scratch_other) {
2131 DCHECK(scratch_other != rs.rm());
2132 DCHECK_NE(scratch_other, no_reg);
2133 DCHECK_LE(NBYTES, 8);
2134 MemOperand source = rs;
2135 UseScratchRegisterScope temps(this);
2136 Register scratch_base = temps.Acquire();
2137 // Adjust offset for two accesses and check if offset + 3 fits into int12.
2139 DCHECK(scratch_base != rd && scratch_base != rs.rm());
2141 NBYTES - 1);
2142 }
2143
2144 BlockTrampolinePoolScope block_trampoline_pool(this);
2145 DCHECK(scratch_other != rd && scratch_other != rs.rm() &&
2146 scratch_other != source.rm());
2147
2148 sb(rd, source.rm(), source.offset());
2149 for (size_t i = 1; i <= (NBYTES - 1); i++) {
2150 srli(scratch_other, rd, i * 8);
2151 sb(scratch_other, source.rm(), source.offset() + i);
2152 }
2153}
2154
2155#if V8_TARGET_ARCH_RISCV64
2156template <int NBYTES>
2157void MacroAssembler::UnalignedFStoreHelper(FPURegister frd,
2158 const MemOperand& rs) {
2159 DCHECK(NBYTES == 8 || NBYTES == 4);
2160 UseScratchRegisterScope temps(this);
2161 Register scratch = temps.Acquire();
2162 if (NBYTES == 4) {
2163 fmv_x_w(scratch, frd);
2164 } else {
2165 fmv_x_d(scratch, frd);
2166 }
2167 UnalignedStoreHelper<NBYTES>(scratch, rs, t4);
2168}
2169#elif V8_TARGET_ARCH_RISCV32
2170template <int NBYTES>
2171void MacroAssembler::UnalignedFStoreHelper(FPURegister frd,
2172 const MemOperand& rs) {
2173 DCHECK_EQ(NBYTES, 4);
2174 UseScratchRegisterScope temps(this);
2175 Register scratch = temps.Acquire();
2176 fmv_x_w(scratch, frd);
2177 UnalignedStoreHelper<NBYTES>(scratch, rs, t4);
2178}
2179void MacroAssembler::UnalignedDStoreHelper(FPURegister frd,
2180 const MemOperand& rs) {
2181 UseScratchRegisterScope temps(this);
2182 Register scratch = temps.Acquire();
2183 Sub32(sp, sp, 8);
2184 StoreDouble(frd, MemOperand(sp, 0));
2185 Lw(scratch, MemOperand(sp, 0));
2186 UnalignedStoreHelper<4>(scratch, rs, t4);
2187 Lw(scratch, MemOperand(sp, 4));
2188 MemOperand source = rs;
2189 source.set_offset(source.offset() + 4);
2190 UnalignedStoreHelper<4>(scratch, source, t4);
2191 Add32(sp, sp, 8);
2192}
2193#endif
2194
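// AlignedLoadHelper/AlignedStoreHelper only fix up the addressing mode: if the
// offset does not fit the 12-bit immediate of a RISC-V load/store, the base
// and offset are first rewritten through a scratch register, and the actual
// access is then emitted by the |generator| callback.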
2195template <typename Reg_T, typename Func>
2196void MacroAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs,
2197 Func generator) {
2198 MemOperand source = rs;
2199 UseScratchRegisterScope temps(this);
2200 BlockTrampolinePoolScope block_trampoline_pool(this);
2201 if (NeedAdjustBaseAndOffset(source)) {
2202 Register scratch = temps.Acquire();
2203 DCHECK(scratch != rs.rm());
2204 AdjustBaseAndOffset(&source, scratch);
2205 }
2206 generator(target, source);
2207}
2208
2209template <typename Reg_T, typename Func>
2210void MacroAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs,
2211 Func generator) {
2212 MemOperand source = rs;
2213 UseScratchRegisterScope temps(this);
2214 BlockTrampolinePoolScope block_trampoline_pool(this);
2215 if (NeedAdjustBaseAndOffset(source)) {
2216 Register scratch = temps.Acquire();
2217 // make sure scratch does not overwrite value
2218 if (std::is_same<Reg_T, Register>::value) {
2219 DCHECK(scratch.code() != value.code());
2220 }
2221 DCHECK(scratch != rs.rm());
2222 AdjustBaseAndOffset(&source, scratch);
2223 }
2224 generator(value, source);
2225}
2226
2230
2231#if V8_TARGET_ARCH_RISCV64
2232void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
2233 UnalignedLoadHelper<4, false>(rd, rs);
2234}
2235#endif
2236void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
2237 UnalignedStoreHelper<4>(rd, rs, t4);
2238}
2239
2243
2247
2249 UnalignedStoreHelper<2>(rd, rs, t4);
2250}
2251
2255#if V8_TARGET_ARCH_RISCV64
2256// Load a consecutive 32-bit word pair into a 64-bit register, putting the
2257// first word in the low bits
2258// and the second word in the high bits.
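// For example, if the two memory words are 0x11111111 (lower address) and
// 0x22222222, rd ends up holding 0x22222222'11111111.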
2259void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) {
2260 UseScratchRegisterScope temps(this);
2261 Register scratch = temps.Acquire();
2262 Lwu(rd, rs);
2263 Lw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
2264 slli(scratch, scratch, 32);
2265 AddWord(rd, rd, scratch);
2266}
2267
2268// Do a 64-bit store as two consecutive 32-bit stores to a possibly unaligned address.
2269void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
2270 UseScratchRegisterScope temps(this);
2271 Register scratch = temps.Acquire();
2272 Sw(rd, rs);
2273 srai(scratch, rd, 32);
2274 Sw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
2275}
2276#endif
2277
2279 UnalignedStoreHelper<8>(rd, rs, t4);
2280}
2281
2285
2289
2291#if V8_TARGET_ARCH_RISCV64
2293#elif V8_TARGET_ARCH_RISCV32
2294 UnalignedDoubleHelper(fd, rs);
2295#endif
2296}
2297
2299#if V8_TARGET_ARCH_RISCV64
2301#elif V8_TARGET_ARCH_RISCV32
2302 UnalignedDStoreHelper(fd, rs);
2303#endif
2304}
2305
2306void MacroAssembler::Lb(Register rd, const MemOperand& rs, Trapper&& trapper) {
2307 auto fn = [&](Register target, const MemOperand& source) {
2308 trapper(pc_offset());
2309 lb(target, source.rm(), source.offset());
2310 };
2311 AlignedLoadHelper(rd, rs, fn);
2312}
2313
2314void MacroAssembler::Lbu(Register rd, const MemOperand& rs, Trapper&& trapper) {
2315 auto fn = [&](Register target, const MemOperand& source) {
2316 trapper(pc_offset());
2317 lbu(target, source.rm(), source.offset());
2318 };
2319 AlignedLoadHelper(rd, rs, fn);
2320}
2321
2322void MacroAssembler::Sb(Register rd, const MemOperand& rs, Trapper&& trapper) {
2323 auto fn = [&](Register value, const MemOperand& source) {
2324 trapper(pc_offset());
2325 sb(value, source.rm(), source.offset());
2326 };
2327 AlignedStoreHelper(rd, rs, fn);
2328}
2329
2330void MacroAssembler::Lh(Register rd, const MemOperand& rs, Trapper&& trapper) {
2331 auto fn = [&](Register target, const MemOperand& source) {
2332 trapper(pc_offset());
2333 lh(target, source.rm(), source.offset());
2334 };
2335 AlignedLoadHelper(rd, rs, fn);
2336}
2337
2338void MacroAssembler::Lhu(Register rd, const MemOperand& rs, Trapper&& trapper) {
2339 auto fn = [&](Register target, const MemOperand& source) {
2340 trapper(pc_offset());
2341 lhu(target, source.rm(), source.offset());
2342 };
2343 AlignedLoadHelper(rd, rs, fn);
2344}
2345
2346void MacroAssembler::Sh(Register rd, const MemOperand& rs, Trapper&& trapper) {
2347 auto fn = [&](Register value, const MemOperand& source) {
2348 trapper(pc_offset());
2349 sh(value, source.rm(), source.offset());
2350 };
2351 AlignedStoreHelper(rd, rs, fn);
2352}
2353
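// Lw/Sw (and Ld/Sd below) opportunistically emit compressed RVC encodings when
// the operands allow it: c.lw/c.sw require both registers to be in the x8-x15
// range and a small 4-byte-aligned offset, while c.lwsp/c.swsp only require sp
// as the base register (and, for loads, a non-zero destination).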
2354void MacroAssembler::Lw(Register rd, const MemOperand& rs, Trapper&& trapper) {
2355 auto fn = [&](Register target, const MemOperand& source) {
2356 trapper(pc_offset());
2357 if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
2358 ((source.rm().code() & 0b11000) == 0b01000) &&
2359 is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
2360 c_lw(target, source.rm(), source.offset());
2361 } else if (v8_flags.riscv_c_extension && (target != zero_reg) &&
2362 is_uint8(source.offset()) && (source.rm() == sp) &&
2363 ((source.offset() & 0x3) == 0)) {
2364 c_lwsp(target, source.offset());
2365 } else {
2366 lw(target, source.rm(), source.offset());
2367 }
2368 };
2369 AlignedLoadHelper(rd, rs, fn);
2370}
2371
2372#if V8_TARGET_ARCH_RISCV64
2373void MacroAssembler::Lwu(Register rd, const MemOperand& rs, Trapper&& trapper) {
2374 auto fn = [&](Register target, const MemOperand& source) {
2375 trapper(pc_offset());
2376 lwu(target, source.rm(), source.offset());
2377 };
2378 AlignedLoadHelper(rd, rs, fn);
2379}
2380#endif
2381
2382void MacroAssembler::Sw(Register rd, const MemOperand& rs, Trapper&& trapper) {
2383 auto fn = [&](Register value, const MemOperand& source) {
2384 trapper(pc_offset());
2385 if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
2386 ((source.rm().code() & 0b11000) == 0b01000) &&
2387 is_uint7(source.offset()) && ((source.offset() & 0x3) == 0)) {
2388 c_sw(value, source.rm(), source.offset());
2389 } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
2390 is_uint8(source.offset()) && (((source.offset() & 0x3) == 0))) {
2391 c_swsp(value, source.offset());
2392 } else {
2393 sw(value, source.rm(), source.offset());
2394 }
2395 };
2396 AlignedStoreHelper(rd, rs, fn);
2397}
2398
2399#if V8_TARGET_ARCH_RISCV64
2400void MacroAssembler::Ld(Register rd, const MemOperand& rs, Trapper&& trapper) {
2401 auto fn = [&](Register target, const MemOperand& source) {
2402 trapper(pc_offset());
2403 if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
2404 ((source.rm().code() & 0b11000) == 0b01000) &&
2405 is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
2406 c_ld(target, source.rm(), source.offset());
2407 } else if (v8_flags.riscv_c_extension && (target != zero_reg) &&
2408 is_uint9(source.offset()) && (source.rm() == sp) &&
2409 ((source.offset() & 0x7) == 0)) {
2410 c_ldsp(target, source.offset());
2411 } else {
2412 ld(target, source.rm(), source.offset());
2413 }
2414 };
2415 AlignedLoadHelper(rd, rs, fn);
2416}
2417
2418void MacroAssembler::Sd(Register rd, const MemOperand& rs, Trapper&& trapper) {
2419 auto fn = [&](Register value, const MemOperand& source) {
2420 trapper(pc_offset());
2421 if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
2422 ((source.rm().code() & 0b11000) == 0b01000) &&
2423 is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
2424 c_sd(value, source.rm(), source.offset());
2425 } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
2426 is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
2427 c_sdsp(value, source.offset());
2428 } else {
2429 sd(value, source.rm(), source.offset());
2430 }
2431 };
2432 AlignedStoreHelper(rd, rs, fn);
2433}
2434#endif
2435
2436void MacroAssembler::LoadFloat(FPURegister fd, const MemOperand& src,
2437 Trapper&& trapper) {
2438 auto fn = [&](FPURegister target, const MemOperand& source) {
2439 trapper(pc_offset());
2440 flw(target, source.rm(), source.offset());
2441 };
2442 AlignedLoadHelper(fd, src, fn);
2443}
2444
2445void MacroAssembler::StoreFloat(FPURegister fs, const MemOperand& src,
2446 Trapper&& trapper) {
2447 auto fn = [&](FPURegister value, const MemOperand& source) {
2448 trapper(pc_offset());
2449 fsw(value, source.rm(), source.offset());
2450 };
2451 AlignedStoreHelper(fs, src, fn);
2452}
2453
2454void MacroAssembler::LoadDouble(FPURegister fd, const MemOperand& src,
2455 Trapper&& trapper) {
2456 auto fn = [&](FPURegister target, const MemOperand& source) {
2457 trapper(pc_offset());
2458 if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) &&
2459 ((source.rm().code() & 0b11000) == 0b01000) &&
2460 is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
2461 c_fld(target, source.rm(), source.offset());
2462 } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
2463 is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
2464 c_fldsp(target, source.offset());
2465 } else {
2466 fld(target, source.rm(), source.offset());
2467 }
2468 };
2469 AlignedLoadHelper(fd, src, fn);
2470}
2471
2472void MacroAssembler::StoreDouble(FPURegister fs, const MemOperand& src,
2473 Trapper&& trapper) {
2474 auto fn = [&](FPURegister value, const MemOperand& source) {
2475 trapper(pc_offset());
2476 if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) &&
2477 ((source.rm().code() & 0b11000) == 0b01000) &&
2478 is_uint8(source.offset()) && ((source.offset() & 0x7) == 0)) {
2479 c_fsd(value, source.rm(), source.offset());
2480 } else if (v8_flags.riscv_c_extension && (source.rm() == sp) &&
2481 is_uint9(source.offset()) && ((source.offset() & 0x7) == 0)) {
2482 c_fsdsp(value, source.offset());
2483 } else {
2484 fsd(value, source.rm(), source.offset());
2485 }
2486 };
2487 AlignedStoreHelper(fs, src, fn);
2488}
2489
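// Ll/Sc (and Lld/Scd on RISCV64) wrap the lr/sc instructions. Because lr/sc
// only accept a bare base register, a non-zero offset is first materialized
// into a scratch register with AddWord before the reservation or conditional
// store is emitted.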
2490void MacroAssembler::Ll(Register rd, const MemOperand& rs, Trapper&& trapper) {
2491 bool is_one_instruction = rs.offset() == 0;
2492 if (is_one_instruction) {
2493 trapper(pc_offset());
2494 lr_w(false, false, rd, rs.rm());
2495 } else {
2496 UseScratchRegisterScope temps(this);
2497 Register scratch = temps.Acquire();
2498 AddWord(scratch, rs.rm(), rs.offset());
2499 trapper(pc_offset());
2500 lr_w(false, false, rd, scratch);
2501 }
2502}
2503
2504#if V8_TARGET_ARCH_RISCV64
2505void MacroAssembler::Lld(Register rd, const MemOperand& rs, Trapper&& trapper) {
2506 bool is_one_instruction = rs.offset() == 0;
2507 if (is_one_instruction) {
2508 trapper(pc_offset());
2509 lr_d(false, false, rd, rs.rm());
2510 } else {
2511 UseScratchRegisterScope temps(this);
2512 Register scratch = temps.Acquire();
2513 AddWord(scratch, rs.rm(), rs.offset());
2514 trapper(pc_offset());
2515 lr_d(false, false, rd, scratch);
2516 }
2517}
2518#endif
2519
2520void MacroAssembler::Sc(Register rd, const MemOperand& rs, Trapper&& trapper) {
2521 bool is_one_instruction = rs.offset() == 0;
2522 if (is_one_instruction) {
2523 trapper(pc_offset());
2524 sc_w(false, false, rd, rs.rm(), rd);
2525 } else {
2526 UseScratchRegisterScope temps(this);
2527 Register scratch = temps.Acquire();
2528 AddWord(scratch, rs.rm(), rs.offset());
2529 trapper(pc_offset());
2530 sc_w(false, false, rd, scratch, rd);
2531 }
2532}
2533
2534#if V8_TARGET_ARCH_RISCV64
2535void MacroAssembler::Scd(Register rd, const MemOperand& rs, Trapper&& trapper) {
2536 bool is_one_instruction = rs.offset() == 0;
2537 if (is_one_instruction) {
2538 trapper(pc_offset());
2539 sc_d(false, false, rd, rs.rm(), rd);
2540 } else {
2541 UseScratchRegisterScope temps(this);
2542 Register scratch = temps.Acquire();
2543 AddWord(scratch, rs.rm(), rs.offset());
2544 trapper(pc_offset());
2545 sc_d(false, false, rd, scratch, rd);
2546 }
2547}
2548#endif
2549
2550void MacroAssembler::li(Register dst, Handle<HeapObject> value,
2551 RelocInfo::Mode rmode) {
2552 ASM_CODE_COMMENT(this);
2553 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
2554 // non-isolate-independent code. In many cases it might be cheaper than
2555 // embedding the relocatable value.
2556 if (root_array_available_ && options().isolate_independent_code) {
2557 IndirectLoadConstant(dst, value);
2558 return;
2559 } else {
2560 li(dst, Operand(value, rmode));
2561 }
2562}
2563
2564void MacroAssembler::li(Register dst, ExternalReference reference,
2565 LiFlags mode) {
2566 if (root_array_available()) {
2567 if (reference.IsIsolateFieldId()) {
2568 AddWord(dst, kRootRegister,
2569 Operand(reference.offset_from_root_register()));
2570 return;
2571 }
2572 if (options().isolate_independent_code) {
2573 IndirectLoadExternalReference(dst, reference);
2574 return;
2575 }
2576 }
2577 // External references should not get created with IDs if
2578 // `!root_array_available()`.
2579 CHECK(!reference.IsIsolateFieldId());
2580 li(dst, Operand(reference), mode);
2581}
2582
2583static inline int InstrCountForLiLower32Bit(int64_t value) {
2584 int64_t Hi20 = ((value + 0x800) >> 12);
2585 int64_t Lo12 = value << 52 >> 52;
2586 if (Hi20 == 0 || Lo12 == 0) {
2587 return 1;
2588 }
2589 return 2;
2590}
2591
2593 if (is_int32(value + 0x800)) {
2594 return InstrCountForLiLower32Bit(value);
2595 } else {
2596 return RV_li_count(value);
2597 }
2598 UNREACHABLE();
2599 return INT_MAX;
2600}
2601
2602void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
2603 DCHECK(!j.is_reg());
2604 DCHECK(!MustUseReg(j.rmode()));
2605 DCHECK(mode == OPTIMIZE_SIZE);
2606 Li(rd, j.immediate());
2607}
2608
2609void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
2610 DCHECK(!j.is_reg());
2611 BlockTrampolinePoolScope block_trampoline_pool(this);
2612 if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
2613 UseScratchRegisterScope temps(this);
2614 int count = RV_li_count(j.immediate(), temps.CanAcquire());
2615 int reverse_count = RV_li_count(~j.immediate(), temps.CanAcquire());
2616 if (v8_flags.riscv_constant_pool && count >= 4 && reverse_count >= 4) {
2617 // Ld/Lw an Address from a constant pool.
2618#if V8_TARGET_ARCH_RISCV32
2619 RecordEntry((uint32_t)j.immediate(), j.rmode());
2620#elif V8_TARGET_ARCH_RISCV64
2621 RecordEntry((uint64_t)j.immediate(), j.rmode());
2622#endif
2623 auipc(rd, 0);
2624 // Record the value in the constant pool; passing 1 as the offset is a
2625 // promise that LoadWord() emits a full 32-bit instruction that will be
2626 // patched with the real value later.
2627 LoadWord(rd, MemOperand(rd, 1));
2628 } else {
2629 if ((count - reverse_count) > 1) {
2630 Li(rd, ~j.immediate());
2631 not_(rd, rd);
2632 } else {
2633 Li(rd, j.immediate());
2634 }
2635 }
2636 } else if (MustUseReg(j.rmode())) {
2637 int64_t immediate;
2638 if (RelocInfo::IsWasmCanonicalSigId(j.rmode()) ||
2641 // These reloc data are 32-bit values.
2642 DCHECK(is_int32(j.immediate()) || is_uint32(j.immediate()));
2643 RecordRelocInfo(j.rmode());
2644#if V8_TARGET_ARCH_RISCV64
2645 li_constant32(rd, int32_t(j.immediate()));
2646#elif V8_TARGET_ARCH_RISCV32
2647 li_constant(rd, int32_t(j.immediate()));
2648#endif
2649 return;
2650 } else if (RelocInfo::IsCompressedEmbeddedObject(j.rmode())) {
2651 Handle<HeapObject> handle(reinterpret_cast<Address*>(j.immediate()));
2653 DCHECK(is_uint32(index));
2654 RecordRelocInfo(j.rmode());
2655#if V8_TARGET_ARCH_RISCV64
2656 li_constant32(rd, static_cast<uint32_t>(index));
2657#elif V8_TARGET_ARCH_RISCV32
2658 li_constant(rd, index);
2659#endif
2660 return;
2661 } else if (RelocInfo::IsFullEmbeddedObject(j.rmode())) {
2662 if (j.IsHeapNumberRequest()) {
2663 RequestHeapNumber(j.heap_number_request());
2664 immediate = j.immediate_for_heap_number_request();
2665 } else {
2666 immediate = j.immediate();
2667 }
2668#if V8_TARGET_ARCH_RISCV64
2669 BlockPoolsScope block_pools(this);
2670 Handle<HeapObject> handle(reinterpret_cast<Address*>(immediate));
2672 if (RecordEntry(static_cast<uint64_t>(index), j.rmode()) ==
2673 RelocInfoStatus::kMustRecord) {
2674 RecordRelocInfo(j.rmode(), index);
2675 }
2676 DEBUG_PRINTF("\t EmbeddedObjectIndex%lu\n", index);
2677 auipc(rd, 0);
2678 // Record the value in the constant pool; passing 1 as the offset is a
2679 // promise that LoadWord() emits a full 32-bit instruction that will be
2680 // patched with the real value later.
2681 LoadWord(rd, MemOperand(rd, 1));
2682#elif V8_TARGET_ARCH_RISCV32
2683 RecordRelocInfo(j.rmode());
2684 li_constant(rd, immediate);
2685#endif
2686 return;
2687 } else {
2688 immediate = j.immediate();
2689 }
2690 RecordRelocInfo(j.rmode(), immediate);
2691 li_ptr(rd, immediate);
2692 } else if (mode == ADDRESS_LOAD) {
2693 // We always need the same number of instructions as we may need to patch
2694 // this code to load another value which may need all 6 instructions.
2695 RecordRelocInfo(j.rmode());
2696 li_ptr(rd, j.immediate());
2697 } else {
2698 // Always emit the same 48 bit instruction sequence.
2699 li_ptr(rd, j.immediate());
2700 }
2701}
2702
2703static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6};
2704static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7};
2705static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11};
2706
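// MultiPush/MultiPop spill and reload an arbitrary RegList with a single sp
// adjustment. For example, MultiPush({ra, fp, a0}) reserves three slots and
// stores ra in the highest one, then fp, then a0 at the lowest, following the
// fixed ordering documented below; MultiPop({ra, fp, a0}) restores them and
// releases the same amount of stack.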
2707void MacroAssembler::MultiPush(RegList regs) {
2708 int16_t num_to_push = regs.Count();
2709 int16_t stack_offset = num_to_push * kSystemPointerSize;
2710
2711#define TEST_AND_PUSH_REG(reg) \
2712 if (regs.has(reg)) { \
2713 stack_offset -= kSystemPointerSize; \
2714 StoreWord(reg, MemOperand(sp, stack_offset)); \
2715 regs.clear(reg); \
2716 }
2717
2718#define T_REGS(V) V(t6) V(t5) V(t4) V(t3) V(t2) V(t1) V(t0)
2719#define A_REGS(V) V(a7) V(a6) V(a5) V(a4) V(a3) V(a2) V(a1) V(a0)
2720#define S_REGS(V) \
2721 V(s11) V(s10) V(s9) V(s8) V(s7) V(s6) V(s5) V(s4) V(s3) V(s2) V(s1)
2722
2723 SubWord(sp, sp, Operand(stack_offset));
2724
2725 // Certain usage of MultiPush requires that registers are pushed onto the
2726 // stack in a particular order: ra, fp, sp, gp, ... (basically in the decreasing
2727 // order of register numbers, according to the MIPS register numbering).
2728 TEST_AND_PUSH_REG(ra);
2729 TEST_AND_PUSH_REG(fp);
2730 TEST_AND_PUSH_REG(sp);
2731 TEST_AND_PUSH_REG(gp);
2732 TEST_AND_PUSH_REG(tp);
2733 if (!(regs & s_regs).is_empty()) {
2734 S_REGS(TEST_AND_PUSH_REG)
2735 }
2736 if (!(regs & a_regs).is_empty()) {
2737 A_REGS(TEST_AND_PUSH_REG)
2738 }
2739 if (!(regs & t_regs).is_empty()) {
2740 T_REGS(TEST_AND_PUSH_REG)
2741 }
2742
2743 DCHECK(regs.is_empty());
2744
2745#undef TEST_AND_PUSH_REG
2746#undef T_REGS
2747#undef A_REGS
2748#undef S_REGS
2749}
2750
2752 int16_t stack_offset = 0;
2753
2754#define TEST_AND_POP_REG(reg) \
2755 if (regs.has(reg)) { \
2756 LoadWord(reg, MemOperand(sp, stack_offset)); \
2757 stack_offset += kSystemPointerSize; \
2758 regs.clear(reg); \
2759 }
2760
2761#define T_REGS(V) V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6)
2762#define A_REGS(V) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7)
2763#define S_REGS(V) \
2764 V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) V(s10) V(s11)
2765
2766 // MultiPop pops from the stack in the reverse order of MultiPush.
2767 if (!(regs & t_regs).is_empty()) {
2768 T_REGS(TEST_AND_POP_REG)
2769 }
2770 if (!(regs & a_regs).is_empty()) {
2771 A_REGS(TEST_AND_POP_REG)
2772 }
2773 if (!(regs & s_regs).is_empty()) {
2774 S_REGS(TEST_AND_POP_REG)
2775 }
2776 TEST_AND_POP_REG(tp);
2777 TEST_AND_POP_REG(gp);
2778 TEST_AND_POP_REG(sp);
2779 TEST_AND_POP_REG(fp);
2780 TEST_AND_POP_REG(ra);
2781
2782 DCHECK(regs.is_empty());
2783
2784 addi(sp, sp, stack_offset);
2785
2786#undef TEST_AND_POP_REG
2787#undef T_REGS
2788#undef S_REGS
2789#undef A_REGS
2790}
2791
2792void MacroAssembler::MultiPushFPU(DoubleRegList regs) {
2793 int16_t num_to_push = regs.Count();
2794 int16_t stack_offset = num_to_push * kDoubleSize;
2795
2796 SubWord(sp, sp, Operand(stack_offset));
2797 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
2798 if ((regs.bits() & (1 << i)) != 0) {
2799 stack_offset -= kDoubleSize;
2800 StoreDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
2801 }
2802 }
2803}
2804
2805void MacroAssembler::MultiPopFPU(DoubleRegList regs) {
2806 int16_t stack_offset = 0;
2807
2808 for (int16_t i = 0; i < kNumRegisters; i++) {
2809 if ((regs.bits() & (1 << i)) != 0) {
2810 LoadDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
2811 stack_offset += kDoubleSize;
2812 }
2813 }
2814 addi(sp, sp, stack_offset);
2815}
2816
2817#if V8_TARGET_ARCH_RISCV32
2818void MacroAssembler::AddPair(Register dst_low, Register dst_high,
2819 Register left_low, Register left_high,
2820 Register right_low, Register right_high,
2821 Register scratch1, Register scratch2) {
2822 UseScratchRegisterScope temps(this);
2823 Register scratch3 = temps.Acquire();
2824 BlockTrampolinePoolScope block_trampoline_pool(this);
2825
2826 Add32(scratch1, left_low, right_low);
2827 // Save the carry
2828 Sltu(scratch3, scratch1, left_low);
2829 Add32(scratch2, left_high, right_high);
2830
2831 // Output higher 32 bits + carry
2832 Add32(dst_high, scratch2, scratch3);
2833 Move(dst_low, scratch1);
2834}
2835
2836void MacroAssembler::SubPair(Register dst_low, Register dst_high,
2837 Register left_low, Register left_high,
2838 Register right_low, Register right_high,
2839 Register scratch1, Register scratch2) {
2840 UseScratchRegisterScope temps(this);
2841 Register scratch3 = temps.Acquire();
2842 BlockTrampolinePoolScope block_trampoline_pool(this);
2843
2844 // Check if we need a borrow
2845 Sltu(scratch3, left_low, right_low);
2846 Sub32(scratch1, left_low, right_low);
2847 Sub32(scratch2, left_high, right_high);
2848
2849 // Output higher 32 bits - borrow
2850 Sub32(dst_high, scratch2, scratch3);
2851 Move(dst_low, scratch1);
2852}
2853
2854void MacroAssembler::AndPair(Register dst_low, Register dst_high,
2855 Register left_low, Register left_high,
2856 Register right_low, Register right_high) {
2857 And(dst_low, left_low, right_low);
2858 And(dst_high, left_high, right_high);
2859}
2860
2861void MacroAssembler::OrPair(Register dst_low, Register dst_high,
2862 Register left_low, Register left_high,
2863 Register right_low, Register right_high) {
2864 Or(dst_low, left_low, right_low);
2865 Or(dst_high, left_high, right_high);
2866}
2867void MacroAssembler::XorPair(Register dst_low, Register dst_high,
2868 Register left_low, Register left_high,
2869 Register right_low, Register right_high) {
2870 Xor(dst_low, left_low, right_low);
2871 Xor(dst_high, left_high, right_high);
2872}
2873
2874void MacroAssembler::MulPair(Register dst_low, Register dst_high,
2875 Register left_low, Register left_high,
2876 Register right_low, Register right_high,
2877 Register scratch1, Register scratch2) {
2878 UseScratchRegisterScope temps(this);
2879 Register scratch3 = temps.Acquire();
2880 BlockTrampolinePoolScope block_trampoline_pool(this);
2881 if (dst_low == right_low) {
2882 mv(scratch1, right_low);
2883 }
2884 Mul(scratch3, left_low, right_high);
2885 // NOTE: do not move these around, recommended sequence is MULH-MUL
2886 // LL * RL : higher 32 bits
2887 mulhu(scratch2, left_low, right_low);
2888 // LL * RL : lower 32 bits
2889 Mul(dst_low, left_low, right_low);
2890 // (LL * RH) + (LL * RL : higher 32 bits)
2891 Add32(scratch2, scratch2, scratch3);
2892 if (dst_low != right_low) {
2893 Mul(scratch3, left_high, right_low);
2894 } else {
2895 Mul(scratch3, left_high, scratch1);
2896 }
2897 Add32(dst_high, scratch2, scratch3);
2898}
2899
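// The pair helpers below implement 64-bit shifts out of 32-bit halves. As a
// worked example for the variable ShlPair, a shift amount of 40 first shifts
// both halves left by 40 & 0x1F == 8 and ORs the bits that cross the word
// boundary into the high half; the final "shift & 32" test then swaps the
// halves and zeroes the low one, which matches a true 64-bit shift by 40.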
2900void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
2901 Register src_low, Register src_high,
2902 Register shift, Register scratch1,
2903 Register scratch2) {
2904 ASM_CODE_COMMENT(this);
2905 BlockTrampolinePoolScope block_trampoline_pool(this);
2906 Label done;
2907 UseScratchRegisterScope temps(this);
2908 Register scratch3 = no_reg;
2909 if (dst_low == src_low) {
2910 scratch3 = temps.Acquire();
2911 mv(scratch3, src_low);
2912 }
2913 And(scratch1, shift, 0x1F);
2914 // LOW32 << shamt
2915 sll(dst_low, src_low, scratch1);
2916 // HIGH32 << shamt
2917 sll(dst_high, src_high, scratch1);
2918
2919 // If the shift amount is 0, we're done
2920 Branch(&done, eq, shift, Operand(zero_reg));
2921
2922 // LOW32 >> (32 - shamt)
2923 li(scratch2, 32);
2924 Sub32(scratch2, scratch2, scratch1);
2925 if (dst_low == src_low) {
2926 srl(scratch1, scratch3, scratch2);
2927 } else {
2928 srl(scratch1, src_low, scratch2);
2929 }
2930
2931 // (HIGH32 << shamt) | (LOW32 >> (32 - shamt))
2932 Or(dst_high, dst_high, scratch1);
2933
2934 // If the shift amount is < 32, we're done
2935 // Note: the shift amount is always < 64, so we can just test if the 6th bit
2936 // is set
2937 And(scratch1, shift, 32);
2938 Branch(&done, eq, scratch1, Operand(zero_reg));
2939 Move(dst_high, dst_low);
2940 Move(dst_low, zero_reg);
2941
2942 bind(&done);
2943}
2944
2945void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
2946 Register src_low, Register src_high, int32_t shift,
2947 Register scratch1, Register scratch2) {
2948 DCHECK_GE(63, shift);
2949 DCHECK_NE(dst_low, src_low);
2950 DCHECK_NE(dst_high, src_low);
2951 shift &= 0x3F;
2952 if (shift == 0) {
2953 Move(dst_high, src_high);
2954 Move(dst_low, src_low);
2955 } else if (shift == 32) {
2956 Move(dst_high, src_low);
2957 li(dst_low, Operand(0));
2958 } else if (shift > 32) {
2959 shift &= 0x1F;
2960 slli(dst_high, src_low, shift);
2961 li(dst_low, Operand(0));
2962 } else {
2963 slli(dst_high, src_high, shift);
2964 slli(dst_low, src_low, shift);
2965 srli(scratch1, src_low, 32 - shift);
2966 Or(dst_high, dst_high, scratch1);
2967 }
2968}
2969
2970void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
2971 Register src_low, Register src_high,
2972 Register shift, Register scratch1,
2973 Register scratch2) {
2974 ASM_CODE_COMMENT(this);
2975 BlockTrampolinePoolScope block_trampoline_pool(this);
2976 Label done;
2977 UseScratchRegisterScope temps(this);
2978 Register scratch3 = no_reg;
2979 if (dst_high == src_high) {
2980 scratch3 = temps.Acquire();
2981 mv(scratch3, src_high);
2982 }
2983 And(scratch1, shift, 0x1F);
2984 // HIGH32 >> shamt
2985 srl(dst_high, src_high, scratch1);
2986 // LOW32 >> shamt
2987 srl(dst_low, src_low, scratch1);
2988
2989 // If the shift amount is 0, we're done
2990 Branch(&done, eq, shift, Operand(zero_reg));
2991
2992 // HIGH32 << (32 - shamt)
2993 li(scratch2, 32);
2994 Sub32(scratch2, scratch2, scratch1);
2995 if (dst_high == src_high) {
2996 sll(scratch1, scratch3, scratch2);
2997 } else {
2998 sll(scratch1, src_high, scratch2);
2999 }
3000
3001 // (HIGH32 << (32 - shamt)) | (LOW32 >> shamt)
3002 Or(dst_low, dst_low, scratch1);
3003
3004 // If the shift amount is < 32, we're done
3005 // Note: the shift amount is always < 64, so we can just test if the 6th bit
3006 // is set
3007 And(scratch1, shift, 32);
3008 Branch(&done, eq, scratch1, Operand(zero_reg));
3009 Move(dst_low, dst_high);
3010 Move(dst_high, zero_reg);
3011
3012 bind(&done);
3013}
3014
3015void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
3016 Register src_low, Register src_high, int32_t shift,
3017 Register scratch1, Register scratch2) {
3018 DCHECK_GE(63, shift);
3019 DCHECK_NE(dst_low, src_high);
3020 DCHECK_NE(dst_high, src_high);
3021 shift &= 0x3F;
3022 if (shift == 32) {
3023 mv(dst_low, src_high);
3024 li(dst_high, Operand(0));
3025 } else if (shift > 32) {
3026 shift &= 0x1F;
3027 srli(dst_low, src_high, shift);
3028 li(dst_high, Operand(0));
3029 } else if (shift == 0) {
3030 Move(dst_low, src_low);
3031 Move(dst_high, src_high);
3032 } else {
3033 srli(dst_low, src_low, shift);
3034 srli(dst_high, src_high, shift);
3035 slli(scratch1, src_high, 32 - shift);
3036 Or(dst_low, dst_low, scratch1);
3037 }
3038}
3039
3040void MacroAssembler::SarPair(Register dst_low, Register dst_high,
3041 Register src_low, Register src_high,
3042 Register shift, Register scratch1,
3043 Register scratch2) {
3044 BlockTrampolinePoolScope block_trampoline_pool(this);
3045 Label done;
3046 UseScratchRegisterScope temps(this);
3047 Register scratch3 = no_reg;
3048 if (dst_high == src_high) {
3049 scratch3 = temps.Acquire();
3050 mv(scratch3, src_high);
3051 }
3052 And(scratch1, shift, 0x1F);
3053 // HIGH32 >> shamt (arithmetic)
3054 sra(dst_high, src_high, scratch1);
3055 // LOW32 >> shamt (logical)
3056 srl(dst_low, src_low, scratch1);
3057
3058 // If the shift amount is 0, we're done
3059 Branch(&done, eq, shift, Operand(zero_reg));
3060
3061 // HIGH32 << (32 - shamt)
3062 li(scratch2, 32);
3063 Sub32(scratch2, scratch2, scratch1);
3064 if (dst_high == src_high) {
3065 sll(scratch1, scratch3, scratch2);
3066 } else {
3067 sll(scratch1, src_high, scratch2);
3068 }
3069 // (HIGH32 << (32 - shamt)) | (LOW32 >> shamt)
3070 Or(dst_low, dst_low, scratch1);
3071
3072 // If the shift amount is < 32, we're done
3073 // Note: the shift amount is always < 64, so we can just test if the 6th bit
3074 // is set
3075 And(scratch1, shift, 32);
3076 Branch(&done, eq, scratch1, Operand(zero_reg));
3077 Move(dst_low, dst_high);
3078 Sra32(dst_high, dst_high, 31);
3079
3080 bind(&done);
3081}
3082
3083void MacroAssembler::SarPair(Register dst_low, Register dst_high,
3084 Register src_low, Register src_high, int32_t shift,
3085 Register scratch1, Register scratch2) {
3086 DCHECK_GE(63, shift);
3087 DCHECK_NE(dst_low, src_high);
3088 DCHECK_NE(dst_high, src_high);
3089 shift = shift & 0x3F;
3090 if (shift == 0) {
3091 mv(dst_low, src_low);
3092 mv(dst_high, src_high);
3093 } else if (shift < 32) {
3094 srli(dst_low, src_low, shift);
3095 srai(dst_high, src_high, shift);
3096 slli(scratch1, src_high, 32 - shift);
3097 Or(dst_low, dst_low, scratch1);
3098 } else if (shift == 32) {
3099 srai(dst_high, src_high, 31);
3100 mv(dst_low, src_high);
3101 } else {
3102 srai(dst_high, src_high, 31);
3103 srai(dst_low, src_high, shift - 32);
3104 }
3105}
3106#endif
3107
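// ExtractBits copies the |size|-bit field of rs starting at bit |pos| into the
// low bits of rt, either sign- or zero-extended. E.g. pos == 8 and size == 4
// applied to 0xABCD extracts 0xB; with sign_extend the result becomes ...FFFB
// because the field 0b1011 has its top bit set.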
3108void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
3109 uint16_t size, bool sign_extend) {
3110#if V8_TARGET_ARCH_RISCV64
3111 DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
3112 pos + size <= 64);
3113 slli(rt, rs, 64 - (pos + size));
3114 if (sign_extend) {
3115 srai(rt, rt, 64 - size);
3116 } else {
3117 srli(rt, rt, 64 - size);
3118 }
3119#elif V8_TARGET_ARCH_RISCV32
3120 DCHECK_LT(pos, 32);
3121 DCHECK_GT(size, 0);
3122 DCHECK_LE(size, 32);
3123 DCHECK_GT(pos + size, 0);
3124 DCHECK_LE(pos + size, 32);
3125 slli(rt, rs, 32 - (pos + size));
3126 if (sign_extend) {
3127 srai(rt, rt, 32 - size);
3128 } else {
3129 srli(rt, rt, 32 - size);
3130 }
3131#endif
3132}
3133
3134void MacroAssembler::InsertBits(Register dest, Register source, Register pos,
3135 int size) {
3136#if V8_TARGET_ARCH_RISCV64
3137 DCHECK_LT(size, 64);
3138#elif V8_TARGET_ARCH_RISCV32
3139 DCHECK_LT(size, 32);
3140#endif
3141 UseScratchRegisterScope temps(this);
3142 Register mask = temps.Acquire();
3143 BlockTrampolinePoolScope block_trampoline_pool(this);
3144 Register source_ = temps.Acquire();
3145 // Create a mask of the length=size.
3146 li(mask, 1);
3147 slli(mask, mask, size);
3148 addi(mask, mask, -1);
3149 and_(source_, mask, source);
3150 sll(source_, source_, pos);
3151 // Make a mask containing 0's. 0's start at "pos" with length=size.
3152 sll(mask, mask, pos);
3153 not_(mask, mask);
3154 // cut area for insertion of source.
3155 and_(dest, mask, dest);
3156 // insert source
3157 or_(dest, dest, source_);
3158}
3159
3161
3163
3165 // Convert rs to a FP value in fd.
3166 fcvt_d_wu(fd, rs);
3167}
3168
3170 // Convert rs to a FP value in fd.
3171 fcvt_d_w(fd, rs);
3172}
3173#if V8_TARGET_ARCH_RISCV64
3175 // Convert rs to a FP value in fd.
3176 fcvt_d_lu(fd, rs);
3177}
3178#endif
3180 // Convert rs to a FP value in fd.
3181 fcvt_s_wu(fd, rs);
3182}
3183
3185 // Convert rs to a FP value in fd.
3186 fcvt_s_w(fd, rs);
3187}
3188#if V8_TARGET_ARCH_RISCV64
3190 // Convert rs to a FP value in fd.
3191 fcvt_s_lu(fd, rs);
3192}
3193#endif
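// RoundFloatingPointToInteger wraps a single fcvt-style conversion. When a
// |result| register is supplied, the invalid-operation bit in fflags is
// cleared first, the conversion is emitted, and |result| is set to 1 only if
// that flag is still clear afterwards, i.e. 0 signals a NaN or out-of-range
// input.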
3194template <typename CvtFunc>
3195void MacroAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
3196 CvtFunc fcvt_generator) {
3198 if (result.is_valid()) {
3199 BlockTrampolinePoolScope block_trampoline_pool(this);
3200
3201 int exception_flags = kInvalidOperation;
3202 // clear invalid operation accrued flags, don't preserve old fflags
3203 csrrci(zero_reg, csr_fflags, exception_flags);
3204
3205 // actual conversion instruction
3206 fcvt_generator(this, rd, fs);
3207
3208 // check kInvalidOperation flag (out-of-range, NaN)
3209 // set result to 1 if normal, otherwise set result to 0 for abnormal
3210 frflags(result);
3211 andi(result, result, exception_flags);
3212 seqz(result, result); // result <-- 1 (normal), result <-- 0 (abnormal)
3213 } else {
3214 // actual conversion instruction
3215 fcvt_generator(this, rd, fs);
3216 }
3217}
3218
3220 Label no_nan;
3222 feq_d(kScratchReg, fs, fs);
3223 bnez(kScratchReg, &no_nan);
3224 Move(rd, zero_reg);
3225 bind(&no_nan);
3226}
3227
3229 Label no_nan;
3231 feq_s(kScratchReg, fs, fs);
3232 bnez(kScratchReg, &no_nan);
3233 Move(rd, zero_reg);
3234 bind(&no_nan);
3235}
3236
3239 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3240 masm->fcvt_wu_d(dst, src, RTZ);
3241 });
3242}
3243
3245#if V8_TARGET_ARCH_RISCV64 // For RV64 we can go faster csr-less approach
3246 if (result.is_valid()) {
3247 Label bad, done;
3248 UseScratchRegisterScope temps(this);
3249 Register scratch = temps.Acquire();
3250 // actual conversion instruction
3251 fcvt_l_d(rd, fs, RTZ);
3252 li(scratch, kMinInt);
3253 blt(rd, scratch, &bad);
3254 Sub32(scratch, scratch, 1); // converts kMinInt into kMaxInt
3255 bgt(rd, scratch, &bad);
3256 li(result, 1);
3257 j(&done);
3258 bind(&bad);
3259 // scratch still holds proper max/min value
3260 mv(rd, scratch);
3261 li(result, 0);
3262 // set result to 1 if normal, otherwise set result to 0 for abnormal
3263 bind(&done);
3264 } else {
3265 fcvt_w_d(rd, fs, RTZ);
3266 }
3267#else
3269 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3270 masm->fcvt_w_d(dst, src, RTZ);
3271 });
3272#endif
3273}
3274
3277 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3278 masm->fcvt_wu_s(dst, src, RTZ);
3279 });
3280}
3281
3284 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3285 masm->fcvt_w_s(dst, src, RTZ);
3286 });
3287}
3288#if V8_TARGET_ARCH_RISCV64
3291 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3292 masm->fcvt_lu_d(dst, src, RTZ);
3293 });
3294}
3295
3296void MacroAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) {
3298 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3299 masm->fcvt_l_d(dst, src, RTZ);
3300 });
3301}
3302
3303void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) {
3305 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3306 masm->fcvt_lu_s(dst, src, RTZ);
3307 });
3308}
3309
3310void MacroAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) {
3312 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3313 masm->fcvt_l_s(dst, src, RTZ);
3314 });
3315}
3316#endif
3319 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3320 masm->fcvt_w_s(dst, src, RNE);
3321 });
3322}
3323
3326 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3327 masm->fcvt_w_d(dst, src, RNE);
3328 });
3329}
3330
3333 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3334 masm->fcvt_w_s(dst, src, RUP);
3335 });
3336}
3337
3340 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3341 masm->fcvt_w_d(dst, src, RUP);
3342 });
3343}
3344
3347 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3348 masm->fcvt_w_s(dst, src, RDN);
3349 });
3350}
3351
3354 rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) {
3355 masm->fcvt_w_d(dst, src, RDN);
3356 });
3357}
3358
3359// According to the JS ECMA specification, for floating-point round operations,
3360// if the input is NaN, +/-Infinity, or +/-0, the same input is returned as the
3361// rounded result; this differs from the behavior of the RISCV fcvt instructions
3362// (which round out-of-range values to the nearest max or min value), so special
3363// handling is needed for NaN, +/-Infinity, and +/-0.
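// As a concrete example, Ceil_d_d (defined further below) rounds -0.5 via
// fcvt_l_d/fcvt_d_l with RUP to the integer 0, and the trailing fsgnj
// re-applies the sign of the original input so the final result is -0.0 as JS
// requires; inputs such as 2^60, NaN or Infinity skip the conversion entirely
// because the exponent test shows there is no fractional part left to round.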
3364#if V8_TARGET_ARCH_RISCV64
3365template <typename F>
3367 FPURegister fpu_scratch, FPURoundingMode frm) {
3368 BlockTrampolinePoolScope block_trampoline_pool(this);
3369 UseScratchRegisterScope temps(this);
3370 Register scratch2 = temps.Acquire();
3371
3372 DCHECK((std::is_same<float, F>::value) || (std::is_same<double, F>::value));
3373 // Need at least two FPRs, so check against dst == src == fpu_scratch
3374 DCHECK(!(dst == src && dst == fpu_scratch));
3375
3376 const int kFloatMantissaBits =
3378 const int kFloatExponentBits =
3380 const int kFloatExponentBias =
3382 Label done;
3383
3384 {
3385 UseScratchRegisterScope temps2(this);
3386 Register scratch = temps2.Acquire();
3387 // extract exponent value of the source floating-point to scratch
3388 if (std::is_same<F, double>::value) {
3389 fmv_x_d(scratch, src);
3390 } else {
3391 fmv_x_w(scratch, src);
3392 }
3394 }
3395
3396 // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
3397 // in mantissa, the result is the same as src, so move src to dest (to avoid
3398 // generating another branch)
3399 if (dst != src) {
3400 if (std::is_same<F, double>::value) {
3401 fmv_d(dst, src);
3402 } else {
3403 fmv_s(dst, src);
3404 }
3405 }
3406 {
3407 Label not_NaN;
3408 UseScratchRegisterScope temps2(this);
3409 Register scratch = temps2.Acquire();
3410 // According to the wasm spec
3411 // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans)
3412 // if input is canonical NaN, then output is canonical NaN, and if input is
3413 // any other NaN, then output is any NaN with most significant bit of
3414 // payload is 1. In RISC-V, feq_d will set scratch to 0 if src is a NaN. If
3415 // src is not a NaN, branch to the label and do nothing, but if it is,
3416 // fmin_d will set dst to the canonical NaN.
3417 if (std::is_same<F, double>::value) {
3418 feq_d(scratch, src, src);
3419 bnez(scratch, &not_NaN);
3420 fmin_d(dst, src, src);
3421 } else {
3422 feq_s(scratch, src, src);
3423 bnez(scratch, &not_NaN);
3424 fmin_s(dst, src, src);
3425 }
3426 bind(&not_NaN);
3427 }
3428
3429 // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than
3430 // kFloat32MantissaBits, it means the floating-point value has no fractional
3431 // part, thus the input is already rounded, jump to done. Note that, NaN and
3432 // Infinity in floating-point representation sets maximal exponent value, so
3433 // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
3434 // and JS round semantics specify that rounding of NaN (Infinity) returns NaN
3435 // (Infinity), so NaN and Infinity are considered rounded value too.
3436 Branch(&done, greater_equal, scratch2,
3438
3439 // Actual rounding is needed along this path
3440
3441 // old_src holds the original input, needed for the case of src == dst
3442 FPURegister old_src = src;
3443 if (src == dst) {
3444 DCHECK(fpu_scratch != dst);
3445 Move(fpu_scratch, src);
3446 old_src = fpu_scratch;
3447 }
3448
3449 // Since only input whose real exponent value is less than kMantissaBits
3450 // (i.e., 23 or 52-bits) falls into this path, the value range of the input
3451 // falls into that of 23- or 53-bit integers. So we round the input to integer
3452 // values, then convert them back to floating-point.
3453 {
3454 UseScratchRegisterScope temps(this);
3455 Register scratch = temps.Acquire();
3456 if (std::is_same<F, double>::value) {
3457 fcvt_l_d(scratch, src, frm);
3458 fcvt_d_l(dst, scratch, frm);
3459 } else {
3460 fcvt_w_s(scratch, src, frm);
3461 fcvt_s_w(dst, scratch, frm);
3462 }
3463 }
3464 // A special handling is needed if the input is a very small positive/negative
3465 // number that rounds to zero. JS semantics requires that the rounded result
3466 // retains the sign of the input, so a very small positive (negative)
3467 // floating-point number should be rounded to positive (negative) 0.
3468 // Therefore, we use sign-bit injection to produce +/-0 correctly. Instead of
3469 // testing for zero w/ a branch, we just insert sign-bit for everyone on this
3470 // path (this is where old_src is needed)
3471 if (std::is_same<F, double>::value) {
3472 fsgnj_d(dst, dst, old_src);
3473 } else {
3474 fsgnj_s(dst, dst, old_src);
3475 }
3476
3477 bind(&done);
3478}
3479#elif V8_TARGET_ARCH_RISCV32
3480// According to the JS ECMA specification, for floating-point round operations,
3481// if the input is NaN, +/-Infinity, or +/-0, the same input is returned as the
3482// rounded result; this differs from the behavior of the RISCV fcvt instructions
3483// (which round out-of-range values to the nearest max or min value), so special
3484// handling is needed for NaN, +/-Infinity, and +/-0.
3485void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src,
3486 FPURegister fpu_scratch, FPURoundingMode frm) {
3487 BlockTrampolinePoolScope block_trampoline_pool(this);
3488 UseScratchRegisterScope temps(this);
3489 Register scratch2 = temps.Acquire();
3490
3491 // Need at least two FPRs, so check against dst == src == fpu_scratch
3492 DCHECK(!(dst == src && dst == fpu_scratch));
3493
3497 Label done;
3498
3499 {
3500 UseScratchRegisterScope temps2(this);
3501 Register scratch = temps2.Acquire();
3502 // extract exponent value of the source floating-point to scratch
3503 fmv_x_w(scratch, src);
3505 }
3506
3507 // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
3508 // in mantissa, the result is the same as src, so move src to dest (to avoid
3509 // generating another branch)
3510 if (dst != src) {
3511 fmv_s(dst, src);
3512 }
3513 {
3514 Label not_NaN;
3515 UseScratchRegisterScope temps2(this);
3516 Register scratch = temps2.Acquire();
3517 // According to the wasm spec
3518 // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans)
3519 // if input is canonical NaN, then output is canonical NaN, and if input is
3520 // any other NaN, then output is any NaN with most significant bit of
3521 // payload is 1. In RISC-V, feq_s will set scratch to 0 if src is a NaN. If
3522 // src is not a NaN, branch to the label and do nothing, but if it is,
3523 // fmin_s will set dst to the canonical NaN.
3524 feq_s(scratch, src, src);
3525 bnez(scratch, &not_NaN);
3526 fmin_s(dst, src, src);
3527 bind(&not_NaN);
3528 }
3529
3530 // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than
3531 // kFloat32MantissaBits, it means the floating-point value has no fractional
3532 // part, thus the input is already rounded, jump to done. Note that, NaN and
3533 // Infinity in floating-point representation sets maximal exponent value, so
3534 // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
3535 // and JS round semantics specify that rounding of NaN (Infinity) returns NaN
3536 // (Infinity), so NaN and Infinity are considered rounded value too.
3537 Branch(&done, greater_equal, scratch2,
3539
3540 // Actual rounding is needed along this path
3541
3542 // old_src holds the original input, needed for the case of src == dst
3543 FPURegister old_src = src;
3544 if (src == dst) {
3545 DCHECK(fpu_scratch != dst);
3546 Move(fpu_scratch, src);
3547 old_src = fpu_scratch;
3548 }
3549
3550 // Since only input whose real exponent value is less than kMantissaBits
3551 // (i.e., 23 or 52-bits) falls into this path, the value range of the input
3552 // falls into that of 23- or 53-bit integers. So we round the input to integer
3553 // values, then convert them back to floating-point.
3554 {
3555 UseScratchRegisterScope temps(this);
3556 Register scratch = temps.Acquire();
3557 fcvt_w_s(scratch, src, frm);
3558 fcvt_s_w(dst, scratch, frm);
3559 }
3560 // A special handling is needed if the input is a very small positive/negative
3561 // number that rounds to zero. JS semantics requires that the rounded result
3562 // retains the sign of the input, so a very small positive (negative)
3563 // floating-point number should be rounded to positive (negative) 0.
3564 // Therefore, we use sign-bit injection to produce +/-0 correctly. Instead of
3565 // testing for zero w/ a branch, we just insert sign-bit for everyone on this
3566 // path (this is where old_src is needed)
3567 fsgnj_s(dst, dst, old_src);
3568
3569 bind(&done);
3570}
3571#endif // V8_TARGET_ARCH_RISCV32
3572// According to the JS ECMA specification, for floating-point round operations,
3573// if the input is NaN, +/-Infinity, or +/-0, the same input is returned as the
3574// rounded result; this differs from the behavior of the RISCV fcvt instructions
3575// (which round out-of-range values to the nearest max or min value), so special
3576// handling is needed for NaN, +/-Infinity, and +/-0.
3577template <typename F>
3579 VRegister v_scratch, FPURoundingMode frm,
3580 bool keep_nan_same) {
3581 VU.set(scratch, std::is_same<F, float>::value ? E32 : E64, m1);
3582 // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits
3583 // in mantissa, the result is the same as src, so move src to dest (to avoid
3584 // generating another branch)
3585
3586 // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than
3587 // kFloat32MantissaBits, it means the floating-point value has no fractional
3588 // part, thus the input is already rounded, jump to done. Note that, NaN and
3589 // Infinity in floating-point representation sets maximal exponent value, so
3590 // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
3591 // and JS round semantics specify that rounding of NaN (Infinity) returns NaN
3592 // (Infinity), so NaN and Infinity are considered rounded value too.
3593 const int kFloatMantissaBits =
3595 const int kFloatExponentBits =
3597 const int kFloatExponentBias =
3599
3600 // slli(rt, rs, 64 - (pos + size));
3601 // if (sign_extend) {
3602 // srai(rt, rt, 64 - size);
3603 // } else {
3604 // srli(rt, rt, 64 - size);
3605 // }
3606 vmv_vx(v_scratch, zero_reg);
3607 li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits);
3608 vsll_vx(v_scratch, src, scratch);
3609 li(scratch, 64 - kFloatExponentBits);
3610 vsrl_vx(v_scratch, v_scratch, scratch);
3612 vmslt_vx(v0, v_scratch, scratch);
3613 VU.set(frm);
3614 vmv_vv(dst, src);
3615 if (dst == src) {
3616 vmv_vv(v_scratch, src);
3617 }
3618 vfcvt_x_f_v(dst, src, MaskType::Mask);
3619 vfcvt_f_x_v(dst, dst, MaskType::Mask);
3620
3621 // A special handling is needed if the input is a very small positive/negative
3622 // number that rounds to zero. JS semantics requires that the rounded result
3623 // retains the sign of the input, so a very small positive (negative)
3624 // floating-point number should be rounded to positive (negative) 0.
3625 if (dst == src) {
3626 vfsngj_vv(dst, dst, v_scratch);
3627 } else {
3628 vfsngj_vv(dst, dst, src);
3629 }
3630 if (!keep_nan_same) {
3631 vmfeq_vv(v0, src, src);
3632 vnot_vv(v0, v0);
3633 if (std::is_same<F, float>::value) {
3634 fmv_w_x(kScratchDoubleReg, zero_reg);
3635 } else {
3636#ifdef V8_TARGET_ARCH_RISCV64
3637 fmv_d_x(kScratchDoubleReg, zero_reg);
3638#elif V8_TARGET_ARCH_RISCV32
3639 fcvt_d_w(kScratchDoubleReg, zero_reg);
3640#endif
3641 }
3642 vfadd_vf(dst, src, kScratchDoubleReg, MaskType::Mask);
3643 }
3644}
3645
3647 VRegister v_scratch) {
3648 RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP, false);
3649}
3650
3652 VRegister v_scratch) {
3653 RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP, false);
3654}
3655
3657 VRegister v_scratch) {
3658 RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN, false);
3659}
3660
3662 VRegister v_scratch) {
3663 RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN, false);
3664}
3665
3667 VRegister v_scratch) {
3668 RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ, false);
3669}
3670
3672 VRegister v_scratch) {
3673 RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ, false);
3674}
3675
3677 VRegister v_scratch) {
3678 RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE, false);
3679}
3680
3682 VRegister v_scratch) {
3683 RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE, false);
3684}
3685
3686#if V8_TARGET_ARCH_RISCV64
3688 FPURegister fpu_scratch) {
3689 RoundHelper<double>(dst, src, fpu_scratch, RDN);
3690}
3691
3692void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src,
3693 FPURegister fpu_scratch) {
3694 RoundHelper<double>(dst, src, fpu_scratch, RUP);
3695}
3696
3697void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src,
3698 FPURegister fpu_scratch) {
3699 RoundHelper<double>(dst, src, fpu_scratch, RTZ);
3700}
3701
3702void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src,
3703 FPURegister fpu_scratch) {
3704 RoundHelper<double>(dst, src, fpu_scratch, RNE);
3705}
3706#endif
3707
3709 FPURegister fpu_scratch) {
3710#if V8_TARGET_ARCH_RISCV64
3711 RoundHelper<float>(dst, src, fpu_scratch, RDN);
3712#elif V8_TARGET_ARCH_RISCV32
3713 RoundFloat(dst, src, fpu_scratch, RDN);
3714#endif
3715}
3716
3718 FPURegister fpu_scratch) {
3719#if V8_TARGET_ARCH_RISCV64
3720 RoundHelper<float>(dst, src, fpu_scratch, RUP);
3721#elif V8_TARGET_ARCH_RISCV32
3722 RoundFloat(dst, src, fpu_scratch, RUP);
3723#endif
3724}
3725
3727 FPURegister fpu_scratch) {
3728#if V8_TARGET_ARCH_RISCV64
3729 RoundHelper<float>(dst, src, fpu_scratch, RTZ);
3730#elif V8_TARGET_ARCH_RISCV32
3731 RoundFloat(dst, src, fpu_scratch, RTZ);
3732#endif
3733}
3734
3736 FPURegister fpu_scratch) {
3737#if V8_TARGET_ARCH_RISCV64
3738 RoundHelper<float>(dst, src, fpu_scratch, RNE);
3739#elif V8_TARGET_ARCH_RISCV32
3740 RoundFloat(dst, src, fpu_scratch, RNE);
3741#endif
3742}
3743
3745 FPURegister ft) {
3746 fmadd_s(fd, fs, ft, fr);
3747}
3748
3750 FPURegister ft) {
3751 fmadd_d(fd, fs, ft, fr);
3752}
3753
3755 FPURegister ft) {
3756 fmsub_s(fd, fs, ft, fr);
3757}
3758
3760 FPURegister ft) {
3761 fmsub_d(fd, fs, ft, fr);
3762}
3763
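// In the FP compare helpers below an unordered comparison (either operand is
// NaN) makes feq/flt/fle return 0, so EQ/LT/LE/GE/GT all yield 0 for NaN
// inputs while NE, implemented as the negation of EQ, yields 1.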
3765 FPURegister cmp2) {
3766 switch (cc) {
3767 case EQ:
3768 feq_s(rd, cmp1, cmp2);
3769 break;
3770 case NE:
3771 feq_s(rd, cmp1, cmp2);
3772 NegateBool(rd, rd);
3773 break;
3774 case LT:
3775 flt_s(rd, cmp1, cmp2);
3776 break;
3777 case GE:
3778 fle_s(rd, cmp2, cmp1);
3779 break;
3780 case LE:
3781 fle_s(rd, cmp1, cmp2);
3782 break;
3783 case GT:
3784 flt_s(rd, cmp2, cmp1);
3785 break;
3786 default:
3787 UNREACHABLE();
3788 }
3789}
3790
3792 FPURegister cmp2) {
3793 switch (cc) {
3794 case EQ:
3795 feq_d(rd, cmp1, cmp2);
3796 break;
3797 case NE:
3798 feq_d(rd, cmp1, cmp2);
3799 NegateBool(rd, rd);
3800 break;
3801 case LT:
3802 flt_d(rd, cmp1, cmp2);
3803 break;
3804 case GE:
3805 fle_d(rd, cmp2, cmp1);
3806 break;
3807 case LE:
3808 fle_d(rd, cmp1, cmp2);
3809 break;
3810 case GT:
3811 flt_d(rd, cmp2, cmp1);
3812 break;
3813 default:
3814 UNREACHABLE();
3815 }
3816}
3817
3819 FPURegister cmp2) {
3820 UseScratchRegisterScope temps(this);
3821 BlockTrampolinePoolScope block_trampoline_pool(this);
3822 Register scratch = temps.Acquire();
3823
3824 feq_s(rd, cmp1, cmp1); // rd <- !isNaN(cmp1)
3825 feq_s(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
3826 And(rd, rd, scratch); // rd <- !isNaN(cmp1) && !isNaN(cmp2)
3827}
3828
3830 FPURegister cmp2) {
3831 UseScratchRegisterScope temps(this);
3832 BlockTrampolinePoolScope block_trampoline_pool(this);
3833 Register scratch = temps.Acquire();
3834
3835 feq_d(rd, cmp1, cmp1); // rd <- !isNaN(cmp1)
3836 feq_d(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
3837 And(rd, rd, scratch); // rd <- !isNaN(cmp1) && !isNaN(cmp2)
3838}
3839
3841 FPURegister cmp2) {
3842 CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNaN(cmp1) && !isNaN(cmp2)
3843 Xor(rd, rd, 1); // rd <- isNaN(cmp1) || isNaN(cmp2)
3844}
3845
3847 FPURegister cmp2) {
3848 CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNaN(cmp1) && !isNaN(cmp2)
3849 Xor(rd, rd, 1); // rd <- isNaN(cmp1) || isNaN(cmp2)
3850}
3851
3853 Branch(target, not_equal, rs, Operand(zero_reg));
3854}
3855
3857 Branch(target, equal, rs, Operand(zero_reg));
3858}
3859
3861 bool long_branch =
3862 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
3863 if (long_branch) {
3864 Label skip;
3865 BranchFalseShortF(rs, &skip);
3866 BranchLong(target);
3867 bind(&skip);
3868 } else {
3869 BranchTrueShortF(rs, target);
3870 }
3871}
3872
3874 bool long_branch =
3875 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
3876 if (long_branch) {
3877 Label skip;
3878 BranchTrueShortF(rs, &skip);
3879 BranchLong(target);
3880 bind(&skip);
3881 } else {
3882 BranchFalseShortF(rs, target);
3883 }
3884}
3885
3887#if V8_TARGET_ARCH_RISCV64
3888 UseScratchRegisterScope temps(this);
3889 Register scratch = temps.Acquire();
3890 Register scratch2 = temps.Acquire();
3891 BlockTrampolinePoolScope block_trampoline_pool(this);
3892
3893 DCHECK(src_high != scratch2 && src_high != scratch);
3894
3895 fmv_x_d(scratch, dst);
3896 slli(scratch2, src_high, 32);
3897 slli(scratch, scratch, 32);
3898 srli(scratch, scratch, 32);
3899 or_(scratch, scratch, scratch2);
3900 fmv_d_x(dst, scratch);
3901#elif V8_TARGET_ARCH_RISCV32
3902 BlockTrampolinePoolScope block_trampoline_pool(this);
3903 Add32(sp, sp, Operand(-8));
3904 StoreDouble(dst, MemOperand(sp, 0));
3905 Sw(src_high, MemOperand(sp, 4));
3906 LoadDouble(dst, MemOperand(sp, 0));
3907 Add32(sp, sp, Operand(8));
3908#endif
3909}
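// Illustrative sketch (not part of the original source) of the RV64 path
// above: the double is moved to a GPR, its upper half is cleared with a
// shift pair, and the new upper word is OR-ed in before moving back:
//
//   uint64_t bits = bit_cast<uint64_t>(dst);
//   bits = (bits & 0x00000000FFFFFFFFull) | ((uint64_t)src_high << 32);
//   dst  = bit_cast<double>(bits);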
3910
3912#if V8_TARGET_ARCH_RISCV64
3913 UseScratchRegisterScope temps(this);
3914 Register scratch = temps.Acquire();
3915 Register scratch2 = temps.Acquire();
3916 BlockTrampolinePoolScope block_trampoline_pool(this);
3917
3918 DCHECK(src_low != scratch && src_low != scratch2);
3919 fmv_x_d(scratch, dst);
3920 slli(scratch2, src_low, 32);
3921 srli(scratch2, scratch2, 32);
3922 srli(scratch, scratch, 32);
3923 slli(scratch, scratch, 32);
3924 or_(scratch, scratch, scratch2);
3925 fmv_d_x(dst, scratch);
3926#elif V8_TARGET_ARCH_RISCV32
3927 BlockTrampolinePoolScope block_trampoline_pool(this);
3928 AddWord(sp, sp, Operand(-8));
3929 StoreDouble(dst, MemOperand(sp, 0));
3930 Sw(src_low, MemOperand(sp, 0));
3931 LoadDouble(dst, MemOperand(sp, 0));
3932 AddWord(sp, sp, Operand(8));
3933#endif
3934}
3935
3937 ASM_CODE_COMMENT(this);
3938 // Handle special values first.
3940 if (dst != kSingleRegZero) fmv_s(dst, kSingleRegZero);
3941 } else if (src == base::bit_cast<uint32_t>(-0.0f) &&
3943 Neg_s(dst, kSingleRegZero);
3944 } else {
3945 if (dst == kSingleRegZero) {
3946 DCHECK(src == base::bit_cast<uint32_t>(0.0f));
3947 fcvt_s_w(dst, zero_reg);
3949 } else {
3950 if (src == base::bit_cast<uint32_t>(0.0f)) {
3951 fcvt_s_w(dst, zero_reg);
3952 } else {
3953 UseScratchRegisterScope temps(this);
3954 Register scratch = temps.Acquire();
3955 li(scratch, Operand(static_cast<int32_t>(src)));
3956 fmv_w_x(dst, scratch);
3957 }
3958 }
3959 }
3960}
3961
3963 ASM_CODE_COMMENT(this);
3964 // Handle special values first.
3966 if (dst != kDoubleRegZero) fmv_d(dst, kDoubleRegZero);
3967 } else if (src == base::bit_cast<uint64_t>(-0.0) &&
3969 Neg_d(dst, kDoubleRegZero);
3970 } else {
3971#if V8_TARGET_ARCH_RISCV64
3972 if (dst == kDoubleRegZero) {
3973 DCHECK(src == base::bit_cast<uint64_t>(0.0));
3974 fcvt_d_l(dst, zero_reg);
3976 } else {
3977 UseScratchRegisterScope temps(this);
3978 Register scratch = temps.Acquire();
3979 if (src == base::bit_cast<uint64_t>(0.0)) {
3980 fcvt_d_l(dst, zero_reg);
3981 } else {
3982 li(scratch, Operand(src));
3983 fmv_d_x(dst, scratch);
3984 }
3985 }
3986#elif V8_TARGET_ARCH_RISCV32
3987 if (dst == kDoubleRegZero) {
3988 DCHECK(src == base::bit_cast<uint64_t>(0.0));
3989 fcvt_d_w(dst, zero_reg);
3991 } else {
3992 // TODO: does the stack slot content need to be cleared afterwards?
3993 if (src == base::bit_cast<uint64_t>(0.0)) {
3994 fcvt_d_w(dst, zero_reg);
3995 } else {
3996 UseScratchRegisterScope temps(this);
3997 Register scratch = temps.Acquire();
3998 uint32_t low_32 = src & 0xffffffffull;
3999 uint32_t up_32 = src >> 32;
4000 AddWord(sp, sp, Operand(-8));
4001 li(scratch, Operand(static_cast<int32_t>(low_32)));
4002 Sw(scratch, MemOperand(sp, 0));
4003 li(scratch, Operand(static_cast<int32_t>(up_32)));
4004 Sw(scratch, MemOperand(sp, 4));
4005 LoadDouble(dst, MemOperand(sp, 0));
4006 AddWord(sp, sp, Operand(8));
4007 }
4008 }
4009#endif
4010 }
4011}
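// Illustrative note (not part of the original source): on RV32 there is no
// 64-bit GPR to fmv into an FPR, so the non-zero immediate above is
// materialized by storing its two 32-bit halves to the stack and reloading
// them as one double. For example, 1.0 == 0x3FF0000000000000 is written as
// the words 0x00000000 (low, at sp + 0) and 0x3FF00000 (high, at sp + 4).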
4012
4014 Condition cond) {
4015 switch (cond) {
4016 case eq:
4017 Seq(rd, rs, rt);
4018 break;
4019 case ne:
4020 Sne(rd, rs, rt);
4021 break;
4022
4023 // Signed comparison.
4024 case greater:
4025 Sgt(rd, rs, rt);
4026 break;
4027 case greater_equal:
4028 Sge(rd, rs, rt); // rs >= rt
4029 break;
4030 case less:
4031 Slt(rd, rs, rt); // rs < rt
4032 break;
4033 case less_equal:
4034 Sle(rd, rs, rt); // rs <= rt
4035 break;
4036
4037 // Unsigned comparison.
4038 case Ugreater:
4039 Sgtu(rd, rs, rt); // rs > rt
4040 break;
4041 case Ugreater_equal:
4042 Sgeu(rd, rs, rt); // rs >= rt
4043 break;
4044 case Uless:
4045 Sltu(rd, rs, rt); // rs < rt
4046 break;
4047 case Uless_equal:
4048 Sleu(rd, rs, rt); // rs <= rt
4049 break;
4050 case cc_always:
4051 UNREACHABLE();
4052 default:
4053 UNREACHABLE();
4054 }
4055}
4056
4057// dest <- (condition != 0 ? zero : dest)
4060 if (CpuFeatures::IsSupported(ZICOND)) {
4061 czero_nez(dest, dest, condition);
4062 } else {
4063 UseScratchRegisterScope temps(this);
4064 Register scratch = temps.Acquire();
4065 seqz(scratch, condition);
4066 // neg + and may be more efficient than mul(dest, dest, scratch)
4067 neg(scratch, scratch); // 0 is still 0, 1 becomes all 1s
4068 and_(dest, dest, scratch);
4069 }
4070}
4071
4072// dest <- (condition == 0 ? 0 : dest)
4075 if (CpuFeatures::IsSupported(ZICOND)) {
4076 czero_eqz(dest, dest, condition);
4077 } else {
4078 UseScratchRegisterScope temps(this);
4079 Register scratch = temps.Acquire();
4080 snez(scratch, condition);
4081 // neg + and may be more efficient than mul(dest, dest, scratch);
4082 neg(scratch, scratch); // 0 is still 0, 1 becomes all 1s
4083 and_(dest, dest, scratch);
4084 }
4085}
4086
4088 if (CpuFeatures::IsSupported(ZBB)) {
4089#if V8_TARGET_ARCH_RISCV64
4090 clzw(rd, xx);
4091#else
4092 clz(rd, xx);
4093#endif
4094 } else {
4095 // 32 bit unsigned in lower word: count number of leading zeros.
4096 // int n = 32;
4097 // unsigned y;
4098
4099 // y = x >>16; if (y != 0) { n = n -16; x = y; }
4100 // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
4101 // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
4102 // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
4103 // y = x >> 1; if (y != 0) {rd = n - 2; return;}
4104 // rd = n - x;
4105
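 // Illustrative worked example (not part of the original source) of the
 // binary search below: for x = 0x00001234,
 //   y = x >> 16 == 0         -> n stays 32
 //   y = x >> 8  == 0x12 != 0 -> n = 24, x = 0x12
 //   y = x >> 4  == 0x01 != 0 -> n = 20, x = 0x01
 //   y = x >> 2  == 0         -> n stays 20
 //   y = x >> 1  == 0         -> rd = n - x = 20 - 1 = 19
 // which matches the 19 leading zero bits of 0x00001234.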
4106 Label L0, L1, L2, L3, L4;
4107 UseScratchRegisterScope temps(this);
4108 BlockTrampolinePoolScope block_trampoline_pool(this);
4109 Register x = rd;
4110 Register y = temps.Acquire();
4111 Register n = temps.Acquire();
4112 DCHECK(xx != y && xx != n);
4113 Move(x, xx);
4114 li(n, Operand(32));
4115#if V8_TARGET_ARCH_RISCV64
4116 srliw(y, x, 16);
4117 BranchShort(&L0, eq, y, Operand(zero_reg));
4118 Move(x, y);
4119 addiw(n, n, -16);
4120 bind(&L0);
4121 srliw(y, x, 8);
4122 BranchShort(&L1, eq, y, Operand(zero_reg));
4123 addiw(n, n, -8);
4124 Move(x, y);
4125 bind(&L1);
4126 srliw(y, x, 4);
4127 BranchShort(&L2, eq, y, Operand(zero_reg));
4128 addiw(n, n, -4);
4129 Move(x, y);
4130 bind(&L2);
4131 srliw(y, x, 2);
4132 BranchShort(&L3, eq, y, Operand(zero_reg));
4133 addiw(n, n, -2);
4134 Move(x, y);
4135 bind(&L3);
4136 srliw(y, x, 1);
4137 subw(rd, n, x);
4138 BranchShort(&L4, eq, y, Operand(zero_reg));
4139 addiw(rd, n, -2);
4140 bind(&L4);
4141#elif V8_TARGET_ARCH_RISCV32
4142 srli(y, x, 16);
4143 BranchShort(&L0, eq, y, Operand(zero_reg));
4144 Move(x, y);
4145 addi(n, n, -16);
4146 bind(&L0);
4147 srli(y, x, 8);
4148 BranchShort(&L1, eq, y, Operand(zero_reg));
4149 addi(n, n, -8);
4150 Move(x, y);
4151 bind(&L1);
4152 srli(y, x, 4);
4153 BranchShort(&L2, eq, y, Operand(zero_reg));
4154 addi(n, n, -4);
4155 Move(x, y);
4156 bind(&L2);
4157 srli(y, x, 2);
4158 BranchShort(&L3, eq, y, Operand(zero_reg));
4159 addi(n, n, -2);
4160 Move(x, y);
4161 bind(&L3);
4162 srli(y, x, 1);
4163 sub(rd, n, x);
4164 BranchShort(&L4, eq, y, Operand(zero_reg));
4165 addi(rd, n, -2);
4166 bind(&L4);
4167#endif
4168 }
4169}
4170
4171#if V8_TARGET_ARCH_RISCV64
4172void MacroAssembler::Clz64(Register rd, Register xx) {
4173 if (CpuFeatures::IsSupported(ZBB)) {
4174 clz(rd, xx);
4175 } else {
4176 // 64 bit: count number of leading zeros.
4177 // int n = 64;
4178 // unsigned y;
4179
4180 // y = x >>32; if (y != 0) { n = n - 32; x = y; }
4181 // y = x >>16; if (y != 0) { n = n - 16; x = y; }
4182 // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
4183 // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
4184 // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
4185 // y = x >> 1; if (y != 0) {rd = n - 2; return;}
4186 // rd = n - x;
4187
4188 Label L0, L1, L2, L3, L4, L5;
4189 UseScratchRegisterScope temps(this);
4190 BlockTrampolinePoolScope block_trampoline_pool(this);
4191 Register x = rd;
4192 Register y = temps.Acquire();
4193 Register n = temps.Acquire();
4194 DCHECK(xx != y && xx != n);
4195 Move(x, xx);
4196 li(n, Operand(64));
4197 srli(y, x, 32);
4198 BranchShort(&L0, eq, y, Operand(zero_reg));
4199 addiw(n, n, -32);
4200 Move(x, y);
4201 bind(&L0);
4202 srli(y, x, 16);
4203 BranchShort(&L1, eq, y, Operand(zero_reg));
4204 addiw(n, n, -16);
4205 Move(x, y);
4206 bind(&L1);
4207 srli(y, x, 8);
4208 BranchShort(&L2, eq, y, Operand(zero_reg));
4209 addiw(n, n, -8);
4210 Move(x, y);
4211 bind(&L2);
4212 srli(y, x, 4);
4213 BranchShort(&L3, eq, y, Operand(zero_reg));
4214 addiw(n, n, -4);
4215 Move(x, y);
4216 bind(&L3);
4217 srli(y, x, 2);
4218 BranchShort(&L4, eq, y, Operand(zero_reg));
4219 addiw(n, n, -2);
4220 Move(x, y);
4221 bind(&L4);
4222 srli(y, x, 1);
4223 subw(rd, n, x);
4224 BranchShort(&L5, eq, y, Operand(zero_reg));
4225 addiw(rd, n, -2);
4226 bind(&L5);
4227 }
4228}
4229#endif
4230
4232 if (CpuFeatures::IsSupported(ZBB)) {
4233#if V8_TARGET_ARCH_RISCV64
4234 ctzw(rd, rs);
4235#else
4236 ctz(rd, rs);
4237#endif
4238 } else {
4239 // Convert trailing zeroes to trailing ones, and bits to their left
4240 // to zeroes.
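 // Illustrative example (not part of the original source): for rs = 0b10100
 // (two trailing zeros), scratch = rs - 1 = 0b10011 and
 // (scratch ^ rs) & scratch = 0b00011, i.e. the trailing zeros of rs turned
 // into ones with everything above cleared. Clz32 of that value is 30, and
 // 32 - 30 = 2 recovers the trailing-zero count.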
4241 BlockTrampolinePoolScope block_trampoline_pool(this);
4242 {
4243 UseScratchRegisterScope temps(this);
4244 Register scratch = temps.Acquire();
4245 AddWord(scratch, rs, -1);
4246 Xor(rd, scratch, rs);
4247 And(rd, rd, scratch);
4248 // Count number of leading zeroes.
4249 }
4250 Clz32(rd, rd);
4251 {
4252 // Subtract number of leading zeroes from 32 to get number of trailing
4253 // ones. Remember that the trailing ones were formerly trailing zeroes.
4254 UseScratchRegisterScope temps(this);
4255 Register scratch = temps.Acquire();
4256 li(scratch, 32);
4257 Sub32(rd, scratch, rd);
4258 }
4259 }
4260}
4261#if V8_TARGET_ARCH_RISCV64
4262void MacroAssembler::Ctz64(Register rd, Register rs) {
4263 if (CpuFeatures::IsSupported(ZBB)) {
4264 ctz(rd, rs);
4265 } else {
4266 // Convert trailing zeroes to trailing ones, and bits to their left
4267 // to zeroes.
4268 BlockTrampolinePoolScope block_trampoline_pool(this);
4269 {
4270 UseScratchRegisterScope temps(this);
4271 Register scratch = temps.Acquire();
4272 AddWord(scratch, rs, -1);
4273 Xor(rd, scratch, rs);
4274 And(rd, rd, scratch);
4275 // Count number of leading zeroes.
4276 }
4277 Clz64(rd, rd);
4278 {
4279 // Subtract number of leading zeroes from 64 to get number of trailing
4280 // ones. Remember that the trailing ones were formerly trailing zeroes.
4281 UseScratchRegisterScope temps(this);
4282 Register scratch = temps.Acquire();
4283 li(scratch, 64);
4284 SubWord(rd, scratch, rd);
4285 }
4286 }
4287}
4288#endif
4290 if (CpuFeatures::IsSupported(ZBB)) {
4291#if V8_TARGET_ARCH_RISCV64
4292 cpopw(rd, rs);
4293#else
4294 cpop(rd, rs);
4295#endif
4296 } else {
4297 DCHECK_NE(scratch, rs);
4298 DCHECK_NE(scratch, rd);
4299 // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
4300 //
4301 // A generalization of the best bit counting method to integers of
4302 // bit-widths up to 128 (parameterized by type T) is this:
4303 //
4304 // v = v - ((v >> 1) & (T)~(T)0/3); // temp
4305 // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
4306 // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
4307 // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
4308 //
4309 // There are algorithms which are faster in the cases where very few
4310 // bits are set but the algorithm here attempts to minimize the total
4311 // number of instructions executed even when a large number of bits
4312 // are set.
4313 // The number of instructions is 20.
4314 // uint32_t B0 = 0x55555555; // (T)~(T)0/3
4315 // uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
4316 // uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
4317 // uint32_t value = 0x01010101; // (T)~(T)0/255
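 // Reference C sketch of the sequence emitted below (illustrative only, not
 // part of the original source):
 //
 //   uint32_t popcnt32(uint32_t v) {
 //     v = v - ((v >> 1) & 0x55555555);
 //     v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 //     v = (v + (v >> 4)) & 0x0F0F0F0F;
 //     return (v * 0x01010101) >> 24;  // per-byte sums collect in the top byte
 //   }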
4318 uint32_t shift = 24;
4319 UseScratchRegisterScope temps(this);
4320 BlockTrampolinePoolScope block_trampoline_pool(this);
4321 Register scratch2 = temps.Acquire();
4322 Register value = temps.Acquire();
4323 DCHECK((rd != value) && (rs != value));
4324 li(value, 0x01010101); // value = 0x01010101;
4325 li(scratch2, 0x55555555); // B0 = 0x55555555;
4326 Srl32(scratch, rs, 1);
4327 And(scratch, scratch, scratch2);
4328 Sub32(scratch, rs, scratch);
4329 li(scratch2, 0x33333333); // B1 = 0x33333333;
4330 slli(rd, scratch2, 4);
4331 or_(scratch2, scratch2, rd);
4332 And(rd, scratch, scratch2);
4333 Srl32(scratch, scratch, 2);
4334 And(scratch, scratch, scratch2);
4335 Add32(scratch, rd, scratch);
4336 Srl32(rd, scratch, 4);
4337 Add32(rd, rd, scratch);
4338 li(scratch2, 0xF);
4339 Mul32(scratch2, value, scratch2); // B2 = 0x0F0F0F0F;
4340 And(rd, rd, scratch2);
4341 Mul32(rd, rd, value);
4342 Srl32(rd, rd, shift);
4343 }
4344}
4345#if V8_TARGET_ARCH_RISCV64
4346void MacroAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
4347 if (CpuFeatures::IsSupported(ZBB)) {
4348 cpop(rd, rs);
4349 } else {
4350 DCHECK_NE(scratch, rs);
4351 DCHECK_NE(scratch, rd);
4352 // uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
4353 // uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
4354 // uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
4355 // uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
4356 // uint64_t shift = 24; // final shift below is 32 + 24 = 56, i.e. (sizeof(T) - 1) * BITS_PER_BYTE
4357 uint64_t shift = 24;
4358 UseScratchRegisterScope temps(this);
4359 BlockTrampolinePoolScope block_trampoline_pool(this);
4360 Register scratch2 = temps.Acquire();
4361 Register value = temps.Acquire();
4362 DCHECK((rd != value) && (rs != value));
4363 li(value, 0x1111111111111111l); // value = 0x1111111111111111l;
4364 li(scratch2, 5);
4365 Mul64(scratch2, value, scratch2); // B0 = 0x5555555555555555l;
4366 Srl64(scratch, rs, 1);
4367 And(scratch, scratch, scratch2);
4368 SubWord(scratch, rs, scratch);
4369 li(scratch2, 3);
4370 Mul64(scratch2, value, scratch2); // B1 = 0x3333333333333333l;
4371 And(rd, scratch, scratch2);
4372 Srl64(scratch, scratch, 2);
4373 And(scratch, scratch, scratch2);
4374 AddWord(scratch, rd, scratch);
4375 Srl64(rd, scratch, 4);
4376 AddWord(rd, rd, scratch);
4377 li(scratch2, 0xF);
4378 li(value, 0x0101010101010101l); // value = 0x0101010101010101l;
4379 Mul64(scratch2, value, scratch2); // B2 = 0x0F0F0F0F0F0F0F0Fl;
4380 And(rd, rd, scratch2);
4381 Mul64(rd, rd, value);
4382 srli(rd, rd, 32 + shift);
4383 }
4384}
4385#endif
4386
4388 DoubleRegister double_input,
4389 Label* done) {
4390 UseScratchRegisterScope temps(this);
4391 Register scratch = temps.Acquire();
4392 // scratch == 1 indicates that no exception was raised during truncation
4393 Trunc_w_d(result, double_input, scratch);
4394 // If we had no exceptions (i.e., scratch==1) we are done.
4395 Branch(done, eq, scratch, Operand(1));
4396}
4397
4400 DoubleRegister double_input,
4401 StubCallMode stub_mode) {
4402 Label done;
4403
4404 TryInlineTruncateDoubleToI(result, double_input, &done);
4405
4406 // If we fell through then inline version didn't succeed - call stub
4407 // instead.
4408 push(ra);
4409 SubWord(sp, sp, Operand(kDoubleSize)); // Put input on stack.
4410 fsd(double_input, sp, 0);
4411#if V8_ENABLE_WEBASSEMBLY
4412 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
4413 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
4414#else
4415 // For balance.
4416 if (false) {
4417#endif // V8_ENABLE_WEBASSEMBLY
4418 } else {
4419 CallBuiltin(Builtin::kDoubleToI);
4420 }
4421 LoadWord(result, MemOperand(sp, 0));
4422
4423 AddWord(sp, sp, Operand(kDoubleSize));
4424 pop(ra);
4425
4426 bind(&done);
4427}
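// Illustrative note (not part of the original source): the slow path above
// passes the input by value on the stack. The double is stored into a newly
// reserved slot, the DoubleToI builtin is called, and the truncated integer
// is then reloaded from that same slot before it is popped, on the assumption
// that the builtin writes its result back into the argument slot.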
4428
4429// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
4430#define BRANCH_ARGS_CHECK(cond, rs, rt) \
4431 DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
4432 (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
4433
4435 DCHECK(is_int21(offset));
4437}
4438
4440 const Operand& rt, Label::Distance distance) {
4441 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt);
4442 DCHECK(is_near);
4443 USE(is_near);
4444}
4445
4447 if (L->is_bound()) {
4448 if (is_near(L)) {
4449 BranchShort(L);
4450 } else {
4451 BranchLong(L);
4452 }
4453 } else {
4454 if (is_trampoline_emitted()) {
4455 BranchLong(L);
4456 } else {
4457 BranchShort(L);
4458 }
4459 }
4460}
4461
4463 const Operand& rt, Label::Distance distance) {
4464 if (L->is_bound()) {
4465 if (!BranchShortCheck(0, L, cond, rs, rt)) {
4466 if (cond != cc_always) {
4467 Label skip;
4468 Condition neg_cond = NegateCondition(cond);
4469 BlockPoolsScope block_pools(this, 3 * kInstrSize);
4470 BranchShort(&skip, neg_cond, rs, rt);
4471 BranchLong(L);
4472 bind(&skip);
4473 } else {
4474 BranchLong(L);
4476 }
4477 }
4478 } else {
4479 DEBUG_PRINTF("\t Branch is_trampoline_emitted %d\n",
4481 if (is_trampoline_emitted()) {
4482 if (cond != cc_always) {
4483 Label skip;
4484 Condition neg_cond = NegateCondition(cond);
4485 BlockPoolsScope block_pools(this, 3 * kInstrSize);
4486 BranchShort(&skip, neg_cond, rs, rt);
4487 BranchLong(L);
4488 bind(&skip);
4489 } else {
4490 BranchLong(L);
4492 }
4493 } else {
4494 BranchShort(L, cond, rs, rt);
4495 }
4496 }
4497}
4498
4500 RootIndex index, Label::Distance distance) {
4501 UseScratchRegisterScope temps(this);
4502 Register right = temps.Acquire();
4504 Register left = rs;
4506 is_int12(ReadOnlyRootPtr(index))) {
4507 left = temps.Acquire();
4508 Sll32(left, rs, 0);
4509 }
4510 LoadTaggedRoot(right, index);
4511 Branch(L, cond, left, Operand(right));
4512 } else {
4513 LoadRoot(right, index);
4514 Branch(L, cond, rs, Operand(right));
4515 }
4516}
4517
4519 Register r1, const Operand& r2,
4520 bool need_link) {
4522 UseScratchRegisterScope temps(this);
4523 Register scratch0 = temps.Acquire();
4524 Sll32(scratch0, r1, 0);
4525 if (IsZero(r2)) {
4526 Branch(label, cond, scratch0, Operand(zero_reg));
4527 } else {
4528 Register scratch1 = temps.Acquire();
4529 if (r2.is_reg()) {
4530 Sll32(scratch1, r2.rm(), 0);
4531 } else {
4532 li(scratch1, r2);
4533 Sll32(scratch1, scratch1, 0);
4534 }
4535 Branch(label, cond, scratch0, Operand(scratch1));
4536 }
4537 } else {
4538 Branch(label, cond, r1, r2);
4539 }
4540}
4541
4543 DCHECK(L == nullptr || offset == 0);
4544 offset = GetOffset(offset, L, OffsetSize::kOffset21);
4545 j(offset);
4546}
4547
4549 DCHECK(is_int21(offset));
4550 BranchShortHelper(offset, nullptr);
4551}
4552
4554
4556 if (L) {
4557 offset = branch_offset_helper(L, bits);
4558 } else {
4559 DCHECK(is_intn(offset, bits));
4560 }
4561 return offset;
4562}
4563
4565 Register scratch) {
4566 Register r2 = no_reg;
4567 if (rt.is_reg()) {
4568 r2 = rt.rm();
4569 } else {
4570 r2 = scratch;
4571 li(r2, rt);
4572 }
4573
4574 return r2;
4575}
4576
4578 OffsetSize bits) {
4579 if (!is_near(L, bits)) return false;
4580 *offset = GetOffset(*offset, L, bits);
4581 return true;
4582}
4583
4585 Register* scratch, const Operand& rt) {
4586 if (!is_near(L, bits)) return false;
4587 *scratch = GetRtAsRegisterHelper(rt, *scratch);
4588 *offset = GetOffset(*offset, L, bits);
4589 return true;
4590}
4591
4593 Register rs, const Operand& rt) {
4594 DCHECK(L == nullptr || offset == 0);
4595 UseScratchRegisterScope temps(this);
4596 BlockTrampolinePoolScope block_trampoline_pool(this);
4597 Register scratch = no_reg;
4598 if (!rt.is_reg()) {
4599 if (rt.immediate() == 0) {
4600 scratch = zero_reg;
4601 } else {
4602 scratch = temps.Acquire();
4603 li(scratch, rt);
4604 }
4605 } else {
4606 scratch = rt.rm();
4607 }
4608 {
4609 BlockTrampolinePoolScope block_trampoline_pool(this);
4610 switch (cond) {
4611 case cc_always:
4612 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
4613 j(offset);
4615 break;
4616 case eq:
4617 // rs == rt
4618 if (rt.is_reg() && rs == rt.rm()) {
4619 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
4620 j(offset);
4621 } else {
4622 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4623 beq(rs, scratch, offset);
4624 }
4625 break;
4626 case ne:
4627 // rs != rt
4628 if (rt.is_reg() && rs == rt.rm()) {
4629 break; // No code needs to be emitted
4630 } else {
4631 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4632 bne(rs, scratch, offset);
4633 }
4634 break;
4635
4636 // Signed comparison.
4637 case greater:
4638 // rs > rt
4639 if (rt.is_reg() && rs == rt.rm()) {
4640 break; // No code needs to be emitted.
4641 } else {
4642 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4643 bgt(rs, scratch, offset);
4644 }
4645 break;
4646 case greater_equal:
4647 // rs >= rt
4648 if (rt.is_reg() && rs == rt.rm()) {
4649 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
4650 j(offset);
4651 } else {
4652 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4653 bge(rs, scratch, offset);
4654 }
4655 break;
4656 case less:
4657 // rs < rt
4658 if (rt.is_reg() && rs == rt.rm()) {
4659 break; // No code needs to be emitted.
4660 } else {
4661 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4662 blt(rs, scratch, offset);
4663 }
4664 break;
4665 case less_equal:
4666 // rs <= rt
4667 if (rt.is_reg() && rs == rt.rm()) {
4668 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
4669 j(offset);
4670 } else {
4671 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4672 ble(rs, scratch, offset);
4673 }
4674 break;
4675
4676 // Unsigned comparison.
4677 case Ugreater:
4678 // rs > rt
4679 if (rt.is_reg() && rs == rt.rm()) {
4680 break; // No code needs to be emitted.
4681 } else {
4682 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4683 bgtu(rs, scratch, offset);
4684 }
4685 break;
4686 case Ugreater_equal:
4687 // rs >= rt
4688 if (rt.is_reg() && rs == rt.rm()) {
4689 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
4690 j(offset);
4691 } else {
4692 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4693 bgeu(rs, scratch, offset);
4694 }
4695 break;
4696 case Uless:
4697 // rs < rt
4698 if (rt.is_reg() && rs == rt.rm()) {
4699 break; // No code needs to be emitted.
4700 } else {
4701 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4702 bltu(rs, scratch, offset);
4703 }
4704 break;
4705 case Uless_equal:
4706 // rs <= rt
4707 if (rt.is_reg() && rs == rt.rm()) {
4708 if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false;
4709 j(offset);
4710 } else {
4711 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4712 bleu(rs, scratch, offset);
4713 }
4714 break;
4715 case Condition::overflow:
4716 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4717 bnez(rs, offset);
4718 break;
4719 case Condition::no_overflow:
4720 if (!CalculateOffset(L, &offset, OffsetSize::kOffset13)) return false;
4721 beqz(rs, offset);
4722 break;
4723 default:
4724 UNREACHABLE();
4725 }
4726 }
4727
4729 return true;
4730}
4731
4733 Register rs, const Operand& rt) {
4734 BRANCH_ARGS_CHECK(cond, rs, rt);
4735
4736 if (!L) {
4737 DCHECK(is_int13(offset));
4738 return BranchShortHelper(offset, nullptr, cond, rs, rt);
4739 } else {
4740 DCHECK_EQ(offset, 0);
4741 return BranchShortHelper(0, L, cond, rs, rt);
4742 }
4743}
4744
4746 const Operand& rt) {
4747 BranchShortCheck(offset, nullptr, cond, rs, rt);
4748}
4749
4750void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
4751 const Operand& rt) {
4752 BranchShortCheck(0, L, cond, rs, rt);
4753}
4754
4755void MacroAssembler::BranchAndLink(int32_t offset) {
4757}
4758
4759void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
4760 const Operand& rt) {
4761 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt);
4762 DCHECK(is_near);
4763 USE(is_near);
4764}
4765
4766void MacroAssembler::BranchAndLink(Label* L) {
4767 if (L->is_bound()) {
4768 if (is_near(L)) {
4770 } else {
4772 }
4773 } else {
4774 if (is_trampoline_emitted()) {
4776 } else {
4778 }
4779 }
4780}
4781
4782void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
4783 const Operand& rt) {
4784 if (L->is_bound()) {
4785 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) {
4786 Label skip;
4787 Condition neg_cond = NegateCondition(cond);
4788 BranchShort(&skip, neg_cond, rs, rt);
4790 bind(&skip);
4791 }
4792 } else {
4793 if (is_trampoline_emitted()) {
4794 Label skip;
4795 Condition neg_cond = NegateCondition(cond);
4796 BranchShort(&skip, neg_cond, rs, rt);
4798 bind(&skip);
4799 } else {
4800 BranchAndLinkShortCheck(0, L, cond, rs, rt);
4801 }
4802 }
4803}
4804
4806 DCHECK(L == nullptr || offset == 0);
4807 offset = GetOffset(offset, L, OffsetSize::kOffset21);
4808 jal(offset);
4809}
4810
4812 DCHECK(is_int21(offset));
4814}
4815
4819
4820// RISC-V has no conditional jump-and-link instruction, so a conditional
4821// BranchAndLink is synthesized by branching over the jal with the negated
4822// condition and then emitting the jal itself.
4824 Condition cond, Register rs,
4825 const Operand& rt) {
4826 DCHECK(L == nullptr || offset == 0);
4827 if (!is_near(L, OffsetSize::kOffset21)) return false;
4828
4829 UseScratchRegisterScope temps(this);
4830 Register scratch = temps.Acquire();
4831 BlockTrampolinePoolScope block_trampoline_pool(this);
4832
4833 if (cond == cc_always) {
4834 offset = GetOffset(offset, L, OffsetSize::kOffset21);
4835 jal(offset);
4836 } else {
4837 Branch(kInstrSize * 2, NegateCondition(cond), rs,
4838 Operand(GetRtAsRegisterHelper(rt, scratch)));
4839 offset = GetOffset(offset, L, OffsetSize::kOffset21);
4840 jal(offset);
4841 }
4842
4843 return true;
4844}
4845
4847 Condition cond, Register rs,
4848 const Operand& rt) {
4849 BRANCH_ARGS_CHECK(cond, rs, rt);
4850
4851 if (!L) {
4852 DCHECK(is_int21(offset));
4853 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt);
4854 } else {
4855 DCHECK_EQ(offset, 0);
4856 return BranchAndLinkShortHelper(0, L, cond, rs, rt);
4857 }
4858}
4859
4861 int constant_index) {
4862 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
4863 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
4866 constant_index)));
4867}
4868
4872
4874 StoreWord(value, MemOperand(kRootRegister, offset));
4875}
4876
4878 ExternalReference reference, Register scratch) {
4879 if (root_array_available()) {
4880 if (reference.IsIsolateFieldId()) {
4882 }
4883 if (options().enable_root_relative_access) {
4884 int64_t offset =
4886 if (is_int32(offset)) {
4887 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
4888 }
4889 }
4890 if (root_array_available_ && options().isolate_independent_code) {
4891 if (IsAddressableThroughRootRegister(isolate(), reference)) {
4892 // Some external references can be efficiently loaded as an offset from
4893 // kRootRegister.
4894 intptr_t offset =
4896 CHECK(is_int32(offset));
4897 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
4898 } else {
4899 // Otherwise, do a memory load from the external reference table.
4900 DCHECK(scratch.is_valid());
4901 LoadWord(scratch,
4904 isolate(), reference)));
4905 return MemOperand(scratch, 0);
4906 }
4907 }
4908 }
4909 DCHECK(scratch.is_valid());
4910 li(scratch, reference);
4911 return MemOperand(scratch, 0);
4912}
4913
4915 intptr_t offset) {
4916 if (offset == 0) {
4918 } else {
4920 }
4921}
4922
4923void MacroAssembler::Jump(Register target, Condition cond, Register rs,
4924 const Operand& rt) {
4925 BlockTrampolinePoolScope block_trampoline_pool(this);
4926 if (cond == cc_always) {
4927 jr(target);
4929 } else {
4930 BRANCH_ARGS_CHECK(cond, rs, rt);
4931 Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
4932 jr(target);
4933 }
4934}
4935
4936void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
4937 Condition cond, Register rs, const Operand& rt) {
4938 Label skip;
4939 if (cond != cc_always) {
4940 Branch(&skip, NegateCondition(cond), rs, rt);
4941 }
4942 {
4943 BlockTrampolinePoolScope block_trampoline_pool(this);
4944 li(t6, Operand(target, rmode));
4945 Jump(t6, al, zero_reg, Operand(zero_reg));
4947 bind(&skip);
4948 }
4949}
4950
4951void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
4952 Register rs, const Operand& rt) {
4954 Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
4955}
4956
4958 Condition cond, Register rs, const Operand& rt) {
4960 DCHECK_IMPLIES(options().isolate_independent_code,
4962
4964 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
4965 // Inline the trampoline.
4966 Label skip;
4967 if (cond != al) Branch(&skip, NegateCondition(cond), rs, rt);
4968 TailCallBuiltin(builtin);
4969 bind(&skip);
4970 return;
4971 }
4973 if (CanUseNearCallOrJump(rmode)) {
4975 DCHECK(is_int32(index));
4976 Label skip;
4977 if (cond != al) Branch(&skip, NegateCondition(cond), rs, rt);
4979 static_cast<int32_t>(index));
4980 GenPCRelativeJump(t6, static_cast<int32_t>(index));
4981 bind(&skip);
4982 } else {
4983 Jump(code.address(), rmode, cond);
4984 }
4985}
4986
4988 li(t6, reference);
4989 Jump(t6);
4990}
4991
4992// Note: To call gcc-compiled C code on riscv64, you must call through t6.
4993void MacroAssembler::Call(Register target, Condition cond, Register rs,
4994 const Operand& rt) {
4995 BlockTrampolinePoolScope block_trampoline_pool(this);
4996 if (cond == cc_always) {
4997 jalr(ra, target, 0);
4998 } else {
4999 BRANCH_ARGS_CHECK(cond, rs, rt);
5000 Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
5001 jalr(ra, target, 0);
5002 }
5003}
5004
5006 RootIndex index, Condition cc,
5007 Label* target) {
5008 ASM_CODE_COMMENT(this);
5010 UseScratchRegisterScope temps(this);
5012 CompareTaggedAndBranch(target, cc, obj, Operand(ReadOnlyRootPtr(index)));
5013 return;
5014 }
5015 // Some smi roots contain system pointer size values like stack limits.
5018 Register temp = temps.Acquire();
5019 DCHECK(!AreAliased(obj, temp));
5020 LoadRoot(temp, index);
5021 CompareTaggedAndBranch(target, cc, obj, Operand(temp));
5022}
5023// Compare the object in a register to a value from the root list.
5025 Condition cc, Label* target,
5026 ComparisonMode mode) {
5027 ASM_CODE_COMMENT(this);
5028 if (mode == ComparisonMode::kFullPointer ||
5031 // Some smi roots contain system pointer size values like stack limits.
5032 UseScratchRegisterScope temps(this);
5033 Register temp = temps.Acquire();
5034 DCHECK(!AreAliased(obj, temp));
5035 LoadRoot(temp, index);
5036 Branch(target, cc, obj, Operand(temp));
5037 return;
5038 }
5039 CompareTaggedRootAndBranch(obj, index, cc, target);
5040}
5041#if V8_TARGET_ARCH_RISCV64
5042// Compare the tagged object in a register to a value from the root list
5043// and put 0 into result if equal or 1 otherwise.
5045 const Register& result) {
5046 ASM_CODE_COMMENT(this);
5049 Li(result, ReadOnlyRootPtr(index));
5051 return;
5052 }
5053 // Some smi roots contain system pointer size values like stack limits.
5056 LoadRoot(result, index);
5058}
5059
5060void MacroAssembler::CompareRoot(const Register& obj, RootIndex index,
5061 const Register& result, ComparisonMode mode) {
5062 ASM_CODE_COMMENT(this);
5063 if (mode == ComparisonMode::kFullPointer ||
5066 // Some smi roots contain system pointer size values like stack limits.
5067 UseScratchRegisterScope temps(this);
5068 Register temp = temps.Acquire();
5069 DCHECK(!AreAliased(obj, temp));
5070 LoadRoot(temp, index);
5071 CompareI(result, obj, Operand(temp),
5072 Condition::ne); // result is 0 if equal or 1 otherwise
5073 return;
5074 }
5075 // FIXME: check that 0/1 in result is expected for all CompareRoot callers
5076 CompareTaggedRoot(obj, index, result);
5077}
5078#endif
5079
5080void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
5081 unsigned higher_limit,
5082 Label* on_in_range) {
5083 if (lower_limit != 0) {
5084 UseScratchRegisterScope temps(this);
5085 Register scratch = temps.Acquire();
5086 SubWord(scratch, value, Operand(lower_limit));
5087 Branch(on_in_range, Uless_equal, scratch,
5088 Operand(higher_limit - lower_limit));
5089 } else {
5090 Branch(on_in_range, Uless_equal, value,
5091 Operand(higher_limit - lower_limit));
5092 }
5093}
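// Illustrative note (not part of the original source): the single unsigned
// comparison above folds the two-sided range check into one branch, using
//
//   lower_limit <= value && value <= higher_limit
//     <=>  (unsigned)(value - lower_limit) <= (higher_limit - lower_limit)
//
// since any value below lower_limit wraps to a large unsigned number after
// the subtraction.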
5094
5096 Label::Distance condition_met_distance) {
5097 UseScratchRegisterScope temps(this);
5098 Register scratch = temps.Acquire();
5099 LoadWord(scratch,
5100 MemOperand(kRootRegister, IsolateData::is_marking_flag_offset()));
5101 Branch(is_marking, ne, scratch, Operand(zero_reg));
5102}
5103
5105 Label::Distance condition_met_distance) {
5106 UseScratchRegisterScope temps(this);
5107 Register scratch = temps.Acquire();
5108 LoadWord(scratch,
5109 MemOperand(kRootRegister, IsolateData::is_marking_flag_offset()));
5110 Branch(not_marking, eq, scratch, Operand(zero_reg));
5111}
5112
5113// The calculated offset is either:
5114// * the 'target' input unmodified if this is a Wasm call, or
5115// * the offset of the target from the current PC, in bytes, for any
5116// other type of call.
5118 RelocInfo::Mode rmode,
5119 uint8_t* pc) {
5120 int64_t offset = static_cast<int64_t>(target);
5121 if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) {
5122 // The target of WebAssembly calls is still an index instead of an actual
5123 // address at this point, and needs to be encoded as-is.
5124 return offset;
5125 }
5126 offset -= reinterpret_cast<int64_t>(pc);
5128 return offset;
5129}
5130
5131void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
5132 Register rs, const Operand& rt) {
5133 ASM_CODE_COMMENT(this);
5134 if (CanUseNearCallOrJump(rmode)) {
5135 int64_t offset = CalculateTargetOffset(target, rmode, pc_);
5136 DCHECK(is_int32(offset));
5137 near_call(static_cast<int>(offset), rmode);
5138 } else {
5139 li(t6, Operand(static_cast<intptr_t>(target), rmode), ADDRESS_LOAD);
5140 Call(t6, cond, rs, rt);
5141 }
5142}
5143
5145 Condition cond, Register rs, const Operand& rt) {
5146 BlockTrampolinePoolScope block_trampoline_pool(this);
5148 DCHECK_IMPLIES(options().isolate_independent_code,
5150
5152 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
5153 // Inline the trampoline.
5154 CHECK_EQ(cond, Condition::al); // Implement if necessary.
5155 CallBuiltin(builtin);
5156 return;
5157 }
5158
5160
5161 if (CanUseNearCallOrJump(rmode)) {
5163 DCHECK(is_int32(index));
5164 Label skip;
5165 if (cond != al) Branch(&skip, NegateCondition(cond), rs, rt);
5167 static_cast<int32_t>(index));
5168 GenPCRelativeJumpAndLink(t6, static_cast<int32_t>(index));
5169 bind(&skip);
5170 } else {
5171 Call(code.address(), rmode);
5172 }
5173}
5174
5176 Register target) {
5177#if V8_TARGET_ARCH_RISCV64
5178 static_assert(kSystemPointerSize == 8);
5179#elif V8_TARGET_ARCH_RISCV32
5180 static_assert(kSystemPointerSize == 4);
5181#endif
5182 static_assert(kSmiTagSize == 1);
5183 static_assert(kSmiTag == 0);
5184
5185 // The builtin register contains the builtin index as a Smi.
5186 SmiUntag(target, builtin_index);
5188 LoadWord(target,
5189 MemOperand(target, IsolateData::builtin_entry_table_offset()));
5190}
5191
5193 Register target) {
5194 LoadEntryFromBuiltinIndex(builtin_index, target);
5195 Call(target);
5196}
5197
5200 switch (options().builtin_call_jump_mode) {
5203 Call(t6);
5204 break;
5205 }
5207 near_call(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
5208 break;
5210 LoadEntryFromBuiltin(builtin, t6);
5211 Call(t6);
5212 break;
5213 }
5215 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
5216 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
5218 DCHECK(is_int32(index));
5220 static_cast<int32_t>(index));
5221 GenPCRelativeJumpAndLink(t6, static_cast<int32_t>(index));
5222 } else {
5223 LoadEntryFromBuiltin(builtin, t6);
5224 Call(t6);
5225 }
5226 break;
5227 }
5228 }
5229}
5230
5232 Register type, Operand range) {
5233 Label done;
5234 Branch(&done, NegateCondition(cond), type, range);
5235 TailCallBuiltin(builtin);
5236 bind(&done);
5237}
5238
5241 CommentForOffHeapTrampoline("tail call", builtin));
5242 switch (options().builtin_call_jump_mode) {
5245 Jump(t6);
5246 break;
5247 }
5249 near_jump(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
5250 break;
5252 LoadEntryFromBuiltin(builtin, t6);
5253 Jump(t6);
5254 break;
5255 }
5257 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
5258 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
5260 DCHECK(is_int32(index));
5262 static_cast<int32_t>(index));
5263 GenPCRelativeJump(t6, static_cast<int32_t>(index));
5264 } else {
5265 LoadEntryFromBuiltin(builtin, t6);
5266 Jump(t6);
5267 }
5268 break;
5269 }
5270 }
5271}
5272
5277
5283
5285 BlockTrampolinePoolScope block_trampoline_pool(this);
5286 UseScratchRegisterScope temps(this);
5287 Register scratch = temps.Acquire();
5288 auipc(scratch, 0); // Load PC into scratch
5289 LoadWord(t6, MemOperand(scratch, kInstrSize * 4));
5290 jr(t6);
5291 nop(); // For alignment
5292#if V8_TARGET_ARCH_RISCV64
5293 DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
5294#elif V8_TARGET_ARCH_RISCV32
5295 DCHECK_EQ(reinterpret_cast<uint32_t>(pc_) % 4, 0);
5296#endif
5297 *reinterpret_cast<uintptr_t*>(pc_) = target; // pc_ should be aligned.
5298 pc_ += sizeof(uintptr_t);
5299}
5300
5302 // This generates the final instruction sequence for calls to C functions
5303 // once an exit frame has been constructed.
5304 //
5305 // Note that this assumes the caller code (i.e. the InstructionStream object
5306 // currently being generated) is immovable or that the callee function cannot
5307 // trigger GC, since the callee function will return to it.
5308 //
5309 // Compute the return address in ra to return to after the jalr below.
5310 // auipc captures the current pc, and (kNumInstructionsToJump + 1) *
5311 // kInstrSize is added so that ra points just past the call sequence.
5312 //
5313 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
5314 int kNumInstructionsToJump = 5;
5315 if (v8_flags.riscv_c_extension) kNumInstructionsToJump = 4;
5316 Label find_ra;
5317 // Adjust the value in ra to point to the correct return location, one
5318 // instruction past the real call into C code (the jalr(t6)), and store it.
5319 // This is the return address of the exit frame.
5320 auipc(ra, 0); // Set ra to the current PC
5321 bind(&find_ra);
5322 addi(ra, ra,
5323 (kNumInstructionsToJump + 1) *
5324 kInstrSize); // Set ra to insn after the call
5325
5326 // This spot was reserved in EnterExitFrame.
5327 StoreWord(ra, MemOperand(sp));
5328 addi(sp, sp, -kCArgsSlotsSize);
5329 // Stack is still aligned.
5330
5331 // Call the C routine.
5332 Mv(t6,
5333 target); // Function pointer to t6 to conform to ABI for PIC.
5334 jalr(t6);
5335 // Make sure the stored 'ra' points to this position.
5336 DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
5337}
5338
5339void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
5340 Jump(ra, cond, rs, rt);
5341 if (cond == al) {
5343 }
5344}
5345
5347 // Generate position independent long branch.
5348 BlockTrampolinePoolScope block_trampoline_pool(this);
5349 int32_t imm;
5350 imm = branch_long_offset(L);
5351 if (L->is_bound() && is_intn(imm, Assembler::kJumpOffsetBits) &&
5352 (imm & 1) == 0) {
5353 j(imm);
5354 nop();
5356 return;
5357 }
5358 GenPCRelativeJump(t6, imm);
5360}
5361
5363 // Generate position independent long branch and link.
5364 BlockTrampolinePoolScope block_trampoline_pool(this);
5365 int32_t imm;
5366 imm = branch_long_offset(L);
5367 if (L->is_bound() && is_intn(imm, Assembler::kJumpOffsetBits) &&
5368 (imm & 1) == 0) {
5369 jal(t6, imm);
5370 nop();
5371 return;
5372 }
5373 GenPCRelativeJumpAndLink(t6, imm);
5374}
5375
5377 AddWord(sp, sp, drop * kSystemPointerSize);
5378 Ret();
5379}
5380
5382 const Operand& r2) {
5383 // Both Drop and Ret need to be conditional.
5384 Label skip;
5385 if (cond != cc_always) {
5386 Branch(&skip, NegateCondition(cond), r1, r2);
5387 }
5388
5389 Drop(drop);
5390 Ret();
5391
5392 if (cond != cc_always) {
5393 bind(&skip);
5394 }
5395}
5396
5398 const Operand& op) {
5399 if (count <= 0) {
5400 return;
5401 }
5402
5403 Label skip;
5404
5405 if (cond != al) {
5406 Branch(&skip, NegateCondition(cond), reg, op);
5407 }
5408
5409 AddWord(sp, sp, Operand(count * kSystemPointerSize));
5410
5411 if (cond != al) {
5412 bind(&skip);
5413 }
5414}
5415
5417 if (scratch == no_reg) {
5418 Xor(reg1, reg1, Operand(reg2));
5419 Xor(reg2, reg2, Operand(reg1));
5420 Xor(reg1, reg1, Operand(reg2));
5421 } else {
5422 Mv(scratch, reg1);
5423 Mv(reg1, reg2);
5424 Mv(reg2, scratch);
5425 }
5426}
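// Illustrative note (not part of the original source): the scratch-free path
// above is the classic XOR swap. With reg1 = A and reg2 = B:
//   reg1 = A ^ B;  reg2 = B ^ (A ^ B) == A;  reg1 = (A ^ B) ^ A == B;
// It requires reg1 and reg2 to be distinct registers; if they aliased, the
// first XOR would zero both.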
5427
5428#ifdef V8_TARGET_ARCH_RISCV32
5429// Enforce alignment of sp.
5431 int frame_alignment = ActivationFrameAlignment();
5432 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
5433
5434 uint64_t frame_alignment_mask = ~(static_cast<uint64_t>(frame_alignment) - 1);
5435 And(sp, sp, Operand(frame_alignment_mask));
5436}
5437#endif
5438
5439void MacroAssembler::Call(Label* target) { BranchAndLink(target); }
5440
5442 RelocInfo::Mode rmode) {
5443 int32_t offset;
5444 if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
5445 CHECK(is_int32(offset + 0x800));
5446 int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
5447 int32_t Lo12 = (int32_t)offset << 20 >> 20;
5448 BlockTrampolinePoolScope block_trampoline_pool(this);
5449 auipc(dst, Hi20);
5450 addi(dst, dst, Lo12);
5451 } else {
5452 uintptr_t address = jump_address(target);
5453 li(dst, Operand(address, rmode), ADDRESS_LOAD);
5454 }
5455}
5456
5458 int case_value_base, Label** labels,
5459 int num_labels) {
5460 Register table = scratch;
5461 Label fallthrough, jump_table;
5462 if (case_value_base != 0) {
5463 SubWord(value, value, Operand(case_value_base));
5464 }
5465 Branch(&fallthrough, Condition::Ugreater_equal, value, Operand(num_labels));
5466 LoadAddress(table, &jump_table);
5467 CalcScaledAddress(table, table, value, kSystemPointerSizeLog2);
5468 LoadWord(table, MemOperand(table, 0));
5469 Jump(table);
5470 // Calculate the label area size and let MASM know that it will be impossible
5471 // to create the trampoline within that range. This forces MASM to create the
5472 // trampoline right here if necessary, i.e. if the label area is too large and
5473 // all unbound forward branches cannot be bound over it. Use nop() because the
5474 // trampoline cannot be emitted right after Jump().
5475 nop();
5476 static constexpr int mask = kInstrSize - 1;
5477 int aligned_label_area_size = num_labels * kUIntptrSize + kSystemPointerSize;
5478 int instructions_per_label_area =
5479 ((aligned_label_area_size + mask) & ~mask) >> kInstrSizeLog2;
5480 BlockTrampolinePoolFor(instructions_per_label_area);
5481 // Emit the jump table inline, under the assumption that it's not too big.
5483 bind(&jump_table);
5484 for (int i = 0; i < num_labels; ++i) {
5485 dd(labels[i]);
5486 }
5487 bind(&fallthrough);
5488}
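// Illustrative sizing example (not part of the original source): on RV64 with
// num_labels == 8, the inline table needs 8 * 8 + 8 = 72 bytes, so
// ((72 + 3) & ~3) >> 2 == 18 instruction slots are reserved via
// BlockTrampolinePoolFor above, keeping the trampoline pool out of the table.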
5489
5491 UseScratchRegisterScope temps(this);
5492 Register scratch = temps.Acquire();
5493 li(scratch, Operand(smi));
5494 push(scratch);
5495}
5496
5498 UseScratchRegisterScope temps(this);
5499 Register scratch = temps.Acquire();
5500 li(scratch, Operand(static_cast<uint32_t>(index.ptr())));
5501 push(scratch);
5502}
5503
5505 PushArrayOrder order) {
5506 UseScratchRegisterScope temps(this);
5507 Register scratch = temps.Acquire();
5508 Register scratch2 = temps.Acquire();
5509 Label loop, entry;
5510 if (order == PushArrayOrder::kReverse) {
5511 Mv(scratch, zero_reg);
5512 jmp(&entry);
5513 bind(&loop);
5514 CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
5515 LoadWord(scratch2, MemOperand(scratch2));
5516 push(scratch2);
5517 AddWord(scratch, scratch, Operand(1));
5518 bind(&entry);
5519 Branch(&loop, less, scratch, Operand(size));
5520 } else {
5521 Mv(scratch, size);
5522 jmp(&entry);
5523 bind(&loop);
5524 CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
5525 LoadWord(scratch2, MemOperand(scratch2));
5526 push(scratch2);
5527 bind(&entry);
5528 AddWord(scratch, scratch, Operand(-1));
5529 Branch(&loop, greater_equal, scratch, Operand(zero_reg));
5530 }
5531}
5532
5534 UseScratchRegisterScope temps(this);
5535 Register scratch = temps.Acquire();
5536 li(scratch, Operand(handle));
5537 push(scratch);
5538}
5539
5540// ---------------------------------------------------------------------------
5541// Exception handling.
5542
5544 // Adjust this code if not the case.
5547
5548 Push(Smi::zero()); // Padding.
5549
5550 // Link the current handler as the next handler.
5551 UseScratchRegisterScope temps(this);
5552 Register handler_address = temps.Acquire();
5553 li(handler_address,
5554 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
5555 Register handler = temps.Acquire();
5556 LoadWord(handler, MemOperand(handler_address));
5557 push(handler);
5558
5559 // Set this new handler as the current one.
5560 StoreWord(sp, MemOperand(handler_address));
5561}
5562
5564 static_assert(StackHandlerConstants::kNextOffset == 0);
5565 pop(a1);
5566 AddWord(sp, sp,
5567 Operand(static_cast<intptr_t>(StackHandlerConstants::kSize -
5569 UseScratchRegisterScope temps(this);
5570 Register scratch = temps.Acquire();
5571 li(scratch,
5572 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
5573 StoreWord(a1, MemOperand(scratch));
5574}
5575
5577 const DoubleRegister src) {
5578 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
5579 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
5580 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
5581 if (!IsDoubleZeroRegSet()) {
5583 }
5584 fsub_d(dst, src, kDoubleRegZero);
5585}
5586
5588 Move(dst, fa0); // Reg fa0 is FP return value.
5589}
5590
5592 Move(dst, fa0); // Reg fa0 is FP first argument value.
5593}
5594
5596
5598
5600 DoubleRegister src2) {
5601 const DoubleRegister fparg2 = fa1;
5602 if (src2 == fa0) {
5603 DCHECK(src1 != fparg2);
5604 Move(fparg2, src2);
5605 Move(fa0, src1);
5606 } else {
5607 Move(fa0, src1);
5608 Move(fparg2, src2);
5609 }
5610}
5611
5612// -----------------------------------------------------------------------------
5613// JavaScript invokes.
5614
5623
5625 Register scratch2,
5626 Label* stack_overflow, Label* done) {
5627 // Check the stack for overflow. We are not trying to catch
5628 // interruptions (e.g. debug break and preemption) here, so the "real stack
5629 // limit" is checked.
5630 DCHECK(stack_overflow != nullptr || done != nullptr);
5632 // Make scratch1 the space we have left. The stack might already be overflowed
5633 // here which will cause scratch1 to become negative.
5634 SubWord(scratch1, sp, scratch1);
5635 // Check if the arguments will overflow the stack.
5636 SllWord(scratch2, num_args, kSystemPointerSizeLog2);
5637 // Signed comparison.
5638 if (stack_overflow != nullptr) {
5639 Branch(stack_overflow, le, scratch1, Operand(scratch2));
5640 } else if (done != nullptr) {
5641 Branch(done, gt, scratch1, Operand(scratch2));
5642 } else {
5643 UNREACHABLE();
5644 }
5645}
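// Reference sketch of the check above (illustrative only, not part of the
// original source):
//
//   intptr_t space_left = sp - real_stack_limit;     // may be "negative"
//   intptr_t needed = num_args * kSystemPointerSize;
//   if (space_left <= needed) goto stack_overflow;   // signed comparison
//   // otherwise fall through (or jump to done when only 'done' is given)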
5646
5647void MacroAssembler::InvokePrologue(Register expected_parameter_count,
5648 Register actual_parameter_count,
5649 InvokeType type) {
5650 Label regular_invoke;
5651
5652 // a0: actual arguments count
5653 // a1: function (passed through to callee)
5654 // a2: expected arguments count
5655
5656 DCHECK_EQ(actual_parameter_count, a0);
5657 DCHECK_EQ(expected_parameter_count, a2);
5658
5659 // If overapplication or if the actual argument count is equal to the
5660 // formal parameter count, no need to push extra undefined values.
5661 SubWord(expected_parameter_count, expected_parameter_count,
5662 actual_parameter_count);
5663 Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
5664
5665 Label stack_overflow;
5666 {
5667 UseScratchRegisterScope temps(this);
5668 temps.Include(t0, t1);
5669 StackOverflowCheck(expected_parameter_count, temps.Acquire(),
5670 temps.Acquire(), &stack_overflow);
5671 }
5672 // Underapplication. Move the arguments already on the stack, including the
5673 // receiver, down to the new stack top.
5674 {
5675 Label copy;
5676 Register src = a6, dest = a7;
5677 Move(src, sp);
5678 SllWord(t0, expected_parameter_count, kSystemPointerSizeLog2);
5679 SubWord(sp, sp, Operand(t0));
5680 // Update stack pointer.
5681 Move(dest, sp);
5682 Move(t0, actual_parameter_count);
5683 bind(&copy);
5684 LoadWord(t1, MemOperand(src, 0));
5685 StoreWord(t1, MemOperand(dest, 0));
5686 SubWord(t0, t0, Operand(1));
5687 AddWord(src, src, Operand(kSystemPointerSize));
5688 AddWord(dest, dest, Operand(kSystemPointerSize));
5689 Branch(&copy, gt, t0, Operand(zero_reg));
5690 }
5691
5692 // Fill remaining expected arguments with undefined values.
5693 LoadRoot(t0, RootIndex::kUndefinedValue);
5694 {
5695 Label loop;
5696 bind(&loop);
5697 StoreWord(t0, MemOperand(a7, 0));
5698 SubWord(expected_parameter_count, expected_parameter_count, Operand(1));
5699 AddWord(a7, a7, Operand(kSystemPointerSize));
5700 Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
5701 }
5702 Branch(&regular_invoke);
5703
5704 bind(&stack_overflow);
5705 {
5706 FrameScope frame(
5707 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
5708 CallRuntime(Runtime::kThrowStackOverflow);
5709 break_(0xCC);
5710 }
5711 bind(&regular_invoke);
5712}
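// Illustrative example (not part of the original source): if the callee
// formally expects 4 parameters but only 2 were passed, InvokePrologue above
// computes expected - actual = 2, grows the stack by 2 slots, copies the
// slots currently at the top of the stack down to the new stack top, and
// fills the 2 missing parameter slots with kUndefinedValue before falling
// through to regular_invoke.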
5713
5716 Register expected_parameter_count_or_dispatch_handle,
5717 Register actual_parameter_count) {
5718 ASM_CODE_COMMENT(this);
5719 DCHECK(!AreAliased(t0, fun, new_target,
5720 expected_parameter_count_or_dispatch_handle,
5721 actual_parameter_count));
5722
5723 // Load receiver to pass it later to DebugOnFunctionCall hook.
5724 LoadReceiver(t0);
5725 FrameScope frame(
5726 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
5727
5728 SmiTag(expected_parameter_count_or_dispatch_handle);
5729 SmiTag(actual_parameter_count);
5730 Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
5731
5732 if (new_target.is_valid()) {
5734 }
5735 Push(fun, fun, t0);
5736 CallRuntime(Runtime::kDebugOnFunctionCall);
5737 Pop(fun);
5738 if (new_target.is_valid()) {
5739 Pop(new_target);
5740 }
5741
5742 Pop(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
5743 SmiUntag(actual_parameter_count);
5744 SmiUntag(expected_parameter_count_or_dispatch_handle);
5745}
5746
5747#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)
5749 Register function, Register actual_parameter_count, InvokeType type,
5750 ArgumentAdaptionMode argument_adaption_mode) {
5751 ASM_CODE_COMMENT(this);
5752 // You can't call a function without a valid frame.
5753 DCHECK(type == InvokeType::kJump || has_frame());
5754
5755 // Contract with called JS functions requires that function is passed in a1.
5756 // (See FullCodeGenerator::Generate().)
5757 DCHECK_EQ(function, a1);
5758
5759 // Set up the context.
5760 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
5761
5762 InvokeFunctionCode(function, no_reg, actual_parameter_count, type,
5763 argument_adaption_mode);
5764}
5765
5767 Register function, Register new_target, Register actual_parameter_count,
5768 InvokeType type) {
5769 ASM_CODE_COMMENT(this);
5770 // You can't call a function without a valid frame.
5771 DCHECK(type == InvokeType::kJump || has_frame());
5772
5773 // Contract with called JS functions requires that function is passed in a1.
5774 // (See FullCodeGenerator::Generate().)
5775 DCHECK_EQ(function, a1);
5776
5777 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
5778
5779 InvokeFunctionCode(function, new_target, actual_parameter_count, type);
5780}
5781
5783 Register function, Register new_target, Register actual_parameter_count,
5784 InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
5785 ASM_CODE_COMMENT(this);
5786 // You can't call a function without a valid frame.
5788 DCHECK_EQ(function, a1);
5789 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
5790
5791 Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
5792 Lw(dispatch_handle,
5793 FieldMemOperand(function, JSFunction::kDispatchHandleOffset));
5794
5795 // On function call, call into the debugger if necessary.
5796 Label debug_hook, continue_after_hook;
5797 Register scratch = s1;
5798 {
5799 li(scratch,
5800 ExternalReference::debug_hook_on_function_call_address(isolate()));
5801 Lb(scratch, MemOperand(scratch, 0));
5802 BranchShort(&debug_hook, ne, scratch, Operand(zero_reg));
5803 }
5804 bind(&continue_after_hook);
5805
5806 // Clear the new.target register if not given.
5807 if (!new_target.is_valid()) {
5808 LoadRoot(a3, RootIndex::kUndefinedValue);
5809 }
5810
5811 if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) {
5812 Register expected_parameter_count = a2;
5813 LoadParameterCountFromJSDispatchTable(expected_parameter_count,
5814 dispatch_handle, scratch);
5815 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
5816 }
5817
5818 // We call indirectly through the code field in the function to
5819 // allow recompilation to take effect without changing any of the
5820 // call sites.
5821 LoadEntrypointFromJSDispatchTable(kJavaScriptCallCodeStartRegister,
5822 dispatch_handle, scratch);
5823 switch (type) {
5824 case InvokeType::kCall:
5826 break;
5827 case InvokeType::kJump:
5829 break;
5830 }
5831 Label done;
5832 Branch(&done);
5833
5834 // Deferred debug hook.
5835 bind(&debug_hook);
5836 CallDebugOnFunctionCall(function, new_target, dispatch_handle,
5837 actual_parameter_count);
5838 Branch(&continue_after_hook);
5839
5840 bind(&done);
5841}
5842#else // !V8_ENABLE_LEAPTIERING
5844 Register expected_parameter_count,
5845 Register actual_parameter_count,
5846 InvokeType type) {
5847 // You can't call a function without a valid frame.
5849
5850 // Contract with called JS functions requires that function is passed in a1.
5851 DCHECK_EQ(function, a1);
5852
5853 // Get the function and setup the context.
5854 LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5855
5856 InvokeFunctionCode(a1, no_reg, expected_parameter_count,
5857 actual_parameter_count, type);
5858}
5859
5861 Register function, Register new_target, Register actual_parameter_count,
5862 InvokeType type) {
5863 ASM_CODE_COMMENT(this);
5864 // You can't call a function without a valid frame.
5866
5867 // Contract with called JS functions requires that function is passed in a1.
5868 DCHECK_EQ(function, a1);
5869 Register expected_parameter_count = a2;
5870 {
5871 UseScratchRegisterScope temps(this);
5872 Register temp_reg = temps.Acquire();
5874 temp_reg,
5875 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
5876 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
5877 // The argument count is stored as uint16_t
5878 Lhu(expected_parameter_count,
5879 FieldMemOperand(temp_reg,
5880 SharedFunctionInfo::kFormalParameterCountOffset));
5881 }
5882 InvokeFunctionCode(function, new_target, expected_parameter_count,
5883 actual_parameter_count, type);
5884}
5885
5887 Register expected_parameter_count,
5888 Register actual_parameter_count,
5889 InvokeType type) {
5890 ASM_CODE_COMMENT(this);
5891 // You can't call a function without a valid frame.
5893 DCHECK_EQ(function, a1);
5894 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
5895
5896 // On function call, call into the debugger if necessary.
5897 Label debug_hook, continue_after_hook;
5898 Register scratch = s1;
5899 {
5900 li(scratch,
5901 ExternalReference::debug_hook_on_function_call_address(isolate()));
5902 Lb(scratch, MemOperand(scratch, 0));
5903 BranchShort(&debug_hook, ne, scratch, Operand(zero_reg));
5904 }
5905 bind(&continue_after_hook);
5906
5907 // Clear the new.target register if not given.
5908 if (!new_target.is_valid()) {
5909 LoadRoot(a3, RootIndex::kUndefinedValue);
5910 }
5911
5912 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
5913
5914 // We call indirectly through the code field in the function to
5915 // allow recompilation to take effect without changing any of the
5916 // call sites.
5917 constexpr int unused_argument_count = 0;
5918 switch (type) {
5919 case InvokeType::kCall:
5920 CallJSFunction(function, unused_argument_count);
5921 break;
5922 case InvokeType::kJump:
5923 JumpJSFunction(function);
5924 break;
5925 }
5926
5927 Label done;
5928 Branch(&done);
5929
5930 // Deferred debug hook.
5931 bind(&debug_hook);
5932 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
5933 actual_parameter_count);
5934 Branch(&continue_after_hook);
5935
5936 // Continue here if InvokePrologue does handle the invocation due to
5937 // mismatched parameter counts.
5938 bind(&done);
5939}
5940#endif // V8_ENABLE_LEAPTIERING
5941// ---------------------------------------------------------------------------
5942// Support functions.
5943
5945 Register type_reg) {
5946 LoadMap(map, object);
5947 Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5948}
5949
5951 InstanceType lower_limit,
5952 Register range) {
5953 Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5954 SubWord(range, type_reg, Operand(lower_limit));
5955}
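// Subtracting lower_limit biases the instance type so that callers can test
// membership in the range [lower_limit, upper_limit] with a single unsigned
// comparison of `range` against (upper_limit - lower_limit), as done in the
// Assert* helpers later in this file.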
5956//------------------------------------------------------------------------------
5957// Wasm
5959 VSew sew, Vlmul lmul) {
5960 VU.set(kScratchReg, sew, lmul);
5961 vmseq_vv(v0, lhs, rhs);
5962 li(kScratchReg, -1);
5963 vmv_vx(dst, zero_reg);
5964 vmerge_vx(dst, kScratchReg, dst);
5965}
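// The mask-and-merge pattern above (and in the comparison helpers that follow)
// first writes the per-lane comparison result into the mask register v0,
// zeroes the destination with vmv_vx, and then uses vmerge_vx with -1 so that
// lanes where the comparison holds become all ones while the other lanes stay
// zero, which is the boolean-vector representation Wasm SIMD expects.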
5966
5968 VSew sew, Vlmul lmul) {
5969 VU.set(kScratchReg, sew, lmul);
5970 vmsne_vv(v0, lhs, rhs);
5971 li(kScratchReg, -1);
5972 vmv_vx(dst, zero_reg);
5973 vmerge_vx(dst, kScratchReg, dst);
5974}
5975
5977 VSew sew, Vlmul lmul) {
5978 VU.set(kScratchReg, sew, lmul);
5979 vmsle_vv(v0, rhs, lhs);
5980 li(kScratchReg, -1);
5981 vmv_vx(dst, zero_reg);
5982 vmerge_vx(dst, kScratchReg, dst);
5983}
5984
5986 VSew sew, Vlmul lmul) {
5987 VU.set(kScratchReg, sew, lmul);
5988 vmsleu_vv(v0, rhs, lhs);
5989 li(kScratchReg, -1);
5990 vmv_vx(dst, zero_reg);
5991 vmerge_vx(dst, kScratchReg, dst);
5992}
5993
5995 VSew sew, Vlmul lmul) {
5996 VU.set(kScratchReg, sew, lmul);
5997 vmslt_vv(v0, rhs, lhs);
5998 li(kScratchReg, -1);
5999 vmv_vx(dst, zero_reg);
6000 vmerge_vx(dst, kScratchReg, dst);
6001}
6002
6004 VSew sew, Vlmul lmul) {
6005 VU.set(kScratchReg, sew, lmul);
6006 vmsltu_vv(v0, rhs, lhs);
6007 li(kScratchReg, -1);
6008 vmv_vx(dst, zero_reg);
6009 vmerge_vx(dst, kScratchReg, dst);
6010}
6011
6012#if V8_TARGET_ARCH_RISCV64
6013void MacroAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
6014 uint64_t vals[2];
6015 memcpy(vals, imms, sizeof(vals));
6016 VU.set(kScratchReg, E64, m1);
6017 li(kScratchReg, vals[1]);
6019 vslideup_vi(dst, kSimd128ScratchReg, 1);
6020 li(kScratchReg, vals[0]);
6021 vmv_sx(dst, kScratchReg);
6022}
6023#elif V8_TARGET_ARCH_RISCV32
6024void MacroAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
6025 uint32_t vals[4];
6026 memcpy(vals, imms, sizeof(vals));
6027 VU.set(kScratchReg, VSew::E32, Vlmul::m1);
6028 li(kScratchReg, vals[3]);
6030 li(kScratchReg, vals[2]);
6032 li(kScratchReg, vals[1]);
6033 vmv_vx(dst, kScratchReg);
6034 li(kScratchReg, vals[0]);
6035 vmv_sx(dst, kScratchReg);
6036 vslideup_vi(dst, kSimd128ScratchReg, 2);
6037}
6038#endif
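// Both variants above materialize the 128-bit constant without a memory load:
// each chunk of the literal is loaded into kScratchReg with li, moved into a
// vector register with vmv_sx/vmv_vx, and vslideup_vi shifts the upper chunks
// into their final lane positions.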
6039
6040void MacroAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
6041 MemOperand src, Trapper&& trapper) {
6042 DCHECK_NE(kScratchReg, src.rm());
6043 if (ts == 8) {
6044 Lbu(kScratchReg2, src, std::forward<Trapper>(trapper));
6045 VU.set(kScratchReg, E32, m1);
6046 li(kScratchReg, 0x1 << laneidx);
6047 vmv_sx(v0, kScratchReg);
6048 VU.set(kScratchReg, E8, m1);
6049 vmerge_vx(dst, kScratchReg2, dst);
6050 } else if (ts == 16) {
6051 Lhu(kScratchReg2, src, std::forward<Trapper>(trapper));
6052 VU.set(kScratchReg, E16, m1);
6053 li(kScratchReg, 0x1 << laneidx);
6054 vmv_sx(v0, kScratchReg);
6055 vmerge_vx(dst, kScratchReg2, dst);
6056 } else if (ts == 32) {
6057 Load32U(kScratchReg2, src, std::forward<Trapper>(trapper));
6058 VU.set(kScratchReg, E32, m1);
6059 li(kScratchReg, 0x1 << laneidx);
6060 vmv_sx(v0, kScratchReg);
6061 vmerge_vx(dst, kScratchReg2, dst);
6062 } else if (ts == 64) {
6063#if V8_TARGET_ARCH_RISCV64
6064 LoadWord(kScratchReg2, src, std::forward<Trapper>(trapper));
6065 VU.set(kScratchReg, E64, m1);
6066 li(kScratchReg, 0x1 << laneidx);
6067 vmv_sx(v0, kScratchReg);
6068 vmerge_vx(dst, kScratchReg2, dst);
6069#elif V8_TARGET_ARCH_RISCV32
6070 LoadDouble(kScratchDoubleReg, src, std::forward<Trapper>(trapper));
6071 VU.set(kScratchReg, E64, m1);
6072 li(kScratchReg, 0x1 << laneidx);
6073 vmv_sx(v0, kScratchReg);
6074 vfmerge_vf(dst, kScratchDoubleReg, dst);
6075#endif
6076 } else {
6077 UNREACHABLE();
6078 }
6079}
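// LoadLane performs a masked insert: the scalar element is loaded from memory
// into kScratchReg2, a one-bit mask (1 << laneidx) is placed in v0, and
// vmerge_vx then overwrites only the selected lane of dst while all other
// lanes keep their previous contents. On RISCV32 the 64-bit case goes through
// the FPU (LoadDouble/vfmerge_vf) because there is no 64-bit GPR.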
6080
6081void MacroAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
6082 MemOperand dst, Trapper&& trapper) {
6083 DCHECK_NE(kScratchReg, dst.rm());
6084 if (sz == 8) {
6085 VU.set(kScratchReg, E8, m1);
6086 vslidedown_vi(kSimd128ScratchReg, src, laneidx);
6088 trapper(pc_offset());
6089 Sb(kScratchReg, dst, std::forward<Trapper>(trapper));
6090 } else if (sz == 16) {
6091 VU.set(kScratchReg, E16, m1);
6092 vslidedown_vi(kSimd128ScratchReg, src, laneidx);
6094 trapper(pc_offset());
6095 Sh(kScratchReg, dst, std::forward<Trapper>(trapper));
6096 } else if (sz == 32) {
6097 VU.set(kScratchReg, E32, m1);
6098 vslidedown_vi(kSimd128ScratchReg, src, laneidx);
6100 trapper(pc_offset());
6101 Sw(kScratchReg, dst, std::forward<Trapper>(trapper));
6102 } else {
6103 DCHECK_EQ(sz, 64);
6104 VU.set(kScratchReg, E64, m1);
6105 vslidedown_vi(kSimd128ScratchReg, src, laneidx);
6106#if V8_TARGET_ARCH_RISCV64
6108 trapper(pc_offset());
6109 StoreWord(kScratchReg, dst, std::forward<Trapper>(trapper));
6110#elif V8_TARGET_ARCH_RISCV32
6112 trapper(pc_offset());
6113 StoreDouble(kScratchDoubleReg, dst, std::forward<Trapper>(trapper));
6114#endif
6115 }
6116}
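// StoreLane is the inverse: vslidedown_vi moves the requested lane into
// element 0 of kSimd128ScratchReg, the element is transferred to a scalar (or,
// on RISCV32 for 64-bit lanes, a double) register, and a normal scalar store
// writes it out. The trapper callback records the pc of the store so that a
// memory fault there can be handled as a Wasm trap.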
6117// -----------------------------------------------------------------------------
6118// Runtime calls.
6119#if V8_TARGET_ARCH_RISCV64
6120void MacroAssembler::AddOverflow64(Register dst, Register left,
6121 const Operand& right, Register overflow) {
6122 UseScratchRegisterScope temps(this);
6123 BlockTrampolinePoolScope block_trampoline_pool(this);
6124 Register right_reg = no_reg;
6125 Register scratch = temps.Acquire();
6126 Register scratch2 = temps.Acquire();
6127 if (!right.is_reg()) {
6128 li(scratch, Operand(right));
6129 right_reg = scratch;
6130 } else {
6131 right_reg = right.rm();
6132 }
6133 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
6134 overflow != scratch2);
6135 DCHECK(overflow != left && overflow != right_reg);
6136 if (dst == left || dst == right_reg) {
6137 add(scratch2, left, right_reg);
6138 xor_(overflow, scratch2, left);
6139 xor_(scratch, scratch2, right_reg);
6140 and_(overflow, overflow, scratch);
6141 Mv(dst, scratch2);
6142 } else {
6143 add(dst, left, right_reg);
6144 xor_(overflow, dst, left);
6145 xor_(scratch, dst, right_reg);
6146 and_(overflow, overflow, scratch);
6147 }
6148}
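// Overflow detection above relies on the identity that signed addition
// overflows exactly when both operands have the same sign but the result has
// a different sign: (result ^ left) & (result ^ right) has its sign bit set
// iff that happened. For example, with 8-bit values for illustration,
// 0x70 + 0x70 = 0xE0 and (0xE0 ^ 0x70) & (0xE0 ^ 0x70) = 0x90, whose sign bit
// is set, signalling overflow; callers therefore test the sign of `overflow`.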
6149
6150void MacroAssembler::SubOverflow64(Register dst, Register left,
6151 const Operand& right, Register overflow) {
6152 UseScratchRegisterScope temps(this);
6153 BlockTrampolinePoolScope block_trampoline_pool(this);
6154 Register right_reg = no_reg;
6155 Register scratch = temps.Acquire();
6156 Register scratch2 = temps.Acquire();
6157 if (!right.is_reg()) {
6158 li(scratch, Operand(right));
6159 right_reg = scratch;
6160 } else {
6161 right_reg = right.rm();
6162 }
6163
6164 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
6165 overflow != scratch2);
6166 DCHECK(overflow != left && overflow != right_reg);
6167
6168 if (dst == left || dst == right_reg) {
6169 sub(scratch2, left, right_reg);
6170 xor_(overflow, left, scratch2);
6171 xor_(scratch, left, right_reg);
6172 and_(overflow, overflow, scratch);
6173 Mv(dst, scratch2);
6174 } else {
6175 sub(dst, left, right_reg);
6176 xor_(overflow, left, dst);
6177 xor_(scratch, left, right_reg);
6178 and_(overflow, overflow, scratch);
6179 }
6180}
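// For subtraction the corresponding identity is used: left - right overflows
// exactly when left and right have different signs and the result's sign
// differs from left's, so (left ^ result) & (left ^ right) has its sign bit
// set iff overflow occurred.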
6181
6182void MacroAssembler::MulOverflow32(Register dst, Register left,
6183 const Operand& right, Register overflow,
6184 bool sign_extend_inputs) {
6185 ASM_CODE_COMMENT(this);
6186 UseScratchRegisterScope temps(this);
6187 BlockTrampolinePoolScope block_trampoline_pool(this);
6188 Register right_reg = no_reg;
6189 Register scratch = temps.Acquire();
6190 if (!right.is_reg()) {
6191 if (!right.IsHeapNumberRequest()) {
6192 // Emulate sext.w behavior for an immediate input.
6193 int64_t imm;
6194 imm = static_cast<int32_t>(right.immediate() & 0xFFFFFFFFU);
6195 li(scratch, Operand(imm));
6196 } else {
6197 li(scratch, Operand(right));
6198 }
6199 right_reg = scratch;
6200 } else {
6201 right_reg = right.rm();
6202 }
6203 Register rs1 = no_reg;
6204 Register rs2 = no_reg;
6205 DCHECK(overflow != left && overflow != right_reg);
6206 if (sign_extend_inputs) {
6207 sext_w(overflow, left);
6208 if (right.is_reg()) {
6209 sext_w(scratch, right_reg);
6210 }
6211 rs1 = overflow;
6212 rs2 = scratch;
6213 } else {
6214 // We can skip sext_w on register inputs if sign extension was not requested.
6215 rs1 = left;
6216 rs2 = right_reg;
6217 // No assert is needed if the input was an immediate.
6218 if (right.is_reg()) {
6219 AssertSignExtended(rs2);
6220 }
6221 AssertSignExtended(rs1);
6222 }
6223 mul(overflow, rs1, rs2);
6224 sext_w(dst, overflow);
6225 xor_(overflow, overflow, dst);
6226}
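// The 32-bit multiply above is done in the full 64-bit register: mul produces
// the 64-bit product of the (sign-extended) inputs, sext_w takes the low
// 32 bits sign-extended back to 64 bits, and the final xor is non-zero exactly
// when the product does not fit in an int32, i.e. when the multiplication
// overflowed.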
6227
6228void MacroAssembler::MulOverflow64(Register dst, Register left,
6229 const Operand& right, Register overflow) {
6230 ASM_CODE_COMMENT(this);
6231 UseScratchRegisterScope temps(this);
6232 BlockTrampolinePoolScope block_trampoline_pool(this);
6233 Register right_reg = no_reg;
6234 Register scratch = temps.Acquire();
6235 Register scratch2 = temps.Acquire();
6236 if (!right.is_reg()) {
6237 li(scratch, Operand(right));
6238 right_reg = scratch;
6239 } else {
6240 right_reg = right.rm();
6241 }
6242
6243 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
6244 overflow != scratch2);
6245 DCHECK(overflow != left && overflow != right_reg);
6246 // Use this mulh/mul sequence as recommended by the RISC-V ISA
6247 // spec, section 7.1.
6248 // Upper part.
6249 mulh(scratch2, left, right_reg);
6250 // Lower part.
6251 mul(dst, left, right_reg);
6252 // Sign-extend the lower part to 64 bits.
6253 srai(overflow, dst, 63);
6254 // If the upper part does not equal the sign extension of the lower part,
6255 // overflow has occurred.
6256 xor_(overflow, overflow, scratch2);
6257}
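// mulh yields the upper 64 bits of the 128-bit signed product and mul the
// lower 64 bits. If the product fits in 64 bits, the upper half must equal the
// sign extension of the lower half (computed with srai by 63); the final xor
// is therefore non-zero exactly when the multiplication overflowed.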
6258
6259#elif V8_TARGET_ARCH_RISCV32
6260void MacroAssembler::AddOverflow(Register dst, Register left,
6261 const Operand& right, Register overflow) {
6262 UseScratchRegisterScope temps(this);
6263 BlockTrampolinePoolScope block_trampoline_pool(this);
6264 Register right_reg = no_reg;
6265 Register scratch = temps.Acquire();
6266 Register scratch2 = temps.Acquire();
6267 if (!right.is_reg()) {
6268 li(scratch, Operand(right));
6269 right_reg = scratch;
6270 } else {
6271 right_reg = right.rm();
6272 }
6273 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
6274 overflow != scratch2);
6275 DCHECK(overflow != left && overflow != right_reg);
6276 if (dst == left || dst == right_reg) {
6277 add(scratch2, left, right_reg);
6278 xor_(overflow, scratch2, left);
6279 xor_(scratch, scratch2, right_reg);
6280 and_(overflow, overflow, scratch);
6281 Mv(dst, scratch2);
6282 } else {
6283 add(dst, left, right_reg);
6284 xor_(overflow, dst, left);
6285 xor_(scratch, dst, right_reg);
6286 and_(overflow, overflow, scratch);
6287 }
6288}
6289
6290void MacroAssembler::SubOverflow(Register dst, Register left,
6291 const Operand& right, Register overflow) {
6292 UseScratchRegisterScope temps(this);
6293 BlockTrampolinePoolScope block_trampoline_pool(this);
6294 Register right_reg = no_reg;
6295 Register scratch = temps.Acquire();
6296 Register scratch2 = temps.Acquire();
6297 if (!right.is_reg()) {
6298 li(scratch, Operand(right));
6299 right_reg = scratch;
6300 } else {
6301 right_reg = right.rm();
6302 }
6303
6304 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
6305 overflow != scratch2);
6306 DCHECK(overflow != left && overflow != right_reg);
6307
6308 if (dst == left || dst == right_reg) {
6309 sub(scratch2, left, right_reg);
6310 xor_(overflow, left, scratch2);
6311 xor_(scratch, left, right_reg);
6312 and_(overflow, overflow, scratch);
6313 Mv(dst, scratch2);
6314 } else {
6315 sub(dst, left, right_reg);
6316 xor_(overflow, left, dst);
6317 xor_(scratch, left, right_reg);
6318 and_(overflow, overflow, scratch);
6319 }
6320}
6321
6322void MacroAssembler::MulOverflow32(Register dst, Register left,
6323 const Operand& right, Register overflow,
6324 bool sign_extend_inputs) {
6325 ASM_CODE_COMMENT(this);
6326 UseScratchRegisterScope temps(this);
6327 BlockTrampolinePoolScope block_trampoline_pool(this);
6328 Register right_reg = no_reg;
6329 Register scratch = temps.Acquire();
6330 Register scratch2 = temps.Acquire();
6331 if (!right.is_reg()) {
6332 li(scratch, Operand(right));
6333 right_reg = scratch;
6334 } else {
6335 right_reg = right.rm();
6336 }
6337
6338 DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
6339 overflow != scratch2);
6340 DCHECK(overflow != left && overflow != right_reg);
6341 mulh(overflow, left, right_reg);
6342 mul(dst, left, right_reg);
6343 srai(scratch2, dst, 31);
6344 xor_(overflow, overflow, scratch2);
6345}
6346#endif
6347
6349 int num_arguments) {
6350 ASM_CODE_COMMENT(this);
6351 // All parameters are on the stack. a0 has the return value after call.
6352
6353 // If the expected number of arguments of the runtime function is
6354 // constant, we check that the actual number of arguments matches the
6355 // expectation.
6356 CHECK(f->nargs < 0 || f->nargs == num_arguments);
6357
6358 // TODO(1236192): Most runtime routines don't need the number of
6359 // arguments passed in because it is constant. At some point we
6360 // should remove this need and make the runtime routine entry code
6361 // smarter.
6362 PrepareCEntryArgs(num_arguments);
6364 bool switch_to_central = options().is_wasm;
6365 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central));
6366}
6367
6369 ASM_CODE_COMMENT(this);
6370 const Runtime::Function* function = Runtime::FunctionForId(fid);
6371 DCHECK_EQ(1, function->result_size);
6372 if (function->nargs >= 0) {
6373 PrepareCEntryArgs(function->nargs);
6374 }
6376}
6377
6379 bool builtin_exit_frame) {
6380 ASM_CODE_COMMENT(this);
6381 PrepareCEntryFunction(builtin);
6382 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
6383}
6384
6386 Label* target_if_cleared) {
6387 ASM_CODE_COMMENT(this);
6388 CompareTaggedAndBranch(target_if_cleared, eq, in,
6390 And(out, in, Operand(~kWeakHeapObjectMask));
6391}
6392
6394 Register scratch1,
6395 Register scratch2) {
6396 DCHECK_GT(value, 0);
6397 if (v8_flags.native_code_counters && counter->Enabled()) {
6398 ASM_CODE_COMMENT(this);
6399 // This operation has to be exactly 32-bit wide in case the external
6400 // reference table redirects the counter to a uint32_t
6401 // dummy_stats_counter_ field.
6402 li(scratch2, ExternalReference::Create(counter));
6403 Lw(scratch1, MemOperand(scratch2));
6404 Add32(scratch1, scratch1, Operand(value));
6405 Sw(scratch1, MemOperand(scratch2));
6406 }
6407}
6408
6410 Register scratch1,
6411 Register scratch2) {
6412 DCHECK_GT(value, 0);
6413 if (v8_flags.native_code_counters && counter->Enabled()) {
6414 ASM_CODE_COMMENT(this);
6415 // This operation has to be exactly 32-bit wide in case the external
6416 // reference table redirects the counter to a uint32_t
6417 // dummy_stats_counter_ field.
6418 li(scratch2, ExternalReference::Create(counter));
6419 Lw(scratch1, MemOperand(scratch2));
6420 Sub32(scratch1, scratch1, Operand(value));
6421 Sw(scratch1, MemOperand(scratch2));
6422 }
6423}
6424
6425// -----------------------------------------------------------------------------
6426// Debugging.
6427
6430
6432 Operand rt) {
6433 if (v8_flags.debug_code) Check(cc, reason, rs, rt);
6434}
6435
6437 Register tmp, AbortReason abort_reason) {
6438 if (!v8_flags.debug_code) return;
6439
6440 ASM_CODE_COMMENT(this);
6441 DCHECK(!AreAliased(object, map_tmp, tmp));
6442 Label ok;
6443
6444 JumpIfSmi(object, &ok);
6445
6446 GetObjectType(object, map_tmp, tmp);
6447
6448 Branch(&ok, kUnsignedLessThanEqual, tmp, Operand(LAST_NAME_TYPE));
6449
6450 Branch(&ok, kUnsignedGreaterThanEqual, tmp, Operand(FIRST_JS_RECEIVER_TYPE));
6451
6452 Branch(&ok, kEqual, map_tmp, RootIndex::kHeapNumberMap);
6453
6454 Branch(&ok, kEqual, map_tmp, RootIndex::kBigIntMap);
6455
6456 Branch(&ok, kEqual, object, RootIndex::kUndefinedValue);
6457
6458 Branch(&ok, kEqual, object, RootIndex::kTrueValue);
6459
6460 Branch(&ok, kEqual, object, RootIndex::kFalseValue);
6461
6462 Branch(&ok, kEqual, object, RootIndex::kNullValue);
6463
6464 Abort(abort_reason);
6465 bind(&ok);
6466}
6467
6468#ifdef V8_ENABLE_DEBUG_CODE
6469
6470void MacroAssembler::AssertZeroExtended(Register int32_register) {
6471 if (!v8_flags.slow_debug_code) return;
6472 ASM_CODE_COMMENT(this);
6473 Assert(Condition::ule, AbortReason::k32BitValueInRegisterIsNotZeroExtended,
6474 int32_register, Operand(kMaxUInt32));
6475}
6476
6477void MacroAssembler::AssertSignExtended(Register int32_register) {
6478 if (!v8_flags.slow_debug_code) return;
6479 ASM_CODE_COMMENT(this);
6480 Assert(Condition::le, AbortReason::k32BitValueInRegisterIsNotSignExtended,
6481 int32_register, Operand(kMaxInt));
6482 Assert(Condition::ge, AbortReason::k32BitValueInRegisterIsNotSignExtended,
6483 int32_register, Operand(kMinInt));
6484}
6485
6487 Register value, Register scratch,
6488 unsigned lower_limit, unsigned higher_limit) {
6489 if (!v8_flags.debug_code) return;
6490 Label ok;
6491 BranchRange(&ok, cond, value, scratch, lower_limit, higher_limit);
6492 Abort(reason);
6493 bind(&ok);
6494}
6495
6496#endif // V8_ENABLE_DEBUG_CODE
6497
6499 Operand rt) {
6500 Label L;
6501 BranchShort(&L, cc, rs, rt);
6502 Abort(reason);
6503 // Will not return here.
6504 bind(&L);
6505}
6506
6508 Operand rt) {
6509 Check(cc, reason, rs, rt);
6510}
6511
6513 ASM_CODE_COMMENT(this);
6514 if (v8_flags.code_comments) {
6515 RecordComment("Abort message:", SourceLocation{});
6517 }
6518
6519 // Without debug code, or when trap_on_abort is set, save code size and just trap.
6520 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
6521 ebreak();
6522 return;
6523 }
6524
6525 if (should_abort_hard()) {
6526 // We don't care if we constructed a frame. Just pretend we did.
6527 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
6528 PrepareCallCFunction(1, a0);
6529 li(a0, Operand(static_cast<int>(reason)));
6530 li(a1, ExternalReference::abort_with_reason());
6531 // Use Call directly to avoid any unneeded overhead. The function won't
6532 // return anyway.
6533 Call(a1);
6534 return;
6535 }
6536
6537 Label abort_start;
6538 bind(&abort_start);
6539
6540 Move(a0, Smi::FromInt(static_cast<int>(reason)));
6541
6542 {
6543 // We don't actually want to generate a pile of code for this, so just
6544 // claim there is a stack frame, without generating one.
6546 if (root_array_available()) {
6547 // Generate an indirect call via builtins entry table here in order to
6548 // ensure that the interpreter_entry_return_pc_offset is the same for
6549 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
6550 // when v8_flags.debug_code is enabled.
6551 LoadEntryFromBuiltin(Builtin::kAbort, t6);
6552 Call(t6);
6553 } else {
6554 CallBuiltin(Builtin::kAbort);
6555 }
6556 }
6557 // Will not return here.
6559 // If the calling code cares about the exact number of
6560 // instructions generated, we insert padding here to keep the size
6561 // of the Abort macro constant.
6562 // Currently in debug mode with debug_code enabled the number of
6563 // generated instructions is 10, so we use this as a maximum value.
6564 static const int kExpectedAbortInstructions = 10;
6565 int abort_instructions = InstructionsGeneratedSince(&abort_start);
6566 DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
6567 while (abort_instructions++ < kExpectedAbortInstructions) {
6568 nop();
6569 }
6570 }
6571}
6572
6573 // Loads the object's instance type into type_reg and branches to target when the comparison against the given type satisfies cond.
6575 Register type_reg,
6576 InstanceType type, Condition cond,
6577 Label* target,
6578 Label::Distance distance) {
6579 ASM_CODE_COMMENT(this);
6580 LoadMap(map, object);
6581 // Borrowed from BaselineAssembler
6582 if (v8_flags.debug_code) {
6583 AssertNotSmi(map);
6584 Register temp_type_reg = type_reg;
6585 UseScratchRegisterScope temps(this);
6586 if (map == temp_type_reg) {
6587 // GetObjectType clobbers its 2nd and 3rd arguments, so they can't be the
6588 // same register as the first one.
6589 temp_type_reg = temps.Acquire();
6590 }
6591 GetObjectType(map, temp_type_reg, temp_type_reg);
6592 Assert(eq, AbortReason::kUnexpectedValue, temp_type_reg, Operand(MAP_TYPE));
6593 }
6594 Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
6595 Branch(target, cond, type_reg, Operand(type), distance);
6596}
6597
6602
6607
6609 ASM_CODE_COMMENT(this);
6610 LoadMap(dst, cp);
6612 dst, FieldMemOperand(
6613 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
6615}
6616
6618 CodeKind min_opt_level,
6619 Register feedback_vector,
6620 FeedbackSlot slot,
6621 Label* on_result,
6622 Label::Distance distance) {
6623 ASM_CODE_COMMENT(this);
6624 Label fallthrough, clear_slot;
6626 scratch_and_result,
6627 FieldMemOperand(feedback_vector,
6629 LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
6630
6631 // Is it marked_for_deoptimization? If yes, clear the slot.
6632 {
6633 // The entry references a CodeWrapper object. Unwrap it now.
6635 scratch_and_result,
6636 FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));
6637
6638 // marked for deoptimization?
6639 UseScratchRegisterScope temps(this);
6640 Register scratch = temps.Acquire();
6641 Load32U(scratch, FieldMemOperand(scratch_and_result, Code::kFlagsOffset));
6642 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
6643
6644 if (min_opt_level == CodeKind::TURBOFAN_JS) {
6645 Branch(&clear_slot, not_equal, scratch, Operand(zero_reg),
6647
6648 // is code "turbofanned"?
6649 Load32U(scratch, FieldMemOperand(scratch_and_result, Code::kFlagsOffset));
6650 And(scratch, scratch, Operand(1 << Code::kIsTurbofannedBit));
6651 Branch(on_result, not_equal, scratch, Operand(zero_reg), distance);
6652
6653 Branch(&fallthrough);
6654 } else {
6655 DCHECK_EQ(min_opt_level, CodeKind::MAGLEV);
6656 Branch(on_result, equal, scratch, Operand(zero_reg), distance);
6657 }
6658
6659 bind(&clear_slot);
6660 li(scratch_and_result, ClearedValue());
6662 scratch_and_result,
6663 FieldMemOperand(feedback_vector,
6665 }
6666
6667 bind(&fallthrough);
6668 Move(scratch_and_result, zero_reg);
6669}
6670
6672 ASM_CODE_COMMENT(this);
6673 UseScratchRegisterScope temps(this);
6674 Register scratch = temps.Acquire();
6675 li(scratch, Operand(StackFrame::TypeToMarker(type)));
6676 PushCommonFrame(scratch);
6677}
6678
6680
6682 ASM_CODE_COMMENT(this);
6683 UseScratchRegisterScope temps(this);
6684 Register scratch = temps.Acquire();
6685 BlockTrampolinePoolScope block_trampoline_pool(this);
6686 Push(ra, fp);
6687 Move(fp, sp);
6688 if (!StackFrame::IsJavaScript(type)) {
6689 li(scratch, Operand(StackFrame::TypeToMarker(type)));
6690 Push(scratch);
6691 }
6692#if V8_ENABLE_WEBASSEMBLY
6693 if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP) {
6695 }
6696#endif // V8_ENABLE_WEBASSEMBLY
6697}
6698
6700 ASM_CODE_COMMENT(this);
6701 addi(sp, fp, 2 * kSystemPointerSize);
6702 LoadWord(ra, MemOperand(fp, 1 * kSystemPointerSize));
6703 LoadWord(fp, MemOperand(fp, 0 * kSystemPointerSize));
6704}
6705
6706void MacroAssembler::EnterExitFrame(Register scratch, int stack_space,
6707 StackFrame::Type frame_type) {
6708 ASM_CODE_COMMENT(this);
6709 DCHECK(frame_type == StackFrame::EXIT ||
6710 frame_type == StackFrame::BUILTIN_EXIT ||
6711 frame_type == StackFrame::API_ACCESSOR_EXIT ||
6712 frame_type == StackFrame::API_CALLBACK_EXIT);
6713
6714 // Set up the frame structure on the stack.
6715 static_assert(2 * kSystemPointerSize ==
6719
6720 // This is how the stack will look:
6721 // fp + 2 (==kCallerSPDisplacement) - old stack's end
6722 // [fp + 1 (==kCallerPCOffset)] - saved old ra
6723 // [fp + 0 (==kCallerFPOffset)] - saved old fp
6724 // [fp - 1] - StackFrame::EXIT Smi (frame type marker)
6725 // [fp - 2 (==kSPOffset)] - sp of the called function
6726 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
6727 // new stack (will contain saved ra)
6728
6729 using ER = ExternalReference;
6730
6731 // Save registers and reserve room for saved entry sp.
6732 addi(sp, sp,
6734 StoreWord(ra, MemOperand(sp, 3 * kSystemPointerSize));
6735 StoreWord(fp, MemOperand(sp, 2 * kSystemPointerSize));
6736
6737 li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
6738 StoreWord(scratch, MemOperand(sp, 1 * kSystemPointerSize));
6739 // Set up new frame pointer.
6741
6742 if (v8_flags.debug_code) {
6743 StoreWord(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
6744 }
6745
6746 // Save the frame pointer and the context in top.
6747 ER c_entry_fp_address =
6748 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
6749 StoreWord(fp, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
6750 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
6751 StoreWord(cp, ExternalReferenceAsOperand(context_address, no_reg));
6752
6753 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
6754
6755 // Reserve place for the return address, stack space and an optional slot
6756 // (used by DirectCEntry to hold the return value if a struct is
6757 // returned) and align the frame preparing for calling the runtime function.
6758 DCHECK_GE(stack_space, 0);
6759 SubWord(sp, sp, Operand((stack_space + 1) * kSystemPointerSize));
6760 if (frame_alignment > 0) {
6761 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
6762 And(sp, sp, Operand(-frame_alignment)); // Align stack.
6763 }
6764
6765 // Set the exit frame sp value to point just before the return address
6766 // location.
6767 addi(scratch, sp, kSystemPointerSize);
6768 StoreWord(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
6769}
6770
6772 ASM_CODE_COMMENT(this);
6773 BlockTrampolinePoolScope block_trampoline_pool(this);
6774 using ER = ExternalReference;
6775 // Clear top frame.
6776 // Restore current context from top and clear it in debug mode.
6777 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
6778 LoadWord(cp, ExternalReferenceAsOperand(context_address, no_reg));
6779
6780 if (v8_flags.debug_code) {
6782 StoreWord(scratch, ExternalReferenceAsOperand(context_address, no_reg));
6783 }
6784
6785 // Clear the top frame.
6786 ER c_entry_fp_address =
6787 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
6788 StoreWord(zero_reg, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
6789
6790 // Pop the arguments, restore registers, and return.
6791 Mv(sp, fp); // Respect ABI stack constraint.
6794
6795 addi(sp, sp, 2 * kSystemPointerSize);
6796}
6797
6799#if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
6800 // Running on the real platform. Use the alignment as mandated by the local
6801 // environment.
6802 // Note: This will break if we ever start generating snapshots on one RISC-V
6803 // platform for another RISC-V platform with a different alignment.
6805#else // V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
6806 // If we are using the simulator then we should always align to the expected
6807 // alignment. As the simulator is used to generate snapshots we do not know
6808 // if the target platform will need alignment, so this is controlled from a
6809 // flag.
6810 return v8_flags.sim_stack_alignment;
6811#endif // V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
6812}
6813
6815 if (v8_flags.debug_code) {
6816 ASM_CODE_COMMENT(this);
6817 const int frame_alignment = ActivationFrameAlignment();
6818 const int frame_alignment_mask = frame_alignment - 1;
6819
6820 if (frame_alignment > kSystemPointerSize) {
6821 Label alignment_as_expected;
6822 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
6823 {
6824 UseScratchRegisterScope temps(this);
6825 Register scratch = temps.Acquire();
6826 andi(scratch, sp, frame_alignment_mask);
6827 BranchShort(&alignment_as_expected, eq, scratch, Operand(zero_reg));
6828 }
6829 // Don't use Check here, as it will call Runtime_Abort re-entering here.
6830 ebreak();
6831 bind(&alignment_as_expected);
6832 }
6833 }
6834}
6835
6836void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
6837 ASM_CODE_COMMENT(this);
6838 if (SmiValuesAre32Bits()) {
6839 Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
6840 } else {
6843 Lw(dst, src);
6844 } else {
6845 LoadWord(dst, src);
6846 }
6847 SmiUntag(dst);
6848 }
6849}
6850
6851void MacroAssembler::SmiToInt32(Register smi) {
6852 ASM_CODE_COMMENT(this);
6853 if (v8_flags.enable_slow_asserts) {
6854 AssertSmi(smi);
6855 }
6857 SmiUntag(smi);
6858}
6859
6860void MacroAssembler::SmiToInt32(Register dst, Register src) {
6861 if (dst != src) {
6862 Move(dst, src);
6863 }
6864 SmiToInt32(dst);
6865}
6866
6867void MacroAssembler::JumpIfSmi(Register value, Label* smi_label,
6868 Label::Distance distance) {
6869 ASM_CODE_COMMENT(this);
6870 DCHECK_EQ(0, kSmiTag);
6871 UseScratchRegisterScope temps(this);
6872 Register scratch = temps.Acquire();
6873 andi(scratch, value, kSmiTagMask);
6874 Branch(smi_label, eq, scratch, Operand(zero_reg), distance);
6875}
6876
6878 Register code, Register scratch, Label* if_marked_for_deoptimization) {
6879 Load32U(scratch, FieldMemOperand(code, Code::kFlagsOffset));
6880 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
6881 Branch(if_marked_for_deoptimization, ne, scratch, Operand(zero_reg));
6882}
6883
6885 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
6886}
6887
6888void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
6889 Label::Distance distance) {
6890 ASM_CODE_COMMENT(this);
6891 UseScratchRegisterScope temps(this);
6892 Register scratch = temps.Acquire();
6893 DCHECK_EQ(0, kSmiTag);
6894 andi(scratch, value, kSmiTagMask);
6895 Branch(not_smi_label, ne, scratch, Operand(zero_reg), distance);
6896}
6897
6899 Register object,
6900 InstanceType instance_type,
6901 Register scratch) {
6902 DCHECK(cc == eq || cc == ne);
6903 UseScratchRegisterScope temps(this);
6904 if (scratch == no_reg) {
6905 scratch = temps.Acquire();
6906 }
6908 if (std::optional<RootIndex> expected =
6910 Tagged_t ptr = ReadOnlyRootPtr(*expected);
6911 LoadCompressedMap(scratch, object);
6912 Branch(target, cc, scratch, Operand(ptr));
6913 return;
6914 }
6915 }
6916 GetObjectType(object, scratch, scratch);
6917 Branch(target, cc, scratch, Operand(instance_type));
6918}
6919
6921 Register scratch, Label* target,
6922 Label::Distance distance,
6923 Condition cc) {
6924 CHECK(cc == Condition::kUnsignedLessThan ||
6925 cc == Condition::kUnsignedGreaterThanEqual);
6927#ifdef DEBUG
6928 Label ok;
6929 LoadMap(scratch, heap_object);
6930 GetInstanceTypeRange(scratch, scratch, FIRST_JS_RECEIVER_TYPE, scratch);
6931 Branch(&ok, Condition::kUnsignedLessThanEqual, scratch,
6932 Operand(LAST_JS_RECEIVER_TYPE - FIRST_JS_RECEIVER_TYPE));
6933
6934 LoadMap(scratch, heap_object);
6935 GetInstanceTypeRange(scratch, scratch, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
6936 scratch);
6937 Branch(&ok, Condition::kUnsignedLessThanEqual, scratch,
6938 Operand(LAST_PRIMITIVE_HEAP_OBJECT_TYPE -
6939 FIRST_PRIMITIVE_HEAP_OBJECT_TYPE));
6940
6941 Abort(AbortReason::kInvalidReceiver);
6942 bind(&ok);
6943#endif // DEBUG
6944
6945 // All primitive objects' maps are allocated at the start of the read-only
6946 // heap. Thus JS_RECEIVERs must have maps with larger (compressed)
6947 // addresses.
6948 LoadCompressedMap(scratch, heap_object);
6949 Branch(target, cc, scratch,
6951 } else {
6952 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
6953 GetObjectType(heap_object, scratch, scratch);
6954 Branch(target, cc, scratch, Operand(FIRST_JS_RECEIVER_TYPE));
6955 }
6956}
6957
6959 if (v8_flags.debug_code) {
6960 ASM_CODE_COMMENT(this);
6961 UseScratchRegisterScope temps(this);
6962 Register scratch = temps.Acquire();
6963 static_assert(kSmiTag == 0);
6964 andi(scratch, object, kSmiTagMask);
6965 Check(ne, reason, scratch, Operand(zero_reg));
6966 }
6967}
6968
6970 if (v8_flags.debug_code) {
6971 ASM_CODE_COMMENT(this);
6972 UseScratchRegisterScope temps(this);
6973 Register scratch = temps.Acquire();
6974 static_assert(kSmiTag == 0);
6975 andi(scratch, object, kSmiTagMask);
6976 Check(eq, reason, scratch, Operand(zero_reg));
6977 }
6978}
6979
6981 if (v8_flags.debug_code) {
6982 ASM_CODE_COMMENT(this);
6983 UseScratchRegisterScope temps(this);
6984 Register scratch = temps.Acquire();
6985 BlockTrampolinePoolScope block_trampoline_pool(this);
6986 static_assert(kSmiTag == 0);
6987 SmiTst(object, scratch);
6988 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, scratch,
6989 Operand(zero_reg));
6990
6991 LoadMap(scratch, object);
6992 Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
6993 And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
6994 Check(ne, AbortReason::kOperandIsNotAConstructor, scratch,
6995 Operand(zero_reg));
6996 }
6997}
6998
7000 if (v8_flags.debug_code) {
7001 ASM_CODE_COMMENT(this);
7002 BlockTrampolinePoolScope block_trampoline_pool(this);
7003 UseScratchRegisterScope temps(this);
7004 Register scratch = temps.Acquire();
7005 static_assert(kSmiTag == 0);
7006 SmiTst(object, scratch);
7007 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, scratch,
7008 Operand(zero_reg));
7009 push(object);
7010 LoadMap(object, object);
7011 Register range = scratch;
7012 GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, range);
7013 Check(Uless_equal, AbortReason::kOperandIsNotAFunction, range,
7014 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
7015 pop(object);
7016 }
7017}
7018
7020 if (!v8_flags.debug_code) return;
7021 ASM_CODE_COMMENT(this);
7022 static_assert(kSmiTag == 0);
7023 AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
7024 push(object);
7025 LoadMap(object, object);
7026 UseScratchRegisterScope temps(this);
7027 Register range = temps.Acquire();
7029 Check(Uless_equal, AbortReason::kOperandIsNotACallableFunction, range,
7032 pop(object);
7033}
7034
7036 if (v8_flags.debug_code) {
7037 ASM_CODE_COMMENT(this);
7038 BlockTrampolinePoolScope block_trampoline_pool(this);
7039 UseScratchRegisterScope temps(this);
7040 Register scratch = temps.Acquire();
7041 static_assert(kSmiTag == 0);
7042 SmiTst(object, scratch);
7043 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, scratch,
7044 Operand(zero_reg));
7045 GetObjectType(object, scratch, scratch);
7046 Check(eq, AbortReason::kOperandIsNotABoundFunction, scratch,
7047 Operand(JS_BOUND_FUNCTION_TYPE));
7048 }
7049}
7050
7051#ifdef V8_ENABLE_DEBUG_CODE
7053 Register object) {
7054#if V8_TARGET_ARCH_RISCV64
7055 if (!PointerCompressionIsEnabled()) return;
7056 if (!v8_flags.debug_code) return;
7057 ASM_CODE_COMMENT(this);
7058 // We may not have any scratch registers so we preserve our input register.
7059 Push(object, zero_reg);
7060 Label ok;
7061 {
7062 UseScratchRegisterScope temps(this);
7063 Register scratch = temps.Acquire();
7064 SmiTst(object, scratch);
7065 BranchShort(&ok, kEqual, scratch, Operand(zero_reg));
7066 }
7067 // Clear the lower 32 bits.
7068 Srl64(object, object, Operand(32));
7069 Sll64(object, object, Operand(32));
7070 // Either the value is now equal to the right-shifted pointer compression
7071 // cage base or it's zero if we got a compressed pointer register as input.
7072 BranchShort(&ok, kEqual, object, Operand(zero_reg));
7073 Check(kEqual, AbortReason::kObjectNotTagged, object,
7074 Operand(kPtrComprCageBaseRegister));
7075 bind(&ok);
7076 Pop(object, zero_reg);
7077#endif
7078}
7079#endif // V8_ENABLE_DEBUG_CODE
7080
7082 if (!v8_flags.debug_code) return;
7083 ASM_CODE_COMMENT(this);
7084 BlockTrampolinePoolScope block_trampoline_pool(this);
7085 UseScratchRegisterScope temps(this);
7086 Register scratch = temps.Acquire();
7087 static_assert(kSmiTag == 0);
7088 SmiTst(object, scratch);
7089 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, scratch,
7090 Operand(zero_reg));
7091
7092 LoadMap(scratch, object);
7093 GetInstanceTypeRange(scratch, scratch, FIRST_JS_GENERATOR_OBJECT_TYPE,
7094 scratch);
7095 Check(
7096 Uless_equal, AbortReason::kOperandIsNotAGeneratorObject, scratch,
7097 Operand(LAST_JS_GENERATOR_OBJECT_TYPE - FIRST_JS_GENERATOR_OBJECT_TYPE));
7098}
7099
7101 Register scratch) {
7102 if (v8_flags.debug_code) {
7103 ASM_CODE_COMMENT(this);
7104 Label done_checking;
7105 AssertNotSmi(object);
7106 LoadRoot(scratch, RootIndex::kUndefinedValue);
7107 BranchShort(&done_checking, eq, object, Operand(scratch));
7108 GetObjectType(object, scratch, scratch);
7109 Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
7110 Operand(ALLOCATION_SITE_TYPE));
7111 bind(&done_checking);
7112 }
7113}
7114
7115template <typename F_TYPE>
7117 FPURegister src2, MaxMinKind kind) {
7118 DCHECK((std::is_same<F_TYPE, float>::value) ||
7119 (std::is_same<F_TYPE, double>::value));
7120
7121 if (src1 == src2 && dst != src1) {
7122 if (std::is_same<float, F_TYPE>::value) {
7123 fmv_s(dst, src1);
7124 } else {
7125 fmv_d(dst, src1);
7126 }
7127 return;
7128 }
7129
7130 Label done, nan;
7131
7132 // On RISC-V, fmin_s/fmax_s return the other, non-NaN operand when only one
7133 // operand is NaN; in JS, however, if any operand is NaN the result is NaN.
7134 // The following handles this discrepancy between the ISA semantics and the
7135 // JS semantics.
7136 UseScratchRegisterScope temps(this);
7137 Register scratch = temps.Acquire();
7138 if (std::is_same<float, F_TYPE>::value) {
7139 CompareIsNotNanF32(scratch, src1, src2);
7140 } else {
7141 CompareIsNotNanF64(scratch, src1, src2);
7142 }
7143 BranchFalseF(scratch, &nan);
7144
7145 if (kind == MaxMinKind::kMax) {
7146 if (std::is_same<float, F_TYPE>::value) {
7147 fmax_s(dst, src1, src2);
7148 } else {
7149 fmax_d(dst, src1, src2);
7150 }
7151 } else {
7152 if (std::is_same<float, F_TYPE>::value) {
7153 fmin_s(dst, src1, src2);
7154 } else {
7155 fmin_d(dst, src1, src2);
7156 }
7157 }
7158 j(&done);
7159
7160 bind(&nan);
7161 // if any operand is NaN, return NaN (fadd returns NaN if any operand is NaN)
7162 if (std::is_same<float, F_TYPE>::value) {
7163 fadd_s(dst, src1, src2);
7164 } else {
7165 fadd_d(dst, src1, src2);
7166 }
7167
7168 bind(&done);
7169}
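// A concrete illustration of the behaviour implemented above: for
// Math.min(1.0, NaN) the fmin instruction alone would return 1.0, but JS
// requires NaN, so the helper first branches to the `nan` path whenever either
// input is NaN and produces the result there with fadd, which propagates a
// quiet NaN.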
7170
7176
7182
7188
7194
7196 int num_fp_arguments) {
7197 int stack_passed_dwords = 0;
7198
7199 // Up to eight integer arguments are passed in registers a0..a7 and
7200 // up to eight floating point arguments are passed in registers fa0..fa7
7201 if (num_gp_arguments > kRegisterPassedArguments) {
7202 stack_passed_dwords += num_gp_arguments - kRegisterPassedArguments;
7203 }
7204 if (num_fp_arguments > kRegisterPassedArguments) {
7205 stack_passed_dwords += num_fp_arguments - kRegisterPassedArguments;
7206 }
7207 stack_passed_dwords += kCArgSlotCount;
7208 return stack_passed_dwords;
7209}
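// Worked example (using the eight-register convention described above): a
// call set up with PrepareCallCFunction(10, 9, scratch), i.e. 10 integer and
// 9 floating-point arguments, needs (10 - 8) + (9 - 8) = 3 stack double-words,
// plus the kCArgSlotCount reserved slots.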
7210
7211void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
7212 int num_double_arguments,
7213 Register scratch) {
7214 ASM_CODE_COMMENT(this);
7215 int frame_alignment = ActivationFrameAlignment();
7216
7217 // Up to eight simple arguments in a0..a7, fa0..fa7.
7218 // Remaining arguments are pushed on the stack (arg slot calculation handled
7219 // by CalculateStackPassedDWords()).
7220 int stack_passed_arguments =
7221 CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
7222 if (frame_alignment > kSystemPointerSize) {
7223 // Make stack end at alignment and make room for stack arguments and the
7224 // original value of sp.
7225 Mv(scratch, sp);
7226 SubWord(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize));
7227 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
7228 And(sp, sp, Operand(-frame_alignment));
7229 StoreWord(scratch,
7230 MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
7231 } else {
7232 SubWord(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
7233 }
7234}
7235
7236void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
7237 Register scratch) {
7238 PrepareCallCFunction(num_reg_arguments, 0, scratch);
7239}
7240
7242 int num_reg_arguments,
7243 int num_double_arguments,
7244 SetIsolateDataSlots set_isolate_data_slots,
7245 Label* return_location) {
7246 BlockTrampolinePoolScope block_trampoline_pool(this);
7247 li(t6, function);
7248 return CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments,
7249 set_isolate_data_slots, return_location);
7250}
7251
7252int MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
7253 int num_double_arguments,
7254 SetIsolateDataSlots set_isolate_data_slots,
7255 Label* return_location) {
7256 return CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
7257 set_isolate_data_slots, return_location);
7258}
7259
7260int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
7261 SetIsolateDataSlots set_isolate_data_slots,
7262 Label* return_location) {
7263 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
7264 return_location);
7265}
7266
7267int MacroAssembler::CallCFunction(Register function, int num_arguments,
7268 SetIsolateDataSlots set_isolate_data_slots,
7269 Label* return_location) {
7270 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
7271 return_location);
7272}
7273
7275 Register function, int num_reg_arguments, int num_double_arguments,
7276 SetIsolateDataSlots set_isolate_data_slots, Label* return_location) {
7277 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
7278 DCHECK(has_frame());
7279 ASM_CODE_COMMENT(this);
7280 // Make sure that the stack is aligned before calling a C function unless
7281 // running in the simulator. The simulator has its own alignment check which
7282 // provides more information.
7283 // The argument slots are presumed to have been set up by
7284 // PrepareCallCFunction.
7285
7286#if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
7287 if (v8_flags.debug_code) {
7288 int frame_alignment = base::OS::ActivationFrameAlignment();
7289 int frame_alignment_mask = frame_alignment - 1;
7290 if (frame_alignment > kSystemPointerSize) {
7291 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
7292 Label alignment_as_expected;
7293 {
7294 UseScratchRegisterScope temps(this);
7295 Register scratch = temps.Acquire();
7296 And(scratch, sp, Operand(frame_alignment_mask));
7297 BranchShort(&alignment_as_expected, eq, scratch, Operand(zero_reg));
7298 }
7299 // Don't use Check here, as it will call Runtime_Abort possibly
7300 // re-entering here.
7301 ebreak();
7302 bind(&alignment_as_expected);
7303 }
7304 }
7305#endif // V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64
7306
7307 // Just call directly. The function called cannot cause a GC, or
7308 // allow preemption, so the return address in the link register
7309 // stays correct.
7310 Label get_pc;
7311 {
7312 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
7313 if (function != t6) {
7314 Mv(t6, function);
7315 function = t6;
7316 }
7317
7318 // Save the frame pointer and PC so that the stack layout remains
7319 // iterable, even without an ExitFrame which normally exists between JS
7320 // and C frames.
7321 // 't' registers are caller-saved so this is safe as a scratch register.
7322 Register pc_scratch = t1;
7323
7324 LoadAddress(pc_scratch, &get_pc);
7325 // See x64 code for reasoning about how to address the isolate data
7326 // fields.
7328 StoreWord(pc_scratch,
7329 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
7330 StoreWord(fp,
7331 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
7332 }
7333 }
7334
7335 Call(function);
7336 int call_pc_offset = pc_offset();
7337 bind(&get_pc);
7338 if (return_location) bind(return_location);
7339
7340 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
7341 // We don't unset the PC; the FP is the source of truth.
7342 StoreWord(zero_reg,
7343 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
7344 }
7345
7346 // Remove the frame created by PrepareCallCFunction.
7347 int stack_passed_arguments =
7348 CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
7350 LoadWord(sp, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
7351 } else {
7352 AddWord(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
7353 }
7354
7355 return call_pc_offset;
7356}
7357
7358#undef BRANCH_ARGS_CHECK
7359
7361 Label* condition_met) {
7362 ASM_CODE_COMMENT(this);
7363 UseScratchRegisterScope temps(this);
7364 temps.Include(t6);
7365 Register scratch = temps.Acquire();
7367 LoadWord(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()));
7368 And(scratch, scratch, Operand(mask));
7369 Branch(condition_met, cc, scratch, Operand(zero_reg));
7370}
7371
7373 Register reg4, Register reg5,
7374 Register reg6) {
7375 RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
7376
7378 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
7379 int code = config->GetAllocatableGeneralCode(i);
7380 Register candidate = Register::from_code(code);
7381 if (regs.has(candidate)) continue;
7382 return candidate;
7383 }
7384 UNREACHABLE();
7385}
7386
7388 ASM_CODE_COMMENT(this);
7389 auto pc = -pc_offset();
7390 auipc(dst, 0);
7391 if (pc != 0) {
7392 SubWord(dst, dst, pc);
7393 }
7394}
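// auipc(dst, 0) materializes the address of the auipc instruction itself; the
// adjustment by the number of bytes already emitted into the buffer then turns
// that into the start address of the current code object.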
7395
7396// Check if the code object is marked for deoptimization. If it is, then it
7397// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
7398// to:
7399// 1. read from memory the word that contains that bit, which can be found in
7400// the flags in the referenced {Code} object;
7401// 2. test kMarkedForDeoptimizationBit in those flags; and
7402// 3. if it is not zero then it jumps to the builtin.
7404 ASM_CODE_COMMENT(this);
7405 UseScratchRegisterScope temps(this);
7406 Register scratch = temps.Acquire();
7407 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
7408 int offset =
7409 InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
7412 Lw(scratch, FieldMemOperand(scratch, Code::kFlagsOffset));
7413 }
7414#ifdef V8_ENABLE_LEAPTIERING
7415 if (v8_flags.debug_code) {
7416 Label not_deoptimized;
7417 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
7418 Branch(&not_deoptimized, eq, scratch, Operand(zero_reg));
7419 Abort(AbortReason::kInvalidDeoptimizedCode);
7420 bind(&not_deoptimized);
7421 }
7422#else
7423 And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
7424 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, scratch,
7425 Operand(zero_reg));
7426#endif
7427}
7428
7441
7443 Register code_object,
7444 CodeEntrypointTag tag) {
7445 ASM_CODE_COMMENT(this);
7446#ifdef V8_ENABLE_SANDBOX
7447 LoadCodeEntrypointViaCodePointer(
7449 FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), tag);
7450#else
7451 LoadWord(destination,
7452 FieldMemOperand(code_object, Code::kInstructionStartOffset));
7453#endif
7454}
7455
7457 MemOperand field_operand) {
7459#ifdef V8_ENABLE_SANDBOX
7460 DecompressProtected(destination, field_operand);
7461#else
7462 LoadTaggedField(destination, field_operand);
7463#endif
7464}
7465
7467 CodeEntrypointTag tag) {
7468 ASM_CODE_COMMENT(this);
7469 LoadCodeInstructionStart(code_object, code_object, tag);
7470 Call(code_object);
7471}
7472
7474 JumpMode jump_mode) {
7475 ASM_CODE_COMMENT(this);
7476 DCHECK_EQ(JumpMode::kJump, jump_mode);
7477 LoadCodeInstructionStart(code_object, code_object, tag);
7478 Jump(code_object);
7479}
7480
7481#ifdef V8_TARGET_ARCH_RISCV64
7482void MacroAssembler::CallJSFunction(Register function_object,
7483 [[maybe_unused]] uint16_t argument_count) {
7484 ASM_CODE_COMMENT(this);
7486#ifdef V8_ENABLE_LEAPTIERING
7489 UseScratchRegisterScope temps(this);
7490 Register scratch = temps.Acquire();
7491 Lw(dispatch_handle,
7492 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
7493 LoadEntrypointAndParameterCountFromJSDispatchTable(code, parameter_count,
7494 dispatch_handle, scratch);
7495 // Force a safe crash if the parameter count doesn't match.
7496 SbxCheck(le, AbortReason::kJSSignatureMismatch, parameter_count,
7497 Operand(argument_count));
7498 Call(code);
7499#elif V8_ENABLE_SANDBOX
7500 // When the sandbox is enabled, we can directly fetch the entrypoint pointer
7501 // from the code pointer table instead of going through the Code object. In
7502 // this way, we avoid one memory load on this code path.
7503 LoadCodeEntrypointViaCodePointer(
7504 code, FieldMemOperand(function_object, JSFunction::kCodeOffset),
7506 Call(code);
7507#else
7508 LoadTaggedField(code,
7509 FieldMemOperand(function_object, JSFunction::kCodeOffset));
7511#endif
7512}
7513#else
7515 uint16_t argument_count) {
7517#if V8_ENABLE_LEAPTIERING
7518 Register dispatch_handle = s1;
7519 UseScratchRegisterScope temps(this);
7520 Register scratch = temps.Acquire();
7521 Lw(dispatch_handle,
7522 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
7523 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
7524 Call(code);
7525#else
7526 LoadTaggedField(code,
7527 FieldMemOperand(function_object, JSFunction::kCodeOffset));
7528 CallCodeObject(code);
7529#endif // V8_ENABLE_LEAPTIERING
7530}
7531#endif
7532
7533#if V8_ENABLE_LEAPTIERING
7534void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
7535 uint16_t argument_count) {
7537 Register scratch = s1;
7539 Operand(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
7540 // WARNING: This entrypoint load is only safe because we are storing a
7541 // RelocInfo for the dispatch handle in the li above (thus keeping the
7542 // dispatch entry alive) _and_ because the entrypoints are not compactable
7543 // (thus meaning that the calculation in the entrypoint load is not
7544 // invalidated by a compaction).
7545 // TODO(leszeks): Make this less of a footgun.
7546 static_assert(!JSDispatchTable::kSupportsCompaction);
7547 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
7548 CHECK_EQ(argument_count,
7549 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
7550 dispatch_handle));
7551 Call(code);
7552}
7553#endif
7554
7556 JumpMode jump_mode) {
7557 ASM_CODE_COMMENT(this);
7560#ifdef V8_ENABLE_LEAPTIERING
7561 Register dispatch_handle = s1;
7562 UseScratchRegisterScope temps(this);
7563 Register scratch = temps.Acquire();
7564 Lw(dispatch_handle,
7565 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
7566 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
7567 Jump(code);
7568#else
7569 LoadTaggedField(code,
7570 FieldMemOperand(function_object, JSFunction::kCodeOffset));
7571 JumpCodeObject(code, kJSEntrypointTag, jump_mode);
7572#endif
7573}
7574
7575#ifdef V8_ENABLE_WEBASSEMBLY
7576void MacroAssembler::ResolveWasmCodePointer(Register target,
7577 uint64_t signature_hash) {
7578 ASM_CODE_COMMENT(this);
7579 ExternalReference global_jump_table =
7580 ExternalReference::wasm_code_pointer_table();
7581 UseScratchRegisterScope temps(this);
7582 Register scratch = temps.Acquire();
7583 li(scratch, global_jump_table);
7584
7585#ifdef V8_ENABLE_SANDBOX
7586 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 16);
7587 CalcScaledAddress(target, scratch, target, 4);
7588 LoadWord(
7589 scratch,
7590 MemOperand(target, wasm::WasmCodePointerTable::kOffsetOfSignatureHash));
7591 // bool has_second_tmp = temps.CanAcquire();
7592 // Register signature_hash_register = has_second_tmp ? temps.Acquire() :
7593 // target; if (!has_second_tmp) {
7594 // Push(signature_hash_register);
7595 // }
7596 // li(signature_hash_register, Operand(signature_hash));
7597 SbxCheck(Condition::kEqual, AbortReason::kWasmSignatureMismatch, scratch,
7598 Operand(signature_hash));
7599#else
7600 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == kSystemPointerSize);
7601 CalcScaledAddress(target, scratch, target, kSystemPointerSizeLog2);
7602#endif
7603 LoadWord(target, MemOperand(target, 0));
7604}
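// With the sandbox enabled each WasmCodePointerTableEntry is 16 bytes, so the
// index is scaled by 16 (shift by 4) before the entry is addressed; the
// signature hash stored in the entry is checked against the expected hash via
// SbxCheck before the real call target is loaded from the entry. Without the
// sandbox the table stores only a code pointer per entry, so the index is
// scaled by kSystemPointerSizeLog2 instead.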
7605
7606void MacroAssembler::CallWasmCodePointer(Register target,
7607 uint64_t signature_hash,
7608 CallJumpMode call_jump_mode) {
7609 ResolveWasmCodePointer(target, signature_hash);
7610 if (call_jump_mode == CallJumpMode::kTailCall) {
7611 Jump(target);
7612 } else {
7613 Call(target);
7614 }
7615}
7616
7617void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) {
7618 ExternalReference global_jump_table =
7619 ExternalReference::wasm_code_pointer_table();
7620 UseScratchRegisterScope temps(this);
7621 Register scratch = temps.Acquire();
7622 li(scratch, global_jump_table);
7623 constexpr unsigned int kEntrySizeLog2 =
7624 std::bit_width(sizeof(wasm::WasmCodePointerTableEntry)) - 1;
7625 CalcScaledAddress(target, scratch, target, kEntrySizeLog2);
7626 LoadWord(target, MemOperand(target));
7627 Call(target);
7628}
7629
7630void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
7631 static_assert(sizeof(WasmCodePointer) == 4);
7632 Lw(dst, src);
7633}
7634#endif
7635
7636#ifdef V8_ENABLE_LEAPTIERING
7637void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
7638 Register dispatch_handle,
7639 Register scratch) {
7640 DCHECK(!AreAliased(destination, scratch));
7641 ASM_CODE_COMMENT(this);
7642 Register index = destination;
7643 li(scratch, ExternalReference::js_dispatch_table_address());
7644#ifdef V8_TARGET_ARCH_RISCV32
7645 static_assert(kJSDispatchHandleShift == 0);
7646 slli(index, dispatch_handle, kJSDispatchTableEntrySizeLog2);
7647#else
7648 srli(index, dispatch_handle, kJSDispatchHandleShift);
7649 slli(index, index, kJSDispatchTableEntrySizeLog2);
7650#endif
7651 AddWord(scratch, scratch, index);
7652 LoadWord(destination,
7653 MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
7654}
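// The dispatch handle is turned into a byte offset by dropping the handle tag
// bits (srli by kJSDispatchHandleShift, a no-op on RISCV32 where the shift is
// zero) and scaling by the dispatch table entry size (slli by
// kJSDispatchTableEntrySizeLog2); the entrypoint is then loaded from that
// entry at kEntrypointOffset.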
7655
7656void MacroAssembler::LoadEntrypointFromJSDispatchTable(
7657 Register destination, JSDispatchHandle dispatch_handle, Register scratch) {
7658 DCHECK(!AreAliased(destination, scratch));
7659 ASM_CODE_COMMENT(this);
7660 li(scratch, ExternalReference::js_dispatch_table_address());
7661 // WARNING: This offset calculation is only safe if we have already stored a
7662 // RelocInfo for the dispatch handle, e.g. in CallJSDispatchEntry (thus
7663 // keeping the dispatch entry alive) _and_ because the entrypoints are not
7664 // compactable (thus meaning that the offset calculation is not invalidated by
7665 // a compaction).
7666 // TODO(leszeks): Make this less of a footgun.
7667 static_assert(!JSDispatchTable::kSupportsCompaction);
7668 int offset = JSDispatchTable::OffsetOfEntry(dispatch_handle) +
7669 JSDispatchEntry::kEntrypointOffset;
7670 LoadWord(destination, MemOperand(scratch, offset));
7671}
7672
7673#ifdef V8_TARGET_ARCH_RISCV64
7674void MacroAssembler::LoadParameterCountFromJSDispatchTable(
7675 Register destination, Register dispatch_handle, Register scratch) {
7676 DCHECK(!AreAliased(destination, scratch));
7677 ASM_CODE_COMMENT(this);
7678 Register index = destination;
7679 srli(index, dispatch_handle, kJSDispatchHandleShift);
7680 slli(index, index, kJSDispatchTableEntrySizeLog2);
7681 li(scratch, ExternalReference::js_dispatch_table_address());
7682 AddWord(scratch, scratch, index);
7683 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
7684 Lhu(destination, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
7685}
7686
7687void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
7688 Register entrypoint, Register parameter_count, Register dispatch_handle,
7689 Register scratch) {
7690 DCHECK(!AreAliased(entrypoint, parameter_count, scratch));
7691 ASM_CODE_COMMENT(this);
7692 Register index = parameter_count;
7693 li(scratch, ExternalReference::js_dispatch_table_address());
7694 srli(index, dispatch_handle, kJSDispatchHandleShift);
7695 slli(index, index, kJSDispatchTableEntrySizeLog2);
7696 AddWord(scratch, scratch, index);
7697 LoadWord(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
7698 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
7699 Lhu(parameter_count, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
7700}
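// In both dispatch-table readers above the parameter count is fetched with an
// Lhu at JSDispatchEntry::kCodeObjectOffset: it occupies the low 16 bits of
// that word, as documented by the static_assert on
// kParameterCountMask == 0xffff.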
7701#endif // V8_TARGET_ARCH_RISCV64
7702#endif // V8_ENABLE_LEAPTIERING
7703
7704#if V8_TARGET_ARCH_RISCV64
7705void MacroAssembler::LoadTaggedField(const Register& destination,
7706 const MemOperand& field_operand,
7707 Trapper&& trapper) {
7708 if (COMPRESS_POINTERS_BOOL) {
7709 DecompressTagged(destination, field_operand,
7710 std::forward<Trapper>(trapper));
7711 } else {
7712 Ld(destination, field_operand, std::forward<Trapper>(trapper));
7713 }
7714}
7715
7716 void MacroAssembler::LoadTaggedFieldWithoutDecompressing(
7717 const Register& destination, const MemOperand& field_operand) {
7718 if (COMPRESS_POINTERS_BOOL) {
7719 Lw(destination, field_operand);
7720 } else {
7721 Ld(destination, field_operand);
7722 }
7723}
7724
7725 void MacroAssembler::LoadTaggedSignedField(const Register& destination,
7726 const MemOperand& field_operand) {
7727 if (COMPRESS_POINTERS_BOOL) {
7728 DecompressTaggedSigned(destination, field_operand);
7729 } else {
7730 Ld(destination, field_operand);
7731 }
7732}
7733
7734void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
7735 SmiUntag(dst, src);
7736}
7737
7738void MacroAssembler::StoreTaggedField(const Register& value,
7739 const MemOperand& dst_field_operand,
7740 Trapper&& trapper) {
7741 trapper(pc_offset());
7742 if (COMPRESS_POINTERS_BOOL) {
7743 Sw(value, dst_field_operand, std::forward<Trapper>(trapper));
7744 } else {
7745 Sd(value, dst_field_operand, std::forward<Trapper>(trapper));
7746 }
7747}
7748
7749void MacroAssembler::AtomicStoreTaggedField(Register src, const MemOperand& dst,
7750 Trapper&& trapper) {
7751 UseScratchRegisterScope temps(this);
7752 Register scratch = temps.Acquire();
7753 AddWord(scratch, dst.rm(), dst.offset());
7754 trapper(pc_offset());
7755 if (COMPRESS_POINTERS_BOOL) {
7756 amoswap_w(true, true, zero_reg, src, scratch);
7757 } else {
7758 amoswap_d(true, true, zero_reg, src, scratch);
7759 }
7760}
7761
7762 void MacroAssembler::DecompressTaggedSigned(const Register& destination,
7763 const MemOperand& field_operand,
7764 Trapper&& trapper) {
7765 ASM_CODE_COMMENT(this);
7766 Lwu(destination, field_operand, std::forward<Trapper>(trapper));
7767 if (v8_flags.slow_debug_code) {
7768 // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
7769 AddWord(destination, destination,
7770 Operand(((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32));
7771 }
7772}
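// Debug-zap sketch for the slow_debug_code path above: after the final shift
// by 32, the upper word of the result carries 16 bits derived from
// kDebugZapValue plus the low 16 bits of pc_offset(), so an accidental use of
// the (normally zero) upper half of the decompressed value is detectable and
// attributable to the decompression site.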
7773
7774 void MacroAssembler::DecompressTagged(const Register& destination,
7775 const MemOperand& field_operand,
7776 Trapper&& trapper) {
7777 ASM_CODE_COMMENT(this);
7778 Lwu(destination, field_operand, std::forward<Trapper>(trapper));
7779 AddWord(destination, kPtrComprCageBaseRegister, destination);
7780}
7781
7782 void MacroAssembler::DecompressTagged(const Register& destination,
7783 const Register& source) {
7784 ASM_CODE_COMMENT(this);
7785 And(destination, source, Operand(0xFFFFFFFF));
7786 AddWord(destination, kPtrComprCageBaseRegister, destination);
7787}
7788
7789void MacroAssembler::DecompressTagged(Register dst, Tagged_t immediate) {
7790 ASM_CODE_COMMENT(this);
7791 AddWord(dst, kPtrComprCageBaseRegister, static_cast<int32_t>(immediate));
7792}
7793
7794 void MacroAssembler::DecompressProtected(const Register& destination,
7795 const MemOperand& field_operand,
7796 Trapper&& trapper) {
7797#ifdef V8_ENABLE_SANDBOX
7798 CHECK(V8_ENABLE_SANDBOX_BOOL);
7799 ASM_CODE_COMMENT(this);
7800 UseScratchRegisterScope temps(this);
7801 Register scratch = temps.Acquire();
7802 Lw(destination, field_operand, std::forward<Trapper>(trapper));
7803 LoadWord(scratch,
7804 MemOperand(kRootRegister, IsolateData::trusted_cage_base_offset()));
7805 Or(destination, destination, scratch);
7806#else
7807 UNREACHABLE();
7808#endif // V8_ENABLE_SANDBOX
7809}
7810
7811 void MacroAssembler::AtomicDecompressTaggedSigned(Register dst,
7812 const MemOperand& src,
7813 Trapper&& trapper) {
7814 ASM_CODE_COMMENT(this);
7815 Lwu(dst, src, std::forward<Trapper>(trapper));
7816 sync();
7817 if (v8_flags.slow_debug_code) {
7818 // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
7819 AddWord(dst, dst,
7820 Operand(((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32));
7821 }
7822}
7823
7824void MacroAssembler::AtomicDecompressTagged(Register dst, const MemOperand& src,
7825 Trapper&& trapper) {
7826 ASM_CODE_COMMENT(this);
7827 Lwu(dst, src, std::forward<Trapper>(trapper));
7828 sync();
7829 AddWord(dst, kPtrComprCageBaseRegister, dst);
7830}
7831
7832#endif
7836
7843
7844// Calls an API function. Allocates HandleScope, extracts returned value
7845// from handle and propagates exceptions. Clobbers C argument registers
7846// and C caller-saved registers. Restores context. On return removes
7847// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
7848// (GCed, includes the call JS arguments space and the additional space
7849// allocated for the fast call).
7850void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
7851 Register function_address,
7852 ExternalReference thunk_ref, Register thunk_arg,
7853 int slots_to_drop_on_return,
7854 MemOperand* argc_operand,
7855 MemOperand return_value_operand) {
7856 ASM_CODE_COMMENT(masm);
7857 using ER = ExternalReference;
7858
7859 Isolate* isolate = masm->isolate();
7860 MemOperand next_mem_op = __ ExternalReferenceAsOperand(
7861 ER::handle_scope_next_address(isolate), no_reg);
7862 MemOperand limit_mem_op = __ ExternalReferenceAsOperand(
7863 ER::handle_scope_limit_address(isolate), no_reg);
7864 MemOperand level_mem_op = __ ExternalReferenceAsOperand(
7865 ER::handle_scope_level_address(isolate), no_reg);
7866
7867 Register return_value = a0;
7868 Register scratch = a4;
7869 Register scratch2 = a5;
7870
7871 // Allocate HandleScope in callee-saved registers.
7872 // We will need to restore the HandleScope after the call to the API function;
7873 // by allocating it in callee-saved registers it will be preserved across the C call.
7874 Register prev_next_address_reg = kScratchReg;
7875 Register prev_limit_reg = s1;
7876 Register prev_level_reg = s2;
7877
7878 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
7879 // this function must not corrupt them (return_value overlaps with
7880 // kCArgRegs[0] but that's ok because we start using it only after the C
7881 // call).
7882 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
7883 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
7884 // function_address and thunk_arg might overlap but this function must not
7885 // corrupt them until the call is made (i.e. overlap with return_value is
7886 // fine).
7887 DCHECK(!AreAliased(function_address, // incoming parameters
7888 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
7889 DCHECK(!AreAliased(thunk_arg, // incoming parameters
7890 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
7891 {
7893 "Allocate HandleScope in callee-save registers.");
7894 __ LoadWord(prev_next_address_reg, next_mem_op);
7895 __ LoadWord(prev_limit_reg, limit_mem_op);
7896 __ Lw(prev_level_reg, level_mem_op);
7897 __ Add32(scratch, prev_level_reg, Operand(1));
7898 __ Sw(scratch, level_mem_op);
7899 }
7900
7901 Label profiler_or_side_effects_check_enabled, done_api_call;
7902 if (with_profiling) {
7903 __ RecordComment("Check if profiler or side effects check is enabled");
7904 __ Lb(scratch,
7905 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
7906 __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,
7907 Operand(zero_reg));
7908#ifdef V8_RUNTIME_CALL_STATS
7909 __ RecordComment("Check if RCS is enabled");
7910 __ li(scratch, ER::address_of_runtime_stats_flag());
7911 __ Lw(scratch, MemOperand(scratch, 0));
7912 __ Branch(&profiler_or_side_effects_check_enabled, ne, scratch,
7913 Operand(zero_reg));
7914#endif // V8_RUNTIME_CALL_STATS
7915 }
7916
7917 __ RecordComment("Call the api function directly.");
7918 __ StoreReturnAddressAndCall(function_address);
7919 __ bind(&done_api_call);
7920
7921 Label propagate_exception;
7922 Label delete_allocated_handles;
7923 Label leave_exit_frame;
7924
7925 __ RecordComment("Load the value from ReturnValue");
7926 __ LoadWord(return_value, return_value_operand);
7927
7928 {
7929 ASM_CODE_COMMENT_STRING(
7930 masm,
7931 "No more valid handles (the result handle was the last one)."
7932 "Restore previous handle scope.");
7933 __ StoreWord(prev_next_address_reg, next_mem_op);
7934 if (v8_flags.debug_code) {
7935 __ Lw(scratch, level_mem_op);
7936 __ Sub32(scratch, scratch, Operand(1));
7937 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, scratch,
7938 Operand(prev_level_reg));
7939 }
7940 __ Sw(prev_level_reg, level_mem_op);
7941 __ LoadWord(scratch, limit_mem_op);
7942 __ Branch(&delete_allocated_handles, ne, prev_limit_reg, Operand(scratch));
7943 }
7944 __ RecordComment("Leave the API exit frame.");
7945 __ bind(&leave_exit_frame);
7946
7947 Register argc_reg = prev_limit_reg;
7948 if (argc_operand != nullptr) {
7949 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
7950 __ LoadWord(argc_reg, *argc_operand);
7951 }
7952
7953 __ LeaveExitFrame(scratch);
7954
7955 {
7957 "Check if the function scheduled an exception.");
7958 __ LoadRoot(scratch, RootIndex::kTheHoleValue);
7959 __ LoadWord(scratch2, __ ExternalReferenceAsOperand(
7960 ER::exception_address(isolate), no_reg));
7961 __ Branch(&propagate_exception, ne, scratch, Operand(scratch2));
7962 }
7963
7964 __ AssertJSAny(return_value, scratch, scratch2,
7965 AbortReason::kAPICallReturnedInvalidObject);
7966
7967 if (argc_operand == nullptr) {
7968 DCHECK_NE(slots_to_drop_on_return, 0);
7969 __ AddWord(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
7970 } else {
7971 // {argc_operand} was loaded into {argc_reg} above.
7972 if (slots_to_drop_on_return != 0) {
7973 __ AddWord(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
7974 }
7975 __ CalcScaledAddress(sp, sp, argc_reg, kSystemPointerSizeLog2);
7976 }
7977
7978 __ Ret();
7979
7980 if (with_profiling) {
7981 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
7982 __ bind(&profiler_or_side_effects_check_enabled);
7983 // Additional parameter is the address of the actual callback function.
7984 if (thunk_arg.is_valid()) {
7985 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
7986 IsolateFieldId::kApiCallbackThunkArgument);
7987 __ StoreWord(thunk_arg, thunk_arg_mem_op);
7988 }
7989 __ li(scratch, thunk_ref);
7990 __ StoreReturnAddressAndCall(scratch);
7991 __ Branch(&done_api_call);
7992 }
7993
7994 __ RecordComment("An exception was thrown. Propagate it.");
7995 __ bind(&propagate_exception);
7996 __ TailCallRuntime(Runtime::kPropagateException);
7997
7998 {
7999 ASM_CODE_COMMENT_STRING(
8000 masm, "HandleScope limit has changed. Delete allocated extensions.");
8001 __ bind(&delete_allocated_handles);
8002 __ StoreWord(prev_limit_reg, limit_mem_op);
8003 // Save the return value in a callee-save register.
8004 Register saved_result = prev_limit_reg;
8005 __ Move(saved_result, a0);
8006 __ PrepareCallCFunction(1, prev_level_reg);
8007 __ li(kCArgRegs[0], ER::isolate_address(isolate));
8008 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
8009 __ Move(kCArgRegs[0], saved_result);
8010 __ Branch(&leave_exit_frame);
8011 }
8012}
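// Stack adjustment on the normal return path above, restated: when
// argc_operand is null, exactly slots_to_drop_on_return * kSystemPointerSize
// bytes are dropped; otherwise the count previously loaded from *argc_operand
// is additionally scaled by kSystemPointerSizeLog2, i.e.
//   sp += (slots_to_drop_on_return + *argc_operand) * kSystemPointerSize
// matching the header comment of this function.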
8013
8014 void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
8015 Register scratch, Label* fbv_undef) {
8016 Label done;
8017 // Load the feedback vector from the closure.
8018 LoadTaggedField(dst,
8019 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
8020 LoadTaggedField(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset));
8021
8022 // Check if feedback vector is valid.
8023 LoadTaggedField(scratch, FieldMemOperand(dst, HeapObject::kMapOffset));
8024 Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
8025 Branch(&done, eq, scratch, Operand(FEEDBACK_VECTOR_TYPE));
8026
8027 // Not valid, load undefined.
8028 LoadRoot(dst, RootIndex::kUndefinedValue);
8029 Branch(fbv_undef);
8030
8031 bind(&done);
8032}
8033
8034 void MacroAssembler::BranchRange(Label* L, Condition cond, Register value,
8035 Register scratch, unsigned lower_limit,
8036 unsigned higher_limit,
8037 Label::Distance distance) {
8038 ASM_CODE_COMMENT(this);
8039 DCHECK_LT(lower_limit, higher_limit);
8040 if (lower_limit != 0) {
8041 SubWord(scratch, value, Operand(lower_limit));
8042 Branch(L, cond, scratch, Operand(higher_limit - lower_limit), distance);
8043 } else {
8044 Branch(L, cond, value, Operand(higher_limit - lower_limit), distance);
8045 }
8046}
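// Range-check idea used above: a single unsigned comparison of
// (value - lower_limit) against (higher_limit - lower_limit) can test
// membership in [lower_limit, higher_limit] (or its complement, depending on
// cond), because values below lower_limit wrap around to large unsigned
// numbers. When lower_limit is zero the subtraction is unnecessary and value
// is compared directly.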
8047
8048#undef __
8049} // namespace internal
8050} // namespace v8