v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-riscv.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_RISCV_MACRO_ASSEMBLER_RISCV_H_
#define V8_CODEGEN_RISCV_MACRO_ASSEMBLER_RISCV_H_

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#include <optional>

#include "src/common/globals.h"

namespace v8 {
namespace internal {

#define xlen (uint8_t(sizeof(void*) * 8))
// Forward declarations.
enum class AbortReason : uint8_t;

// Reserved Register Usage Summary.
//
// Registers t3, t5, and t6 are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// TODO(RISCV): Cannot find info about this ABI. We chose t6 for now.
// Per the RISC-V ABI, register t6 must be used for indirect function calls
// via 'jalr t6' or 'jr t6' instructions. This is relied upon by gcc when
// trying to update the gp register for position-independent code. Whenever
// RISC-V generated code calls C code, it must do so via the t6 register.

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize li to use a single instruction, rather than a lui/ori/slli
  // sequence. A number of other optimizations that emit fewer than the
  // maximum number of instructions also exist.
  OPTIMIZE_SIZE = 0,
  // Always use 8 instructions (a lui/addi/slliw sequence), even if the
  // constant could be loaded with just one, so that this value is patchable
  // later.
  CONSTANT_SIZE = 1,
  // For address loads, 8 instructions are required. Used to mark a
  // constant load that will be used as an address without relocation
  // information. It ensures predictable code size, so specific sites
  // in code are patchable.
  ADDRESS_LOAD = 2
};
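
// Illustrative usage (a sketch, not part of the original header; assumes a
// MacroAssembler bound to the usual '__' macro and that t0 is free):
//
//   __ li(t0, Operand(42), OPTIMIZE_SIZE);    // may emit a single addi
//   __ li(t0, Operand(imm), CONSTANT_SIZE);   // fixed-length, patchable later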

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

// -----------------------------------------------------------------------------
// Static helper functions.

#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
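
// Illustrative usage (a sketch, not part of the original header): loading the
// map field of a tagged object held in 'obj' into 'scratch':
//
//   __ LoadTaggedField(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));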

// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
//              n64, and used in RegExp code, and other places
//              with more than 8 arguments.
inline MemOperand CFunctionArgumentOperand(int index) {
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kSystemPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
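
// Illustrative usage (a sketch, not part of the original header; the value
// register t0 is an assumption): storing a fifth word-sized C argument before
// a CallCFunction():
//
//   __ StoreWord(t0, CFunctionArgumentOperand(5));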

class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
 public:
  using MacroAssemblerBase::MacroAssemblerBase;

  // Activation support.
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pools are not implemented on RISC-V.
    UNREACHABLE();
  }

  // Generates function and stub prologue code.
  void Prologue();

  void InitializeRootRegister() {
    ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
    li(kRootRegister, Operand(isolate_root));
#ifdef V8_COMPRESS_POINTERS
    LoadRootRelative(kPtrComprCageBaseRegister,
                     IsolateData::cage_base_offset());
#endif
  }

  // Jump unconditionally to the given label.
  void jmp(Label* L, Label::Distance distance = Label::kFar) {
    Branch(L, distance);
  }

  // -------------------------------------------------------------------------
  // Debugging.

  void Trap();
#ifdef USE_SIMULATOR
  // See src/codegen/riscv/base-constants-riscv.h DebugParameters.
  void Debug(uint32_t parameters) { break_(parameters, false); }
#endif
  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);

  void AssertJSAny(Register object, Register map_tmp, Register tmp,
                   AbortReason abort_reason);

  // Abort execution if the argument is neither a smi nor within the main
  // pointer compression cage; enabled via --debug-code.

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  // Like Assert(), but always enabled.
  void Check(Condition cc, AbortReason reason, Register rs, Operand rt);

  // Same as Check(), but expresses that the check is needed for the sandbox.

  // Print a message to stdout and abort execution.

  // Argument macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
  void Name(target_type target);                     \
  void Name(target_type target, COND_TYPED_ARGS);

#define DECLARE_BRANCH_PROTOTYPES(Name)   \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int32_t)

  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

  void Branch(Label* target);
  void Branch(int32_t target);
  void BranchLong(Label* L);
  void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
              Label::Distance distance = Label::kFar);
  void Branch(Label* target, Label::Distance distance) {
    Branch(target, cc_always, zero_reg, Operand(zero_reg), distance);
  }
  void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
              Label::Distance distance = Label::kFar);
  void Branch(Label* L, Condition cond, Register rj, RootIndex index,
              Label::Distance distance = Label::kFar);
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS

  void BranchRange(Label* L, Condition cond, Register value, Register scratch,
                   unsigned lower_limit, unsigned higher_limit,
                   Label::Distance distance = Label::kFar);
  void AllocateStackSpace(Register bytes) { SubWord(sp, sp, bytes); }

  void AllocateStackSpace(int bytes) {
    DCHECK_GE(bytes, 0);
    if (bytes == 0) return;
    SubWord(sp, sp, Operand(bytes));
  }

  inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); }
207
208 // Compare float, if any operand is NaN, result is false except for NE
209 void CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
210 FPURegister cmp2);
211 // Compare double, if any operand is NaN, result is false except for NE
212 void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
213 FPURegister cmp2);
214 void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
215 void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
216 void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
217 void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
218
219 // Floating point branches
220 void BranchTrueShortF(Register rs, Label* target);
221 void BranchFalseShortF(Register rs, Label* target);
222
223 void BranchTrueF(Register rs, Label* target);
224 void BranchFalseF(Register rs, Label* target);
225
                              const Operand& r2, bool need_link = false);
  static int InstrCountForLi64Bit(int64_t value);
  void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  // Load an int32 into the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, intptr_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }

  inline void Move(Register output, MemOperand operand) {
    LoadWord(output, operand);
  }
  void li(Register dst, Handle<HeapObject> value,
          RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void StoreRootRelative(int32_t offset, Register value) final;

  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  MemOperand ExternalReferenceAsOperand(ExternalReference reference,
                                        Register scratch);
  MemOperand ExternalReferenceAsOperand(IsolateFieldId id) {
    return ExternalReferenceAsOperand(ExternalReference::Create(id), no_reg);
  }
  inline void GenPCRelativeJump(Register rd, int32_t imm32) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(is_int32(imm32 + 0x800));
    int32_t Hi20 = ((imm32 + 0x800) >> 12);
    int32_t Lo12 = imm32 << 20 >> 20;
    auipc(rd, Hi20);  // rd = pc + (Hi20 << 12).
    jr(rd, Lo12);     // Jump to pc + (Hi20 << 12) + Lo12.
  }

  inline void GenPCRelativeJumpAndLink(Register rd, int32_t imm32) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(is_int32(imm32 + 0x800));
    int32_t Hi20 = ((imm32 + 0x800) >> 12);
    int32_t Lo12 = imm32 << 20 >> 20;
    auipc(rd, Hi20);  // rd = pc + (Hi20 << 12).
    jalr(rd, Lo12);   // Jump to pc + (Hi20 << 12) + Lo12, linking ra.
  }
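
  // Worked example (an editorial note, not part of the original header):
  // splitting imm32 = 0x12345FFF. Adding 0x800 before the shift compensates
  // for the sign extension of Lo12:
  //   Hi20 = (0x12345FFF + 0x800) >> 12 = 0x12346
  //   Lo12 = 0x12345FFF << 20 >> 20     = -1
  //   (0x12346 << 12) + (-1)            = 0x12345FFF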
277
278 // Generate a B immediate instruction with the corresponding relocation info.
279 // 'offset' is the immediate to encode in the B instruction (so it is the
280 // difference between the target and the PC of the instruction, divided by
281 // the instruction size).
283 UseScratchRegisterScope temps(this);
284 Register temp = temps.Acquire();
285 if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode, offset);
286 GenPCRelativeJump(temp, offset);
287 }
288 // Generate a auipc+jalr instruction with the corresponding relocation info.
289 // As for near_jump, 'offset' is the immediate to encode in the auipc+jalr
290 // instruction.
292 UseScratchRegisterScope temps(this);
293 Register temp = temps.Acquire();
294 if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode, offset);
295 GenPCRelativeJumpAndLink(temp, offset);
296 }
297 // Generate a BL immediate instruction with the corresponding relocation info
298 // for the input HeapNumberRequest.

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS                              \
  Condition cond = al, Register rs = zero_reg, \
  const Operand &rt = Operand(zero_reg)

  void Jump(Register target, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  // Unlike li, this method stores the target in memory and then loads it
  // into a register with ld, so it can be used in the wasm jump table for
  // concurrent patching.

  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }
  static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
                                       uint8_t* pc);
  void PatchAndJump(Address target);
  void Jump(const ExternalReference& reference);
  void Call(Register target, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            COND_ARGS);
  void Call(Label* target);
  void LoadAddress(
      Register dst, Label* target,
      RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);

  // Load the code entry point from the Code object.
      Register destination, Register code_object,
      CodeEntrypointTag tag = kDefaultCodeEntrypointTag);
      JumpMode jump_mode = JumpMode::kJump);

  // Convenience functions to call/jmp to the code of a JSFunction object.
  void CallJSFunction(Register function_object, uint16_t argument_count);
  void JumpJSFunction(Register function_object,
                      JumpMode jump_mode = JumpMode::kJump);
#ifdef V8_ENABLE_LEAPTIERING
  void CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
                           uint16_t argument_count);
#endif
#ifdef V8_ENABLE_WEBASSEMBLY
  void ResolveWasmCodePointer(Register target, uint64_t signature_hash);
  void CallWasmCodePointer(Register target, uint64_t signature_hash,
                           CallJumpMode call_jump_mode = CallJumpMode::kCall);
  void CallWasmCodePointerNoSignatureCheck(Register target);
  void LoadWasmCodePointer(Register dst, MemOperand src);
#endif

  // Load the builtin given by the Smi in |builtin| into the same
  // register.
  // Load the builtin given by the Smi in |builtin_index| into |target|.
  void LoadEntryFromBuiltinIndex(Register builtin_index, Register target);
  void CallBuiltinByIndex(Register builtin_index, Register target);
  void CallBuiltin(Builtin builtin);
  void TailCallBuiltin(Builtin builtin, Condition cond, Register type,
                       Operand range);

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
#ifdef V8_TARGET_ARCH_RISCV32
  // Enforce platform-specific stack alignment.
  void EnforceStackAlignment();
#endif

  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that only emits 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);

  void push(Register src) {
    AddWord(sp, sp, Operand(-kSystemPointerSize));
    StoreWord(src, MemOperand(sp, 0));
  }
  void Push(Register src) { push(src); }
  void Push(Tagged<Smi> smi);

 private:
  template <typename... Rs>
  void push_helper(Register r, Rs... rs) {
    StoreWord(r, MemOperand(sp, sizeof...(rs) * kSystemPointerSize));
    push_helper(rs...);
  }

  template <>
  void push_helper(Register r) {
    StoreWord(r, MemOperand(sp, 0));
  }

 public:
  // Push a number of registers. The leftmost register first (to the highest
  // address).
  template <typename... Rs>
  void Push(Register r, Rs... rs) {
    SubWord(sp, sp, (sizeof...(rs) + 1) * kSystemPointerSize);
    push_helper(r, rs...);
  }
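
  // Illustrative usage (a sketch, not part of the original header): pushing
  // three registers with a single sp adjustment; a0 is pushed first and ends
  // up at the highest address:
  //
  //   __ Push(a0, a1, a2);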

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution, we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    SubWord(sp, sp, Operand(kSystemPointerSize));
    StoreWord(src, MemOperand(sp, 0));
  }

  enum PushArrayOrder { kNormal, kReverse };
  void PushArray(Register array, Register size, PushArrayOrder order = kNormal);

  // Caution: if {value} is a 32-bit negative int, it should be sign-extended
  // to 64 bits before calling this function.
  void Switch(Register scratch, Register value, int case_value_base,
              Label** labels, int num_labels);

      SaveFPRegsMode fp_mode);
      SaveFPRegsMode fp_mode,
      Register object, Operand offset, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
      Register object, Register slot_address, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // For a given |object| and |offset|:
  // - Move |object| to |dst_object|.
  // - Compute the address of the slot pointed to by |offset| in |object| and
  //   write it to |dst_slot|.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // These PushAll/PopAll respect the order of the registers in the stack from
  // low index to high.
  void PushAll(RegList registers) {
    if (registers.is_empty()) return;
    ASM_CODE_COMMENT(this);
    // TODO(victorgomes): pushes/pops registers in the opposite order
    // as expected by Maglev frame. Consider massaging Maglev to accept this
    // order instead.
    int16_t num_to_push = registers.Count();
    int16_t stack_offset = num_to_push * kSystemPointerSize;

    SubWord(sp, sp, Operand(stack_offset));
    for (int16_t i = 0; i < kNumRegisters; i++) {
      if ((registers.bits() & (1 << i)) != 0) {
        stack_offset -= kSystemPointerSize;
        StoreWord(Register::from_code(i), MemOperand(sp, stack_offset));
      }
    }
  }

  void PopAll(RegList registers) {
    if (registers.is_empty()) return;
    int16_t stack_offset = 0;
    for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
      if ((registers.bits() & (1 << i)) != 0) {
        LoadWord(Register::from_code(i), MemOperand(sp, stack_offset));
        stack_offset += kSystemPointerSize;
      }
    }
    addi(sp, sp, stack_offset);
  }

  void PushAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) {
    DCHECK_EQ(stack_slot_size, kDoubleSize);
    int16_t num_to_push = registers.Count();
    int16_t stack_offset = num_to_push * kDoubleSize;

    SubWord(sp, sp, Operand(stack_offset));
    for (int16_t i = 0; i < kNumRegisters; i++) {
      if ((registers.bits() & (1 << i)) != 0) {
        stack_offset -= kDoubleSize;
        StoreDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      }
    }
  }

  void PopAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) {
    DCHECK_EQ(stack_slot_size, kDoubleSize);
    int16_t stack_offset = 0;
    for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
      if ((registers.bits() & (1 << i)) != 0) {
        LoadDouble(FPURegister::from_code(i), MemOperand(sp, stack_offset));
        stack_offset += kDoubleSize;
      }
    }
    addi(sp, sp, stack_offset);
  }
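
  // Illustrative usage (a sketch, not part of the original header): saving
  // and restoring a register set around clobbering code:
  //
  //   __ PushAll(RegList{a0, a1, s1});
  //   // ... code that clobbers a0, a1, and s1 ...
  //   __ PopAll(RegList{a0, a1, s1});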

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher-numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller-saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller-saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);

  void pop(Register dst) {
    LoadWord(dst, MemOperand(sp, 0));
    AddWord(sp, sp, Operand(kSystemPointerSize));
  }
  void Pop(Register dst) { pop(dst); }

 private:
  template <typename... Rs>
  void pop_helper(Register r, Rs... rs) {
    pop_helper(rs...);
    LoadWord(r, MemOperand(sp, sizeof...(rs) * kSystemPointerSize));
  }

  template <>
  void pop_helper(Register r) {
    LoadWord(r, MemOperand(sp, 0));
  }

 public:
  // Pop a number of registers. The leftmost register last (from the highest
  // address).
  template <typename... Rs>
  void Pop(Register r, Rs... rs) {
    pop_helper(r, rs...);
    AddWord(sp, sp, (sizeof...(rs) + 1) * kSystemPointerSize);
  }

  void Pop(uint32_t count = 1) {
    AddWord(sp, sp, Operand(count * kSystemPointerSize));
  }

  // Pop multiple values from the stack and load them into the
  // registers specified in regs. The pop order is the opposite of MultiPush.
  void MultiPop(RegList regs);

#define DEFINE_INSTRUCTION(instr)                          \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) {      \
    instr(rd, rs, Operand(rt));                            \
  }                                                        \
  void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }

#define DEFINE_INSTRUCTION2(instr)                                 \
  void instr(Register rs, const Operand& rt);                      \
  void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
  void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }

#define DEFINE_INSTRUCTION3(instr) void instr(Register rd, intptr_t imm);

  DEFINE_INSTRUCTION(AddWord)
  DEFINE_INSTRUCTION(SubWord)
  DEFINE_INSTRUCTION(SllWord)
  DEFINE_INSTRUCTION(SrlWord)
  DEFINE_INSTRUCTION(SraWord)
#if V8_TARGET_ARCH_RISCV64
  DEFINE_INSTRUCTION(Add32)
  DEFINE_INSTRUCTION(Add64)
  DEFINE_INSTRUCTION(Div32)
  DEFINE_INSTRUCTION(Divu32)
  DEFINE_INSTRUCTION(Divu64)
  DEFINE_INSTRUCTION(Mod32)
  DEFINE_INSTRUCTION(Modu32)
  DEFINE_INSTRUCTION(Div64)
  DEFINE_INSTRUCTION(Sub32)
  DEFINE_INSTRUCTION(Sub64)
  DEFINE_INSTRUCTION(Mod64)
  DEFINE_INSTRUCTION(Modu64)
  DEFINE_INSTRUCTION(Mul32)
  DEFINE_INSTRUCTION(Mulh32)
  DEFINE_INSTRUCTION(Mul64)
  DEFINE_INSTRUCTION(Mulh64)
  DEFINE_INSTRUCTION(Mulhu64)
  DEFINE_INSTRUCTION2(Divu32)
  DEFINE_INSTRUCTION2(Divu64)
  DEFINE_INSTRUCTION(Sll64)
  DEFINE_INSTRUCTION(Sra64)
  DEFINE_INSTRUCTION(Srl64)
#elif V8_TARGET_ARCH_RISCV32
  DEFINE_INSTRUCTION(Add32)
  DEFINE_INSTRUCTION(Sub32)
  DEFINE_INSTRUCTION(Mul32)
#endif

  DEFINE_INSTRUCTION(Sll32)
  DEFINE_INSTRUCTION(Sra32)
  DEFINE_INSTRUCTION(Srl32)

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void Amosub_w(bool aq, bool rl, Register rd, Register rs1, Register rs2) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.Acquire();
    sub(temp, zero_reg, rs2);
    amoadd_w(aq, rl, rd, rs1, temp);
  }

  // Convert a smi to a word-size sign-extended value.
  void SmiUntag(Register dst, const MemOperand& src);
  void SmiUntag(Register dst, Register src) {
#if V8_TARGET_ARCH_RISCV64
      sraiw(dst, src, kSmiShift);
    } else {
      srai(dst, src, kSmiShift);
    }
#elif V8_TARGET_ARCH_RISCV32
    srai(dst, src, kSmiShift);
#endif
  }

  // Convert a smi to a 32-bit value.

  // Enabled via --debug-code.
  void AssertNotSmi(Register object,
                    AbortReason reason = AbortReason::kOperandIsASmi);
  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsASmi);

  // Abort execution if a 64-bit register containing a 32-bit payload does
  // not have zeros in the top 32 bits; enabled via --debug-code.

  // Abort execution if a 64-bit register containing a 32-bit payload does
  // not have all zeros or all ones in the top 32 bits; enabled via
  // --debug-code.

  void AssertRange(Condition cond, AbortReason reason, Register value,
                   Register scratch, unsigned lower_limit,
                   unsigned higher_limit) NOOP_UNLESS_DEBUG_CODE;

  int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);

  // Before calling a C function from generated code, align arguments on the
  // stack. After aligning the frame, non-register arguments must be stored
  // on the stack, using the helper CFunctionArgumentOperand().
  // The argument count assumes all arguments are word-sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // Arguments 1-8 are placed in registers a0 through a7, respectively.
  // Arguments 9..n are stored on the stack.

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  int CallCFunction(
      ExternalReference function, int num_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_location = nullptr);
  int CallCFunction(
      Register function, int num_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_location = nullptr);
  int CallCFunction(
      ExternalReference function, int num_reg_arguments,
      int num_double_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_location = nullptr);
  int CallCFunction(
      Register function, int num_reg_arguments, int num_double_arguments,
      SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
      Label* return_location = nullptr);
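
  // Illustrative call sequence (a sketch, not part of the original header;
  // 'ref' is an assumed ExternalReference to a two-argument C function):
  //
  //   __ PrepareCallCFunction(2, t0);  // t0 is used as a scratch register.
  //   __ li(a0, Operand(1));           // First argument in a0.
  //   __ li(a1, Operand(2));           // Second argument in a1.
  //   __ CallCFunction(ref, 2);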

  // These functions abstract parameter passing for the three different ways
  // we call C functions from generated code.

  // See comments at the beginning of Builtins::Generate_CEntry.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, ref);
  }

  void CheckPageFlag(Register object, int mask, Condition cc,
                     Label* condition_met);

  void CheckPageFlag(const Register& object, Register scratch, int mask,
                     Condition cc, Label* condition_met) {
    CheckPageFlag(object, mask, cc, condition_met);
  }
#undef COND_ARGS

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  void CompareI(Register rd, Register rs, const Operand& rt, Condition cond);

  void SignExtendByte(Register rd, Register rs) {
    if (CpuFeatures::IsSupported(ZBB)) {
      sextb(rd, rs);
    } else {
      slli(rd, rs, xlen - 8);
      srai(rd, rd, xlen - 8);
    }
  }

  void SignExtendShort(Register rd, Register rs) {
    if (CpuFeatures::IsSupported(ZBB)) {
      sexth(rd, rs);
    } else {
      slli(rd, rs, xlen - 16);
      srai(rd, rd, xlen - 16);
    }
  }
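
  // Editorial note (not part of the original header): the slli/srai fallback
  // is the classic shift idiom; e.g. with xlen == 64, shifting left and then
  // arithmetically right by 56 replicates bit 7 of rs into bits 63..8 of rd.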

  void Clz32(Register rd, Register rs);
  void Ctz32(Register rd, Register rs);
  void Popcnt32(Register rd, Register rs, Register scratch);

#if V8_TARGET_ARCH_RISCV64
  void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); }
  void ZeroExtendWord(Register rd, Register rs) {
    if (CpuFeatures::IsSupported(ZBA)) {
      zextw(rd, rs);
    } else {
      slli(rd, rs, 32);
      srli(rd, rd, 32);
    }
  }
  void Popcnt64(Register rd, Register rs, Register scratch);
  void Ctz64(Register rd, Register rs);
  void Clz64(Register rd, Register rs);
#elif V8_TARGET_ARCH_RISCV32
  void AddPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void SubPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void AndPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high);

  void OrPair(Register dst_low, Register dst_high, Register left_low,
              Register left_high, Register right_low, Register right_high);

  void XorPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high);

  void MulPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);
  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, int32_t shift, Register scratch1,
               Register scratch2);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, int32_t shift, Register scratch1,
               Register scratch2);

  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);
  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, int32_t shift, Register scratch1,
               Register scratch2);
#endif

  // A bit field starting at bit position pos and extending for size bits is
  // extracted from rs and stored zero/sign-extended and right-justified in rt.
  void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size,
                   bool sign_extend = false);
  void ExtractBits(Register dest, Register source, Register pos, int size,
                   bool sign_extend = false) {
    sra(dest, source, pos);
    ExtractBits(dest, dest, 0, size, sign_extend);
  }

  // Insert bits [0, size) of source into bits [pos, pos+size) of dest.
  void InsertBits(Register dest, Register source, Register pos, int size);
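
  // Illustrative usage (a sketch, not part of the original header):
  // extracting the second byte of a0 into a1, zero-extended:
  //
  //   __ ExtractBits(a1, a0, 8, 8);  // a1 = (a0 >> 8) & 0xff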

  // Change endianness.
  void ByteSwap(Register dest, Register src, int operand_size,
                Register scratch = no_reg);

  // Helper function for byte reversal.
  template <int NBYTES>
  void ReverseBytesHelper(Register rd, Register rs, Register tmp1,
                          Register tmp2);

  void Clear_if_nan_d(Register rd, FPURegister fs);
  void Clear_if_nan_s(Register rd, FPURegister fs);
  // Convert single to unsigned word.
  void Trunc_uw_s(Register rd, FPURegister fs, Register result = no_reg);

  // Helper functions for unaligned load/store.
  template <int NBYTES, bool IS_SIGNED>
  void UnalignedLoadHelper(Register rd, const MemOperand& rs);
  template <int NBYTES>
  void UnalignedStoreHelper(Register rd, const MemOperand& rs,
                            Register scratch_other = no_reg);

  template <int NBYTES>
  template <int NBYTES>
#if V8_TARGET_ARCH_RISCV32
  void UnalignedDoubleHelper(FPURegister frd, const MemOperand& rs);
  void UnalignedDStoreHelper(FPURegister frd, const MemOperand& rs);
#endif

  template <typename Reg_T, typename Func>
  void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
  template <typename Reg_T, typename Func>
  void AlignedStoreHelper(Reg_T value, const MemOperand& rs, Func generator);

  template <int NBYTES, bool LOAD_SIGNED>
  void LoadNBytes(Register rd, const MemOperand& rs, Register scratch);
  template <int NBYTES, bool LOAD_SIGNED>
  void LoadNBytesOverwritingBaseReg(const MemOperand& rs, Register scratch0,
                                    Register scratch1);
  // Load/store macros.
  void Ulh(Register rd, const MemOperand& rs);
  void Ulhu(Register rd, const MemOperand& rs);
  void Ush(Register rd, const MemOperand& rs);

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  void Uld(Register rd, const MemOperand& rs);
  void Usd(Register rd, const MemOperand& rs);

  void ULoadFloat(FPURegister fd, const MemOperand& rs);
  void UStoreFloat(FPURegister fd, const MemOperand& rs);

  void ULoadDouble(FPURegister fd, const MemOperand& rs);
  void UStoreDouble(FPURegister fd, const MemOperand& rs);

  using Trapper = std::function<void(int)>;

  void Lb(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Lbu(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Sb(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});

  void Lh(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Lhu(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Sh(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});

  void Lw(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Sw(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
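
  // Illustrative note (a sketch, not part of the original header): the
  // Trapper callback receives the pc offset of the emitted memory
  // instruction, e.g. so a caller can record it for wasm trap-handler
  // metadata:
  //
  //   __ Lw(a0, MemOperand(a1, 0),
  //         [&](int pc) { /* record the protected instruction's offset */ });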

#if V8_TARGET_ARCH_RISCV64
  void Ulwu(Register rd, const MemOperand& rs);
  void Lwu(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Ld(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Sd(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Lld(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Scd(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});

  inline void Load32U(Register rd, const MemOperand& rs,
                      Trapper&& trapper = [](int) {}) {
    Lwu(rd, rs, std::forward<Trapper>(trapper));
  }
  inline void LoadWord(Register rd, const MemOperand& rs,
                       Trapper&& trapper = [](int) {}) {
    Ld(rd, rs, std::forward<Trapper>(trapper));
  }
  inline void StoreWord(Register rd, const MemOperand& rs,
                        Trapper&& trapper = [](int) {}) {
    Sd(rd, rs, std::forward<Trapper>(trapper));
  }
#elif V8_TARGET_ARCH_RISCV32
  inline void Load32U(
      Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {}) {
    Lw(rd, rs, std::forward<Trapper>(trapper));
  }
  inline void LoadWord(
      Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {}) {
    Lw(rd, rs, std::forward<Trapper>(trapper));
  }
  inline void StoreWord(
      Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {}) {
    Sw(rd, rs, std::forward<Trapper>(trapper));
  }
#endif
  void LoadFloat(
      FPURegister fd, const MemOperand& src, Trapper&& trapper = [](int) {});
  void StoreFloat(
      FPURegister fs, const MemOperand& dst, Trapper&& trapper = [](int) {});

  void LoadDouble(
      FPURegister fd, const MemOperand& src, Trapper&& trapper = [](int) {});
  void StoreDouble(
      FPURegister fs, const MemOperand& dst, Trapper&& trapper = [](int) {});

  void Ll(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});
  void Sc(Register rd, const MemOperand& rs, Trapper&& trapper = [](int) {});

  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2);
  template <typename F>
  void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2,
                         MaxMinKind kind);

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
  bool IsSingleZeroRegSet() { return has_single_zero_reg_set_; }

  inline void MoveIfZero(Register rd, Register rj, Register rk) {
    CHECK(CpuFeatures::IsSupported(ZICOND));
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    czero_nez(scratch, rj, rk);
    czero_eqz(rd, rd, rk);
    or_(rd, rd, scratch);
  }
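
  // Editorial note (not part of the original header): the Zicond sequence
  // above computes rd = (rk == 0) ? rj : rd. czero_nez keeps rj only when rk
  // is zero, czero_eqz keeps the old rd only when rk is nonzero, and the or_
  // merges the two disjoint cases.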

  inline void Move(Register dst, Tagged<Smi> smi) { li(dst, Operand(smi)); }

  inline void Move(Register dst, Register src) {
    if (dst != src) {
      mv(dst, src);
    }
  }

  inline void MoveDouble(FPURegister dst, FPURegister src) {
    if (dst != src) fmv_d(dst, src);
  }

  inline void MoveFloat(FPURegister dst, FPURegister src) {
    if (dst != src) fmv_s(dst, src);
  }

  inline void Move(FPURegister dst, FPURegister src) { MoveDouble(dst, src); }

#if V8_TARGET_ARCH_RISCV64
  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    fmv_x_d(dst_high, src);
    fmv_x_w(dst_low, src);
    srli(dst_high, dst_high, 32);
  }

  inline void Move(Register dst, FPURegister src) { fmv_x_d(dst, src); }

  inline void Move(FPURegister dst, Register src) { fmv_d_x(dst, src); }
#elif V8_TARGET_ARCH_RISCV32
  inline void Move(Register dst, FPURegister src) { fmv_x_w(dst, src); }

  inline void Move(FPURegister dst, Register src) { fmv_w_x(dst, src); }
#endif

  // Extract the sign-extended high word of an FPR into a GPR.
  inline void ExtractHighWordFromF64(Register dst_high, FPURegister src) {
#if V8_TARGET_ARCH_RISCV64
    fmv_x_d(dst_high, src);
    srai(dst_high, dst_high, 32);
#elif V8_TARGET_ARCH_RISCV32
    // TODO(riscv32): delete StoreDouble.
    AddWord(sp, sp, Operand(-8));
    StoreDouble(src, MemOperand(sp, 0));
    Lw(dst_high, MemOperand(sp, 4));
    AddWord(sp, sp, Operand(8));
#endif
  }

  // Insert the low word of GPR src_high into the high half of FPR dst.
  void InsertHighWordF64(FPURegister dst, Register src_high);

  // Extract the sign-extended low word of an FPR into a GPR.
  inline void ExtractLowWordFromF64(Register dst_low, FPURegister src) {
    fmv_x_w(dst_low, src);
  }

  // Insert the low word of GPR src_low into the low half of FPR dst.
  void InsertLowWordF64(FPURegister dst, Register src_low);

  void LoadFPRImmediate(FPURegister dst, float imm) {
    LoadFPRImmediate(dst, base::bit_cast<uint32_t>(imm));
  }
  void LoadFPRImmediate(FPURegister dst, double imm) {
    LoadFPRImmediate(dst, base::bit_cast<uint64_t>(imm));
  }
  void LoadFPRImmediate(FPURegister dst, uint32_t src);
  void LoadFPRImmediate(FPURegister dst, uint64_t src);
#if V8_TARGET_ARCH_RISCV64
  // AddOverflow64 sets the overflow register to a negative value if
  // overflow occurred; otherwise it is zero or positive.
  void AddOverflow64(Register dst, Register left, const Operand& right,
                     Register overflow);
  // SubOverflow64 sets the overflow register to a negative value if
  // overflow occurred; otherwise it is zero or positive.
  void SubOverflow64(Register dst, Register left, const Operand& right,
                     Register overflow);
  // MIPS-style 32-bit unsigned mulh.
  void Mulhu32(Register dst, Register left, const Operand& right,
               Register left_zero, Register right_zero);
#elif V8_TARGET_ARCH_RISCV32
  // AddOverflow sets the overflow register to a negative value if
  // overflow occurred; otherwise it is zero or positive.
  void AddOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // SubOverflow sets the overflow register to a negative value if
  // overflow occurred; otherwise it is zero or positive.
  void SubOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // MIPS-style 32-bit unsigned mulh.
  void Mulhu(Register dst, Register left, const Operand& right,
             Register left_zero, Register right_zero);
#endif
  // MulOverflow32 sets the overflow register to zero if no overflow occurred.
  void MulOverflow32(Register dst, Register left, const Operand& right,
                     Register overflow, bool sign_extend_inputs = true);
  // MulOverflow64 sets the overflow register to zero if no overflow occurred.
  void MulOverflow64(Register dst, Register left, const Operand& right,
                     Register overflow);
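
  // Illustrative usage (a sketch, not part of the original header): branch
  // to 'overflow_label' when a 64-bit addition overflows, using the sign
  // convention documented above:
  //
  //   __ AddOverflow64(a0, a0, Operand(a1), t0);
  //   __ Branch(&overflow_label, lt, t0, Operand(zero_reg));
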
  // Number of instructions needed to calculate the switch table entry address.
  static const int kSwitchTablePrologueSize = 6;

  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
  // functor/function with a 'Label* func(size_t index)' declaration.
  template <typename Func>
  void GenerateSwitchTable(Register index, size_t case_count,
                           Func GetLabelFunction);
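
  // Illustrative usage (a sketch, not part of the original header): emitting
  // a jump table for an array of case labels:
  //
  //   Label cases[3];
  //   __ GenerateSwitchTable(index_reg, 3,
  //                          [&cases](size_t i) { return &cases[i]; });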

  // Load an object from the root table.
  void LoadCompressedTaggedRoot(Register destination, RootIndex index);

  void LoadFeedbackVector(Register dst, Register closure, Register scratch,
                          Label* fbv_undef);

  // If the value is a NaN, canonicalize it; otherwise, do nothing.

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.

  // Convert signed word to double.
  void Cvt_d_w(FPURegister fd, Register rs);

  // Convert unsigned long to double.

  // Convert unsigned word to float.

  // Convert signed word to float.
  void Cvt_s_w(FPURegister fd, Register rs);

  // Convert unsigned long to float.

  // Convert double to unsigned word.
  void Trunc_uw_d(Register rd, FPURegister fs, Register result = no_reg);

  // Convert double to signed word.
  void Trunc_w_d(Register rd, FPURegister fs, Register result = no_reg);

  // Convert single to signed word.
  void Trunc_w_s(Register rd, FPURegister fs, Register result = no_reg);
#if V8_TARGET_ARCH_RISCV64
  // Convert double to unsigned long.
  void Trunc_ul_d(Register rd, FPURegister fs, Register result = no_reg);

  // Convert double to signed long.
  void Trunc_l_d(Register rd, FPURegister fs, Register result = no_reg);

  // Convert single to unsigned long.
  void Trunc_ul_s(Register rd, FPURegister fs, Register result = no_reg);

  // Convert single to signed long.
  void Trunc_l_s(Register rd, FPURegister fs, Register result = no_reg);

  // Round double functions.
  void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
#endif
  // Round single to signed word.
  void Round_w_s(Register rd, FPURegister fs, Register result = no_reg);

  // Round double to signed word.
  void Round_w_d(Register rd, FPURegister fs, Register result = no_reg);

  // Ceil single to signed word.
  void Ceil_w_s(Register rd, FPURegister fs, Register result = no_reg);

  // Ceil double to signed word.
  void Ceil_w_d(Register rd, FPURegister fs, Register result = no_reg);

  // Floor single to signed word.
  void Floor_w_s(Register rd, FPURegister fs, Register result = no_reg);

  // Floor double to signed word.
  void Floor_w_d(Register rd, FPURegister fs, Register result = no_reg);

  // Round float functions.
  void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
  void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);

  void Ceil_f(VRegister dst, VRegister src, Register scratch,
              VRegister v_scratch);

  void Ceil_d(VRegister dst, VRegister src, Register scratch,
              VRegister v_scratch);

  void Floor_f(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);
  void Floor_d(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);
  void Trunc_f(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);
  void Trunc_d(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);
  void Round_f(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);
  void Round_d(VRegister dst, VRegister src, Register scratch,
               VRegister v_scratch);
  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register dst, Register src) {
    static_assert(kSmiTag == 0);
#if V8_TARGET_ARCH_RISCV64
    if (SmiValuesAre32Bits()) {
      // The smi goes to the upper 32 bits.
      slli(dst, src, 32);
    } else {
      // The smi is shifted left by 1.
      Add32(dst, src, src);
    }
#elif V8_TARGET_ARCH_RISCV32
    // The smi is shifted left by 1.
    slli(dst, src, kSmiShift);
#endif
  }
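
  // Editorial note (not part of the original header): with 32-bit smis,
  // SmiTag(5) produces 5 << 32; with 31-bit smis the value is simply doubled
  // (5 becomes 10), which is the same as shifting left by one.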

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label,
                 Label::Distance distance = Label::kFar);

  // AssembleArchBinarySearchSwitchRange uses JumpIfEqual and JumpIfLessThan.
  // With V8_COMPRESS_POINTERS, the comparison is done with the lower 32 bits
  // of the input.
  void JumpIfEqual(Register a, int32_t b, Label* dest) {
#ifdef V8_COMPRESS_POINTERS
    Sll32(a, a, 0);
#endif
    Branch(dest, eq, a, Operand(b));
  }

  void JumpIfLessThan(Register a, int32_t b, Label* dest) {
#ifdef V8_COMPRESS_POINTERS
    Sll32(a, a, 0);
#endif
    Branch(dest, lt, a, Operand(b));
  }

  void JumpIfUnsignedLessThan(Register x, int32_t y, Label* dest) {
    AssertZeroExtended(x);
    Branch(dest, ult, x, Operand(y));
  }

  void JumpIfMarking(Label* is_marking,
                     Label::Distance condition_met_distance = Label::kFar);
  void JumpIfNotMarking(Label* not_marking,
                        Label::Distance condition_met_distance = Label::kFar);

  // Push a standard frame, consisting of ra, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  // Get the actual activation frame alignment for the target environment.

  // Calculate the scaled address rd = rt + (rs << sa).
  void CalcScaledAddress(Register rd, Register rt, Register rs, uint8_t sa);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.

  // Load a trusted pointer field.
  // When the sandbox is enabled, these are indirect pointers using the trusted
  // pointer table. Otherwise they are regular tagged fields.
  void LoadTrustedPointerField(Register destination, MemOperand field_operand,
                               IndirectPointerTag tag);
  // Store a trusted pointer field.
  void StoreTrustedPointerField(Register value, MemOperand dst_field_operand);
  // Load a code pointer field.
  // These are special versions of trusted pointers that, when the sandbox is
  // enabled, reference code objects through the code pointer table.
  void LoadCodePointerField(Register destination, MemOperand field_operand) {
    LoadTrustedPointerField(destination, field_operand,
                            kCodeIndirectPointerTag);
  }
  // Store a code pointer field.
  void StoreCodePointerField(Register value, MemOperand dst_field_operand) {
    StoreTrustedPointerField(value, dst_field_operand);
  }

  // Load a field containing an off-heap ("external") pointer and do any
  // necessary decoding if the sandbox is enabled.
  void LoadExternalPointerField(Register destination, MemOperand field_operand,
                                Register isolate_root = no_reg);

#if V8_TARGET_ARCH_RISCV64
  // ---------------------------------------------------------------------------
  // Pointer compression support

  // Loads a field containing any tagged value and decompresses it if necessary.
  void LoadTaggedField(const Register& destination,
                       const MemOperand& field_operand,
                       Trapper&& trapper = [](int) {});

  // Loads a field containing any tagged value but never decompresses it.
  void LoadTaggedFieldWithoutDecompressing(const Register& destination,
                                           const MemOperand& field_operand);

  // Loads a field containing a tagged signed value and decompresses it if
  // necessary.
  void LoadTaggedSignedField(const Register& destination,
                             const MemOperand& field_operand);

  // Loads a field containing a smi value and untags it.
  void SmiUntagField(Register dst, const MemOperand& src);

  // Compresses and stores a tagged value to a given on-heap location.
  void StoreTaggedField(const Register& value,
                        const MemOperand& dst_field_operand,
                        Trapper&& trapper = [](int) {});
  void AtomicStoreTaggedField(Register dst, const MemOperand& src,
                              Trapper&& trapper = [](int) {});

  void DecompressTaggedSigned(const Register& destination,
                              const MemOperand& field_operand,
                              Trapper&& trapper = [](int) {});
  void DecompressTagged(const Register& destination,
                        const MemOperand& field_operand,
                        Trapper&& trapper = [](int) {});
  void DecompressTagged(const Register& destination, const Register& source);
  void DecompressTagged(Register dst, Tagged_t immediate);
  void DecompressProtected(const Register& destination,
                           const MemOperand& field_operand,
                           Trapper&& trapper = [](int) {});

  // ---------------------------------------------------------------------------
  // V8 Sandbox support

  // Transform a SandboxedPointer from/to its encoded form, which is used when
  // the pointer is stored on the heap and ensures that the pointer will always
  // point into the sandbox.
  void DecodeSandboxedPointer(Register value);
  void LoadSandboxedPointerField(Register destination,
                                 const MemOperand& field_operand,
                                 Trapper&& trapper = [](int) {});
  void StoreSandboxedPointerField(Register value,
                                  const MemOperand& dst_field_operand,
                                  Trapper&& trapper = [](int) {});

  // Load an indirect pointer field.
  // Only available when the sandbox is enabled, but always visible to avoid
  // having to place the #ifdefs into the caller.
  void LoadIndirectPointerField(Register destination, MemOperand field_operand,
                                IndirectPointerTag tag);
  // Store an indirect pointer field.
  // Only available when the sandbox is enabled, but always visible to avoid
  // having to place the #ifdefs into the caller.
  void StoreIndirectPointerField(Register value, MemOperand dst_field_operand,
                                 Trapper&& trapper = [](int) {});

#ifdef V8_ENABLE_SANDBOX
  // Retrieve the heap object referenced by the given indirect pointer handle,
  // which can be either a trusted pointer handle or a code pointer handle.
  void ResolveIndirectPointerHandle(Register destination, Register handle,
                                    IndirectPointerTag tag);

  // Retrieve the heap object referenced by the given trusted pointer handle.
  void ResolveTrustedPointerHandle(Register destination, Register handle,
                                   IndirectPointerTag tag);
  // Retrieve the Code object referenced by the given code pointer handle.
  void ResolveCodePointerHandle(Register destination, Register handle);

  // Load the pointer to a Code's entrypoint via a code pointer.
  // Only available when the sandbox is enabled, as it requires the code
  // pointer table.
  void LoadCodeEntrypointViaCodePointer(Register destination,
                                        MemOperand field_operand,
                                        CodeEntrypointTag tag);

  // Load the value of the Code pointer table corresponding to
  // IsolateGroup::current()->code_pointer_table_.
  // Only available when the sandbox is enabled.
  void LoadCodePointerTableBase(Register destination);
#endif

  void AtomicDecompressTaggedSigned(Register dst, const MemOperand& src,
                                    Trapper&& trapper = [](int) {});
  void AtomicDecompressTagged(Register dst, const MemOperand& src,
                              Trapper&& trapper = [](int) {});

  void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
    if (COMPRESS_POINTERS_BOOL) {
      Sub32(rd, rs1, rs2);
    } else {
      SubWord(rd, rs1, rs2);
    }
  }

#elif V8_TARGET_ARCH_RISCV32
  // ---------------------------------------------------------------------------
  // Pointer compression support
  // RV32 doesn't support pointer compression; these functions are defined to
  // simplify the builtins.
  inline void LoadTaggedField(const Register& destination,
                              const MemOperand& field_operand,
                              Trapper&& trapper = [](int) {}) {
    Lw(destination, field_operand, std::forward<Trapper>(trapper));
  }

  inline void LoadTaggedSignedField(const Register& destination,
                                    const MemOperand& field_operand) {
    Lw(destination, field_operand);
  }

  inline void SmiUntagField(Register dst, const MemOperand& src) {
    SmiUntag(dst, src);
  }

  // Stores a tagged value to a given on-heap location.
  void StoreTaggedField(
      const Register& value, const MemOperand& dst_field_operand,
      Trapper&& trapper = [](int) {}) {
    Sw(value, dst_field_operand, std::forward<Trapper>(trapper));
  }

  void AtomicStoreTaggedField(
      Register src, const MemOperand& dst, Trapper&& trapper = [](int) {}) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    AddWord(scratch, dst.rm(), dst.offset());
    trapper(pc_offset());
    amoswap_w(true, true, zero_reg, src, scratch);
  }
#endif

  // NB: Control-flow integrity (V8_ENABLE_CONTROL_FLOW_INTEGRITY)
  // is not supported for RISC-V.
  //
  // Define a function entrypoint.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() { JumpTarget(); }
  void JumpTarget() {}
  void CallTarget() {}
  // Define an exception handler and bind a label.

  // Wasm into RVV
  void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew,
                          Vlmul lmul) {
    VU.set(kScratchReg, sew, lmul);
    VRegister Vsrc = idx != 0 ? kSimd128ScratchReg : src;
    if (idx != 0) {
      vslidedown_vi(kSimd128ScratchReg, src, idx);
    }
    vmv_xs(dst, Vsrc);
  }

  void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
                 Vlmul lmul);
  void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
                 Vlmul lmul);
  void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
                  Vlmul lmul);
  void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
                  Vlmul lmul);
  void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
                  Vlmul lmul);
  void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
                  Vlmul lmul);

  void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);

  void LoadLane(
      int sz, VRegister dst, uint8_t laneidx, MemOperand src,
      Trapper&& trapper = [](int) {});
  void StoreLane(
      int sz, VRegister src, uint8_t laneidx, MemOperand dst,
      Trapper&& trapper = [](int) {});

  // It assumes that the arguments are located below the stack pointer.
  void LoadReceiver(Register dest) { LoadWord(dest, MemOperand(sp, 0)); }
  void StoreReceiver(Register rec) { StoreWord(rec, MemOperand(sp, 0)); }

  bool IsNear(Label* L, Condition cond, int rs_reg);

  // Swap two registers. If the scratch register is omitted, then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void PushRoot(RootIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value from the root list.
      Label* target,
      Condition cc, Label* target);
  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal,
                  Label::Distance distance = Label::kFar) {
    Branch(if_equal, eq, with, index, distance);
  }

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, RootIndex index, const Register& result,
  void CompareTaggedRoot(const Register& with, RootIndex index,
                         const Register& result);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal,
                     Label::Distance distance = Label::kFar) {
    Branch(if_not_equal, ne, with, index, distance);
  }

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
  void JumpIfIsInRange(Register value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);
  void JumpIfObjectType(Label* target, Condition cc, Register object,
                        InstanceType instance_type, Register scratch = no_reg);

  // Fast check if the object is a js receiver type. Assumes only primitive
  // objects or js receivers are passed.
      Register heap_object, Register scratch, Label* target,
      Label::Distance distance = Label::kFar,
      Condition condition = Condition::kUnsignedGreaterThanEqual);
  void JumpIfJSAnyIsPrimitive(Register heap_object, Register scratch,
                              Label* target,
                              Label::Distance distance = Label::kFar) {
    return JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target, distance,
                                     Condition::kUnsignedLessThan);
  }
1577 // ---------------------------------------------------------------------------
1578 // GC Support
1579
1580 // Notify the garbage collector that we wrote a pointer into an object.
1581 // |object| is the object being stored into, |value| is the object being
1582 // stored. value and scratch registers are clobbered by the operation.
1583 // The offset is the offset from the start of the object, not the offset from
1584 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
1585 void RecordWriteField(
1586 Register object, int offset, Register value, RAStatus ra_status,
1587 SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline,
1588 ReadOnlyCheck ro_check = ReadOnlyCheck::kInline,
1589 SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot());
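 // Typical pattern (a sketch; registers and the field offset are
 // illustrative):
 //   StoreTaggedField(value, FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
 //   RecordWriteField(object, JSObject::kPropertiesOrHashOffset, value,
 //                    kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
 // so the GC learns about the new object -> value edge after the store.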
1590
1591 // For a given |object| notify the garbage collector that the slot at |offset|
1592 // has been written. |value| is the object being stored. The value and
1593 // address registers are clobbered by the operation.
1594 void RecordWrite(
1595 Register object, Operand offset, Register value, RAStatus ra_status,
1596 SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline,
1597 ReadOnlyCheck ro_check = ReadOnlyCheck::kInline,
1598 SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot());
1599
1600 // void Pref(int32_t hint, const MemOperand& rs);
1601
1602 // ---------------------------------------------------------------------------
1603 // Pseudo-instructions.
1604
1605 void LoadWordPair(Register rd, const MemOperand& rs);
1606 void StoreWordPair(Register rd, const MemOperand& rs);
1607
1608 void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1609 void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1610 void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1611 void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1612
1613 // stack_space - extra stack space.
1614 void EnterExitFrame(Register scratch, int stack_space,
1615 StackFrame::Type frame_type);
1616 // Leave the current exit frame.
1617 void LeaveExitFrame(Register scratch);
1618
1619 // Make sure the stack is aligned. Only emits code in debug mode.
1620 void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE;
1621
1622 // Load the global proxy from the current context.
1623 void LoadGlobalProxy(Register dst) {
1624 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
1625 }
1626
1627 void LoadNativeContextSlot(Register dst, int index);
1628
1629 // Falls through and sets scratch_and_result to 0 on failure, jumps to
1630 // on_result on success.
1631 void TryLoadOptimizedOsrCode(Register scratch_and_result,
1632 CodeKind min_opt_level, Register feedback_vector,
1633 FeedbackSlot slot, Label* on_result,
1634 Label::Distance distance);
1635
1636 // Load the initial map from the global function. The registers
1637 // function and map can be the same; function is then overwritten.
1638 void LoadGlobalFunctionInitialMap(Register function, Register map,
1639 Register scratch);
1640
1641 // -------------------------------------------------------------------------
1642 // JavaScript invokes.
1643 // On function call, call into the debugger.
1644 void CallDebugOnFunctionCall(
1645 Register fun, Register new_target,
1646 Register expected_parameter_count_or_dispatch_handle,
1647 Register actual_parameter_count);
1648
1649 // The way we invoke JSFunctions differs depending on whether leaptiering is
1650 // enabled. As such, these functions exist in two variants. In the future,
1651 // leaptiering will be used on all platforms. At that point, the
1652 // non-leaptiering variants will disappear.
1653
1654#if defined(V8_ENABLE_LEAPTIERING) && defined(V8_TARGET_ARCH_RISCV64)
1655 // Invoke the JavaScript function in the given register. Changes the
1656 // current context to the context in the function before invoking.
1657 void InvokeFunction(Register function, Register actual_parameter_count,
1658 InvokeType type,
1659 ArgumentAdaptionMode argument_adaption_mode =
1660 ArgumentAdaptionMode::kAdaptArguments);
1661 // Invoke the JavaScript function in the given register.
1662 // Changes the current context to the context in the function before invoking.
1663 void InvokeFunctionWithNewTarget(Register function, Register new_target,
1664 Register actual_parameter_count,
1665 InvokeType type);
1666 // Invoke the JavaScript function code by either calling or jumping.
1667 void InvokeFunctionCode(Register function, Register new_target,
1668 Register actual_parameter_count, InvokeType type,
1669 ArgumentAdaptionMode argument_adaption_mode =
1670 ArgumentAdaptionMode::kAdaptArguments);
1671#else
1672 void InvokeFunction(Register function, Register expected_parameter_count,
1673 Register actual_parameter_count, InvokeType type);
1674 // Invoke the JavaScript function in the given register. Changes the
1675 // current context to the context in the function before invoking.
1676 void InvokeFunctionWithNewTarget(Register function, Register new_target,
1677 Register actual_parameter_count,
1678 InvokeType type);
1679 // Invoke the JavaScript function code by either calling or jumping.
1680 void InvokeFunctionCode(Register function, Register new_target,
1681 Register expected_parameter_count,
1682 Register actual_parameter_count, InvokeType type);
1683#endif
1684
1685 // On function call, call into the debugger if necessary.
1686 void CheckDebugHook(Register fun, Register new_target,
1687 Register expected_parameter_count,
1688 Register actual_parameter_count);
1689 // ---- InstructionStream generation helpers ----
1690
1691 // ---------------------------------------------------------------------------
1692 // Support functions.
1693
1694 // Compare object type for heap object. heap_object contains a non-Smi
1695 // whose object type should be compared with the given type. This both
1696 // sets the flags and leaves the object type in the type_reg register.
1697 // It leaves the map in the map register (unless the type_reg and map register
1698 // are the same register). It leaves the heap object in the heap_object
1699 // register unless the heap_object register is the same register as one of the
1700 // other registers.
1701 void CompareObjectTypeAndJump(Register heap_object, Register map,
1702 Register type_reg, InstanceType type,
1703 Condition cond, Label* target,
1704 Label::Distance distance);
1705 // Variant of the above, which only guarantees to set the correct eq/ne flag.
1706 // Neither scratch1 nor scratch2 is guaranteed to hold any particular value.
1707 void IsObjectType(Register heap_object, Register scratch1, Register scratch2,
1708 InstanceType type);
1709
1710 // Exception handling.
1711
1712 // Push a new stack handler and link into stack handler chain.
1713 void PushStackHandler();
1714
1715 // Unlink the stack handler on top of the stack from the stack handler chain.
1716 // Must preserve the result register.
1717 void PopStackHandler();
1718
1719 // Tiering support.
1720 void AssertFeedbackCell(Register object,
1721 Register scratch) NOOP_UNLESS_DEBUG_CODE;
1722 void AssertFeedbackVector(Register object,
1723 Register scratch) NOOP_UNLESS_DEBUG_CODE;
1724 void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
1725 Register closure);
1726 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
1727
1728#ifndef V8_ENABLE_LEAPTIERING
1729 void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
1730 Register flags, Register feedback_vector, CodeKind current_code_kind,
1731 Label* flags_need_processing);
1732 void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags,
1733 Register feedback_vector);
1734#endif
1735
1736 // -------------------------------------------------------------------------
1737 // Support functions.
1738
1739 void GetObjectType(Register function, Register map, Register type_reg);
1740
1741 void GetInstanceTypeRange(Register map, Register type_reg,
1742 InstanceType lower_limit, Register range);
1743
1744 // -------------------------------------------------------------------------
1745 // Runtime calls.
1746
1747 // Call a runtime routine.
1748 void CallRuntime(const Runtime::Function* f, int num_arguments);
1749
1750 // Convenience function: Same as above, but takes the fid instead.
1751 void CallRuntime(Runtime::FunctionId fid) {
1752 const Runtime::Function* function = Runtime::FunctionForId(fid);
1753 CallRuntime(function, function->nargs);
1754 }
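 // Illustrative call (runtime function chosen arbitrarily):
 //   CallRuntime(Runtime::kStackGuard);
 // resolves the Runtime::Function entry and passes its declared nargs along.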
1755
1756 // Convenience function: Same as above, but takes the fid instead.
1757 void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
1758 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
1759 }
1760
1761 // Convenience function: tail call a runtime routine (jump).
1762 void TailCallRuntime(Runtime::FunctionId fid);
1763
1764 // Jump to the builtin routine.
1765 void JumpToExternalReference(const ExternalReference& builtin,
1766 bool builtin_exit_frame = false);
1767 // ---------------------------------------------------------------------------
1768 // In-place weak references.
1769 void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1770
1771 // -------------------------------------------------------------------------
1772 // StatsCounter support.
1773
1774 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1775 Register scratch2) {
1776 if (!v8_flags.native_code_counters) return;
1777 EmitIncrementCounter(counter, value, scratch1, scratch2);
1778 }
1779 void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
1780 Register scratch2);
1781 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1782 Register scratch2) {
1783 if (!v8_flags.native_code_counters) return;
1784 EmitDecrementCounter(counter, value, scratch1, scratch2);
1785 }
1786 void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
1787 Register scratch2);
1788
1789 // -------------------------------------------------------------------------
1790 // Stack limit utilities
1791 void LoadStackLimit(Register destination, StackLimitKind kind);
1792 void StackOverflowCheck(Register num_args, Register scratch1,
1793 Register scratch2, Label* stack_overflow,
1794 Label* done = nullptr);
1795
1796 // Computes dst = (int32 value of the Smi in src) << scale.
1797 void SmiScale(Register dst, Register src, int scale) {
1798#if V8_TARGET_ARCH_RISCV64
1799 if (SmiValuesAre32Bits()) {
1800 // The int portion is the upper 32 bits of the 64-bit word.
1801 srai(dst, src, (kSmiShift - scale) & 0x3F);
1802 } else {
1803 DCHECK(SmiValuesAre31Bits());
1804 DCHECK_GE(scale, kSmiTagSize);
1805 slliw(dst, src, scale - kSmiTagSize);
1806 }
1807#elif V8_TARGET_ARCH_RISCV32
1808 DCHECK(SmiValuesAre31Bits());
1809 DCHECK_GE(scale, kSmiTagSize);
1810 slli(dst, src, scale - kSmiTagSize);
1811#endif
1812 }
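 // Worked example (illustrative): with 31-bit Smis (kSmiTagSize == 1), a Smi
 // holds 2*n, so SmiScale(dst, src, 3) emits a left shift by 2:
 //   (2*n) << 2 == n << 3
 // i.e. the untagged value scaled by 8 in a single shift.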
1813
1814 // Test if the register contains a smi.
1815 inline void SmiTst(Register value, Register scratch) {
1816 And(scratch, value, Operand(kSmiTagMask));
1817 }
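 // Illustrative follow-up (a sketch): SmiTst leaves (value & kSmiTagMask) in
 // scratch, and the Smi tag is zero, so a smi test reads
 //   SmiTst(a0, t0);
 //   Branch(&is_smi, eq, t0, Operand(zero_reg));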
1818
1819 enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
1820 enum ArgumentsCountType { kCountIsInteger, kCountIsSmi };
1821 void DropArguments(Register count);
1822 void DropArgumentsAndPushNewReceiver(Register argc, Register receiver);
1823
1824 void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
1825 Label* if_marked_for_deoptimization);
1826 Operand ClearedValue() const;
1827
1828 // Jump if the register contains a non-smi.
1829 void JumpIfNotSmi(Register value, Label* not_smi_label,
1830 Label::Distance dist = Label::kFar);
1831 // Abort execution if argument is not a Constructor, enabled via --debug-code.
1832 void AssertConstructor(Register object);
1833
1834 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1835 void AssertFunction(Register object);
1836
1837 // Abort execution if argument is not a callable JSFunction, enabled via
1838 // --debug-code.
1839 void AssertCallableFunction(Register object);
1840
1841 // Abort execution if argument is not a JSBoundFunction,
1842 // enabled via --debug-code.
1843 void AssertBoundFunction(Register object);
1844
1845 // Abort execution if argument is not a JSGeneratorObject (or subclass),
1846 // enabled via --debug-code.
1847 void AssertGeneratorObject(Register object);
1848
1849 // Calls Abort(msg) if the condition cc is not satisfied.
1850 // Use --debug_code to enable.
1851 void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
1852
1853 // Like Assert(), but without condition.
1854 // Use --debug_code to enable.
1855 void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
1856
1857 // Abort execution if argument is not undefined or an AllocationSite, enabled
1858 // via --debug-code.
1859 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1860
1861 template <typename Field>
1862 void DecodeField(Register dst, Register src) {
1863 ExtractBits(dst, src, Field::kShift, Field::kSize);
1864 }
1865
1866 template <typename Field>
1867 void DecodeField(Register reg) {
1868 DecodeField<Field>(reg, reg);
1869 }
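 // Example (hypothetical field, for illustration only): given
 //   using KindField = base::BitField<int, 3, 4>;  // shift 3, size 4
 // DecodeField<KindField>(dst, src) extracts bits [6:3] of src into dst,
 // i.e. dst = (src >> 3) & 0xf.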
1870
1871#ifdef V8_ENABLE_LEAPTIERING
1872 // Load the entrypoint pointer of a JSDispatchTable entry.
1873 void LoadEntrypointFromJSDispatchTable(Register destination,
1874 Register dispatch_handle,
1875 Register scratch);
1876 void LoadEntrypointFromJSDispatchTable(Register destination,
1877 JSDispatchHandle dispatch_handle,
1878 Register scratch);
1879#ifdef V8_TARGET_ARCH_RISCV64
1880 // On 32 bit architectures only the mark bit is shared with the pointer.
1881 // see src/sandbox/js-dispatch-table.h
1882 void LoadParameterCountFromJSDispatchTable(Register destination,
1883 Register dispatch_handle,
1884 Register scratch);
1885 void LoadEntrypointAndParameterCountFromJSDispatchTable(
1886 Register entrypoint, Register parameter_count, Register dispatch_handle,
1887 Register scratch);
1888#endif // V8_TARGET_ARCH_RISCV64
1889#endif // V8_ENABLE_LEAPTIERING
1890 // Load a protected pointer field.
1891 void LoadProtectedPointerField(Register destination,
1892 MemOperand field_operand);
1893 // Performs a truncating conversion of a floating point number as used by
1894 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Jumps to 'done' if it
1895 // succeeds; otherwise (when the result saturates) it falls through. On return,
1896 // 'result' either holds the answer or is clobbered on fall-through.
1897 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
1898 Label* done);
1899
1900 protected:
1901 inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
1902 inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
1903
1904 private:
1905 bool has_double_zero_reg_set_ = false;
1906 bool has_single_zero_reg_set_ = false;
1907
1908 int CallCFunctionHelper(
1909 Register function, int num_reg_arguments, int num_double_arguments,
1910 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
1911 Label* return_location = nullptr);
1912
1913 // TODO(RISCV) Reorder parameters so out parameters come last.
1914 bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
1915 bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
1916 Register* scratch, const Operand& rt);
1917
1918 void BranchShortHelper(int32_t offset, Label* L);
1919 bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
1920 const Operand& rt);
1921 bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
1922 const Operand& rt);
1923
1924 void BranchAndLinkShortHelper(int32_t offset, Label* L);
1925 void BranchAndLinkShort(int32_t offset);
1926 void BranchAndLinkShort(Label* L);
1927 bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
1928 Register rs, const Operand& rt);
1929 bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
1930 Register rs, const Operand& rt);
1931 void BranchAndLinkLong(Label* L);
1932#if V8_TARGET_ARCH_RISCV64
1933 template <typename F_TYPE>
1934 void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
1935 FPURoundingMode mode);
1936#elif V8_TARGET_ARCH_RISCV32
1937 void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
1938 FPURoundingMode mode);
1939
1940 void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
1941 FPURoundingMode mode);
1942#endif
1943 template <typename F>
1944 void RoundHelper(VRegister dst, VRegister src, Register scratch,
1945 VRegister v_scratch, FPURoundingMode frm,
1946 bool keep_nan_same = true);
1947
1948 template <typename TruncFunc>
1949 void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
1950 TruncFunc trunc);
1951
1952 // Push a fixed frame, consisting of ra, fp.
1953 void PushCommonFrame(Register marker_reg = no_reg);
1954
1955 // Helper functions for generating invokes.
1956 void InvokePrologue(Register expected_parameter_count,
1957 Register actual_parameter_count, InvokeType type);
1958
1959 // Compute memory operands for safepoint stack slots.
1960 static int SafepointRegisterStackIndex(int reg_code);
1961
1962 // Needs access to SafepointRegisterStackIndex for compiled frame
1963 // traversal.
1964 friend class CommonFrame;
1965
1966 DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
1967};
1968
1969template <typename Func>
1970void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
1971 Func GetLabelFunction) {
1972 // Ensure that dd-ed labels following this instruction use 8-byte-aligned
1973 // addresses.
1974 BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
1975 kSwitchTablePrologueSize);
1976 UseScratchRegisterScope temps(this);
1977 Register scratch = temps.Acquire();
1978 Register scratch2 = temps.Acquire();
1979
1980 Align(8);
1981 // Load the address from the jump table at index and jump to it
1982 auipc(scratch, 0); // Load the current PC into scratch
1983 slli(scratch2, index,
1984 kSystemPointerSizeLog2); // scratch2 = offset of indexth entry
1985 add(scratch2, scratch2,
1986 scratch); // scratch2 = (saved PC) + (offset of indexth entry)
1987 LoadWord(scratch2,
1988 MemOperand(scratch2,
1989 6 * kInstrSize)); // Add the size of these 6 instructions
1990 // to the offset, then load
1991 jr(scratch2); // Jump to the address loaded from the table
1992 nop(); // For 16-byte alignment
1993 for (size_t index = 0; index < case_count; ++index) {
1994 dd(GetLabelFunction(index));
1995 }
1996}
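// Usage sketch (illustrative, not from this file; assumes the usual
// '#define __ masm->' convention):
//   Label case0, case1, case2;
//   __ GenerateSwitchTable(a0, 3, [&](size_t i) {
//     Label* cases[] = {&case0, &case1, &case2};
//     return cases[i];
//   });
// The table is a sequence of absolute label addresses emitted with dd(), so
// GetLabelFunction(i) must return the label for case i.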
1997
1998struct MoveCycleState {
1999 // List of scratch registers reserved for pending moves in a move cycle, and
2000 // which should therefore not be used as a temporary location by
2001 // {MoveToTempLocation}.
2002 RegList scratch_regs;
2003 // Available scratch registers during the move cycle resolution scope.
2004 std::optional<UseScratchRegisterScope> temps;
2005 // Scratch register picked by {MoveToTempLocation}.
2006 std::optional<Register> scratch_reg;
2007};
2008
2009 inline MemOperand ExitFrameStackSlotOperand(int offset) {
2010 static constexpr int kSPOffset = 1 * kSystemPointerSize;
2011 return MemOperand(sp, kSPOffset + offset);
2012}
2013
2014 inline MemOperand ExitFrameCallerStackSlotOperand(int index) {
2015 return MemOperand(fp, (ExitFrameConstants::kFixedSlotCountAboveFp + index) *
2016 kSystemPointerSize);
2017}
2018
2019// Calls an API function. Allocates HandleScope, extracts returned value
2020// from handle and propagates exceptions. Clobbers C argument registers
2021// and C caller-saved registers. Restores context. On return removes
2022// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
2023// (GCed, includes the call JS arguments space and the additional space
2024// allocated for the fast call).
2025void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
2026 Register function_address,
2027 ExternalReference thunk_ref, Register thunk_arg,
2028 int slots_to_drop_on_return,
2029 MemOperand* argc_operand,
2030 MemOperand return_value_operand);
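// Stack accounting sketch (illustrative numbers): if the call set up 2 extra
// slots on top of the JS arguments, the epilogue drops
//   (*argc_operand + 2) * kSystemPointerSize
// bytes, i.e. slots_to_drop_on_return = 2 while argc is read from
// argc_operand at runtime.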
2031
2032#define ACCESS_MASM(masm) masm->
2033
2034} // namespace internal
2035} // namespace v8
2036
2037#endif // V8_CODEGEN_RISCV_MACRO_ASSEMBLER_RISCV_H_