v8
V8 is Google's open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
macro-assembler-mips64.h
Go to the documentation of this file.
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
6#define V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
7
8#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
9#error This header must be included via macro-assembler.h
10#endif
11
12#include <optional>
13
16#include "src/common/globals.h"
19
20namespace v8 {
21namespace internal {
22
23// Forward declarations.
24enum class AbortReason : uint8_t;
25
26// Reserved Register Usage Summary.
27//
28// Registers t8, t9, and at are reserved for use by the MacroAssembler.
29//
30// The programmer should know that the MacroAssembler may clobber these three,
31// but won't touch other registers except in special cases.
32//
33// Per the MIPS ABI, register t9 must be used for indirect function call
34// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
35// trying to update gp register for position-independent-code. Whenever
36// MIPS generated code calls C code, it must be via t9 register.
37
38// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
40
41 // Flags used for the li (load-immediate) macro-assembler function.
42enum LiFlags {
 43 // If the constant value can be represented in just 16 bits, then
 44 // optimize the li to use a single instruction, rather than the
 45 // lui/ori/dsll sequence. A number of other optimizations that emit
 46 // fewer than the maximum number of instructions also exist.
 47 OPTIMIZE_SIZE = 0,
 48 // Always use 6 instructions (lui/ori/dsll sequence) for release 2 or 4
 49 // instructions for release 6 (lui/ori/dahi/dati), even if the constant
 50 // could be loaded with just one, so that this value is patchable later.
 51 CONSTANT_SIZE = 1,
 52 // For address loads only 4 instructions are required. Used to mark a
 53 // constant load that will be used as an address without relocation
 54 // information. It ensures predictable code size, so specific sites
 55 // in code are patchable.
 56 ADDRESS_LOAD = 2
 57};
58
60
61Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
62 Register reg3 = no_reg,
63 Register reg4 = no_reg,
64 Register reg5 = no_reg,
65 Register reg6 = no_reg);
66
67// -----------------------------------------------------------------------------
68// Static helper functions.
69
70#if defined(V8_TARGET_LITTLE_ENDIAN)
71#define SmiWordOffset(offset) (offset + kPointerSize / 2)
72#else
73#define SmiWordOffset(offset) offset
74#endif
75
76 // Generate a MemOperand for loading a field from an object. |offset| is
// given relative to the start of the object; kHeapObjectTag is subtracted
// to compensate for the tag bit carried in the object pointer.
77inline MemOperand FieldMemOperand(Register object, int offset) {
78 return MemOperand(object, offset - kHeapObjectTag);
79}
80
81// Generate a MemOperand for storing arguments 5..N on the stack
82// when calling CallCFunction().
83// TODO(plind): Currently ONLY used for O32. Should be fixed for
84// n64, and used in RegExp code, and other places
85// with more than 8 arguments.
88 // Argument 5 takes the slot just past the four Arg-slots.
89 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
90 return MemOperand(sp, offset);
91}
92
93class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
94 public:
95 using MacroAssemblerBase::MacroAssemblerBase;
96
97 // Activation support.
// This overload exists only to satisfy the shared MacroAssembler interface;
// MIPS has no out-of-line constant pool, so it must never be reached.
99 void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
100 // Out-of-line constant pool not implemented on mips.
101 UNREACHABLE();
102 }
104
// Claim |bytes| of stack by moving sp down. Register variant: the caller
// guarantees a non-negative byte count.
105 void AllocateStackSpace(Register bytes) { Dsubu(sp, sp, bytes); }
106
// Immediate variant: asserts the count is non-negative and emits no code
// for a zero-sized allocation.
107 void AllocateStackSpace(int bytes) {
108 DCHECK_GE(bytes, 0);
109 if (bytes == 0) return;
110 Dsubu(sp, sp, Operand(bytes));
111 }
112
113 // Generates function and stub prologue code.
115 void Prologue();
116
118 ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
119 li(kRootRegister, Operand(isolate_root));
120 }
121
122 // Jump unconditionally to the given label.
123 // We NEED a nop in the branch delay slot, as it is used by v8, for example
124 // in CodeGenerator::ProcessDeferred().
125 // Currently the branch delay slot is filled by the MacroAssembler.
126 // Prefer b(Label) for code generation.
127 void jmp(Label* L) { Branch(L); }
128
129 // -------------------------------------------------------------------------
130 // Debugging.
131
132 void Trap();
134
135 // Calls Abort(msg) if the condition cc is not satisfied.
136 // Use --debug_code to enable.
139
140 void AssertJSAny(Register object, Register map_tmp, Register tmp,
142
143 // Like Assert(), but always enabled.
144 void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
145
146 // Same as Check() but expresses that the check is needed for the sandbox.
148
149 // Print a message to stdout and abort execution.
151
152 // Arguments macros.
153#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
154#define COND_ARGS cond, r1, r2
155
156 // Cases when relocation is not needed.
157#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
158 void Name(target_type target, BranchDelaySlot bd = PROTECT); \
159 inline void Name(BranchDelaySlot bd, target_type target) { \
160 Name(target, bd); \
161 } \
162 void Name(target_type target, COND_TYPED_ARGS, \
163 BranchDelaySlot bd = PROTECT); \
164 inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
165 Name(target, COND_ARGS, bd); \
166 }
167
168#define DECLARE_BRANCH_PROTOTYPES(Name) \
169 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
170 DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
171
173 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
174 DECLARE_BRANCH_PROTOTYPES(BranchShort)
175
176#undef DECLARE_BRANCH_PROTOTYPES
177#undef COND_TYPED_ARGS
178#undef COND_ARGS
179
180 // Floating point branches
182 CompareF(S, cc, cmp1, cmp2);
183 }
184
186 CompareIsNanF(S, cmp1, cmp2);
187 }
188
190 CompareF(D, cc, cmp1, cmp2);
191 }
192
194 CompareIsNanF(D, cmp1, cmp2);
195 }
196
197 void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT);
198 void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT);
199
200 void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT);
201 void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT);
202
203 // MSA branches
205 MSARegister wt, BranchDelaySlot bd = PROTECT);
206
208 const Operand& rhs);
209
210 void BranchLong(int32_t offset, BranchDelaySlot bdslot = PROTECT);
211 void Branch(Label* L, Condition cond, Register rs, RootIndex index,
212 BranchDelaySlot bdslot = PROTECT);
213
214 static int InstrCountForLi64Bit(int64_t value);
216 void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
217 // Load int32 in the rd register.
218 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
219 inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
220 li(rd, Operand(j), mode);
221 }
222 // inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
223 // li(rd, Operand(static_cast<int64_t>(j)), mode);
224 // }
225 void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
226 void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
227
228 void LoadFromConstantsTable(Register destination, int constant_index) final;
231 void StoreRootRelative(int32_t offset, Register value) final;
232
233 // Operand pointing to an external reference.
234 // May emit code to set up the scratch register. The operand is
235 // only guaranteed to be correct as long as the scratch register
236 // isn't changed.
237 // If the operand is used more than once, use a scratch register
238 // that is guaranteed not to be clobbered.
240 Register scratch);
242 return ExternalReferenceAsOperand(ExternalReference::Create(id), no_reg);
243 }
244
245 inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
246
247// Jump, Call, and Ret pseudo instructions implementing inter-working.
248#define COND_ARGS \
249 Condition cond = al, Register rs = zero_reg, \
250 const Operand &rt = Operand(zero_reg), \
251 BranchDelaySlot bd = PROTECT
252
253 void Jump(Register target, COND_ARGS);
254 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
255 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
256 // Differs from li: this method saves the target to memory and then loads
257 // it into a register using Ld, so it can be used in the wasm jump table
258 // for concurrent patching.
259 void PatchAndJump(Address target);
261 void Jump(const ExternalReference& reference);
262 void Call(Register target, COND_ARGS);
263 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
264 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
265 COND_ARGS);
266 void Call(Label* target);
267 void LoadAddress(Register dst, Label* target);
269
270 // Load the builtin given by the Smi in |builtin_index| into |target|.
271 void LoadEntryFromBuiltinIndex(Register builtin_index, Register target);
274
275 void CallBuiltinByIndex(Register builtin_index, Register target);
276 void CallBuiltin(Builtin builtin);
278 void TailCallBuiltin(Builtin builtin, Condition cond, Register type,
279 Operand range);
280
281 // Load the code entry point from the Code object.
283 Register code_data_container_object,
285 void CallCodeObject(Register code_data_container_object,
287 void JumpCodeObject(Register code_data_container_object,
289 JumpMode jump_mode = JumpMode::kJump);
290
291 // Convenience functions to call/jmp to the code of a JSFunction object.
292 void CallJSFunction(Register function_object, uint16_t argument_count);
293 void JumpJSFunction(Register function_object,
294 JumpMode jump_mode = JumpMode::kJump);
295
296#ifdef V8_ENABLE_WEBASSEMBLY
297 void ResolveWasmCodePointer(Register target);
298 void CallWasmCodePointer(Register target,
299 CallJumpMode call_jump_mode = CallJumpMode::kCall);
300#endif
301
302 // Generates an instruction sequence s.t. the return address points to the
303 // instruction following the call.
304 // The return address on the stack is used by frame iteration.
307 void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
309 Label* jump_deoptimization_entry_label);
310
312 inline void Ret(BranchDelaySlot bd, Condition cond = al,
313 Register rs = zero_reg,
314 const Operand& rt = Operand(zero_reg)) {
315 Ret(cond, rs, rt, bd);
316 }
317
318 // Emit code to discard a non-negative number of pointer-sized elements
319 // from the stack, clobbering only the sp register.
320 void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
321 const Operand& op = Operand(no_reg));
322
325
326 // Trivial case of DropAndRet that utilizes the delay slot.
327 void DropAndRet(int drop);
328
329 void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
330
331 void Ld(Register rd, const MemOperand& rs);
332 void Sd(Register rd, const MemOperand& rs);
333
// Push a single register on the stack (sp is moved down by one slot first).
334 void push(Register src) {
335 Daddu(sp, sp, Operand(-kPointerSize));
336 Sd(src, MemOperand(sp, 0));
337 }
338 void Push(Register src) { push(src); }
340 void Push(Tagged<Smi> smi);
341
342 // Push two registers. Pushes leftmost register first (to highest address).
343 void Push(Register src1, Register src2) {
344 Dsubu(sp, sp, Operand(2 * kPointerSize));
345 Sd(src1, MemOperand(sp, 1 * kPointerSize));
346 Sd(src2, MemOperand(sp, 0 * kPointerSize));
347 }
348
349 // Push three registers. Pushes leftmost register first (to highest address).
350 void Push(Register src1, Register src2, Register src3) {
351 Dsubu(sp, sp, Operand(3 * kPointerSize));
352 Sd(src1, MemOperand(sp, 2 * kPointerSize));
353 Sd(src2, MemOperand(sp, 1 * kPointerSize));
354 Sd(src3, MemOperand(sp, 0 * kPointerSize));
355 }
356
357 // Push four registers. Pushes leftmost register first (to highest address).
358 void Push(Register src1, Register src2, Register src3, Register src4) {
359 Dsubu(sp, sp, Operand(4 * kPointerSize));
360 Sd(src1, MemOperand(sp, 3 * kPointerSize));
361 Sd(src2, MemOperand(sp, 2 * kPointerSize));
362 Sd(src3, MemOperand(sp, 1 * kPointerSize));
363 Sd(src4, MemOperand(sp, 0 * kPointerSize));
364 }
365
366 // Push five registers. Pushes leftmost register first (to highest address).
367 void Push(Register src1, Register src2, Register src3, Register src4,
368 Register src5) {
369 Dsubu(sp, sp, Operand(5 * kPointerSize));
370 Sd(src1, MemOperand(sp, 4 * kPointerSize));
371 Sd(src2, MemOperand(sp, 3 * kPointerSize));
372 Sd(src3, MemOperand(sp, 2 * kPointerSize));
373 Sd(src4, MemOperand(sp, 1 * kPointerSize));
374 Sd(src5, MemOperand(sp, 0 * kPointerSize));
375 }
376
377 void Push(Register src, Condition cond, Register tst1, Register tst2) {
378 // Since we don't have conditional execution we use a Branch.
379 Branch(3, cond, tst1, Operand(tst2));
380 Dsubu(sp, sp, Operand(kPointerSize));
381 Sd(src, MemOperand(sp, 0));
382 }
383
384 enum PushArrayOrder { kNormal, kReverse };
385 void PushArray(Register array, Register size, Register scratch,
386 Register scratch2, PushArrayOrder order = kNormal);
387
390
391 void CallEphemeronKeyBarrier(Register object, Register slot_address,
392 SaveFPRegsMode fp_mode);
393
395 Register object, Register slot_address, SaveFPRegsMode fp_mode,
396 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
398 Register object, Register slot_address, SaveFPRegsMode fp_mode,
399 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
400
401 // Push multiple registers on the stack.
402 // Registers are saved in numerical order, with higher numbered registers
403 // saved in higher memory addresses.
404 void MultiPush(RegList regs);
407
408 // Calculate how much stack space (in bytes) are required to store caller
409 // registers excluding those specified in the arguments.
411 Register exclusion1 = no_reg,
412 Register exclusion2 = no_reg,
413 Register exclusion3 = no_reg) const;
414
415 // Push caller saved registers on the stack, and return the number of bytes
416 // stack pointer is adjusted.
417 int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
418 Register exclusion2 = no_reg,
419 Register exclusion3 = no_reg);
420 // Restore caller saved registers from the stack, and return the number of
421 // bytes stack pointer is adjusted.
422 int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
423 Register exclusion2 = no_reg,
424 Register exclusion3 = no_reg);
425
// Pop a single register from the stack (sp is moved up by one slot after).
426 void pop(Register dst) {
427 Ld(dst, MemOperand(sp, 0));
428 Daddu(sp, sp, Operand(kPointerSize));
429 }
430 void Pop(Register dst) { pop(dst); }
431
432 // Pop two registers. Pops rightmost register first (from lower address).
433 void Pop(Register src1, Register src2) {
434 DCHECK(src1 != src2);
435 Ld(src2, MemOperand(sp, 0 * kPointerSize));
436 Ld(src1, MemOperand(sp, 1 * kPointerSize));
437 Daddu(sp, sp, 2 * kPointerSize);
438 }
439
440 // Pop three registers. Pops rightmost register first (from lower address).
441 void Pop(Register src1, Register src2, Register src3) {
442 Ld(src3, MemOperand(sp, 0 * kPointerSize));
443 Ld(src2, MemOperand(sp, 1 * kPointerSize));
444 Ld(src1, MemOperand(sp, 2 * kPointerSize));
445 Daddu(sp, sp, 3 * kPointerSize);
446 }
447
448 void Pop(uint32_t count = 1) { Daddu(sp, sp, Operand(count * kPointerSize)); }
449
450 // Pops multiple values from the stack and load them in the
451 // registers specified in regs. Pop order is the opposite as in MultiPush.
452 void MultiPop(RegList regs);
455
456#define DEFINE_INSTRUCTION(instr) \
457 void instr(Register rd, Register rs, const Operand& rt); \
458 void instr(Register rd, Register rs, Register rt) { \
459 instr(rd, rs, Operand(rt)); \
460 } \
461 void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
462
463#define DEFINE_INSTRUCTION2(instr) \
464 void instr(Register rs, const Operand& rt); \
465 void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
466 void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
467
469 DEFINE_INSTRUCTION(Daddu)
472 DEFINE_INSTRUCTION(Ddivu)
477 DEFINE_INSTRUCTION(Dsubu)
479 DEFINE_INSTRUCTION(Dmodu)
482 DEFINE_INSTRUCTION(Mulhu)
484 DEFINE_INSTRUCTION(Dmulh)
485 DEFINE_INSTRUCTION(Dmulhu)
489 DEFINE_INSTRUCTION2(Dmultu)
494
500
509
510 // MIPS32 R2 instruction macro.
513
514#undef DEFINE_INSTRUCTION
515#undef DEFINE_INSTRUCTION2
516#undef DEFINE_INSTRUCTION3
517
// Tag |src| as a Smi into |dst|. With 32-bit Smi payloads the value is moved
// to the upper word (shift left by 32); otherwise the value is doubled,
// i.e. shifted left by the 1-bit tag (kSmiTag == 0, asserted below).
518 void SmiTag(Register dst, Register src) {
519 static_assert(kSmiTag == 0);
520 if (SmiValuesAre32Bits()) {
521 dsll32(dst, src, 0);
522 } else {
// NOTE(review): source line 523 is elided in this extraction — presumably a
// DCHECK for the 31-bit-Smi configuration; confirm against upstream V8.
524 Addu(dst, src, src);
525 }
526 }
527
529
530 void SmiUntag(Register dst, const MemOperand& src);
531 void SmiUntag(Register dst, Register src) {
532 if (SmiValuesAre32Bits()) {
533 dsra32(dst, src, kSmiShift - 32);
534 } else {
536 sra(dst, src, kSmiShift);
537 }
538 }
539
541
542 // Left-shifted from int32 equivalent of Smi.
543 void SmiScale(Register dst, Register src, int scale) {
544 if (SmiValuesAre32Bits()) {
545 // The int portion is upper 32-bits of 64-bit word.
546 dsra(dst, src, kSmiShift - scale);
547 } else {
550 sll(dst, src, scale - kSmiTagSize);
551 }
552 }
553
554 // On MIPS64, we should sign-extend 32-bit values.
556 if (v8_flags.enable_slow_asserts) {
557 AssertSmi(smi);
558 }
560 SmiUntag(smi);
561 }
562
563 // Abort execution if argument is a smi, enabled via --debug-code.
566
567 int CalculateStackPassedWords(int num_reg_arguments,
568 int num_double_arguments);
569
570 // Before calling a C-function from generated code, align arguments on stack
571 // and add space for the four mips argument slots.
572 // After aligning the frame, non-register arguments must be stored on the
573 // stack, after the argument-slots using helper: CFunctionArgumentOperand().
574 // The argument count assumes all arguments are word sized.
575 // Some compilers/platforms require the stack to be aligned when calling
576 // C++ code.
577 // Needs a scratch register to do some arithmetic. This register will be
578 // trashed.
579 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
580 Register scratch);
581 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
582
583 // Arguments 1-4 are placed in registers a0 through a3 respectively.
584 // Arguments 5..n are stored to stack using following:
585 // Sw(a4, CFunctionArgumentOperand(5));
586
587 // Calls a C function and cleans up the space for arguments allocated
588 // by PrepareCallCFunction. The called function is not allowed to trigger a
589 // garbage collection, since that might move the code and invalidate the
590 // return address (unless this is somehow accounted for by the called
591 // function).
593 ExternalReference function, int num_arguments,
594 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
595 Label* return_location = nullptr);
597 Register function, int num_arguments,
598 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
599 Label* return_location = nullptr);
601 ExternalReference function, int num_reg_arguments,
602 int num_double_arguments,
603 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
604 Label* return_location = nullptr);
606 Register function, int num_reg_arguments, int num_double_arguments,
607 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
608 Label* return_location = nullptr);
609 void MovFromFloatResult(DoubleRegister dst);
610 void MovFromFloatParameter(DoubleRegister dst);
611
612 // There are two ways of passing double arguments on MIPS, depending on
613 // whether soft or hard floating point ABI is used. These functions
614 // abstract parameter passing for the three different ways we call
615 // C functions from generated code.
616 void MovToFloatParameter(DoubleRegister src);
617 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
618 void MovToFloatResult(DoubleRegister src);
619
620 // See comments at the beginning of Builtins::Generate_CEntry.
621 inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
622 inline void PrepareCEntryFunction(const ExternalReference& ref) {
623 li(a1, ref);
624 }
625
626 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
627 Label* condition_met);
628#undef COND_ARGS
629
630 // Performs a truncating conversion of a floating point number as used by
631 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
632 // Exits with 'result' holding the answer.
634 DoubleRegister double_input, StubCallMode stub_mode);
635
636 // Conditional move.
637 void Movz(Register rd, Register rs, Register rt);
638 void Movn(Register rd, Register rs, Register rt);
639 void Movt(Register rd, Register rs, uint16_t cc = 0);
640 void Movf(Register rd, Register rs, uint16_t cc = 0);
641
644
647
648 void Clz(Register rd, Register rs);
649 void Dclz(Register rd, Register rs);
650 void Ctz(Register rd, Register rs);
651 void Dctz(Register rd, Register rs);
654
655 // MIPS64 R2 instruction macro.
656 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
657 void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
658 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
659 void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
660 void ExtractBits(Register dest, Register source, Register pos, int size,
661 bool sign_extend = false);
662 void InsertBits(Register dest, Register source, Register pos, int size);
665
666 // MIPS64 R6 instruction macros.
667 void Bovc(Register rt, Register rs, Label* L);
668 void Bnvc(Register rt, Register rs, Label* L);
669
670 // Convert single to unsigned word.
673
674 // Change endianness
675 void ByteSwapSigned(Register dest, Register src, int operand_size);
676 void ByteSwapUnsigned(Register dest, Register src, int operand_size);
677
678 void Ulh(Register rd, const MemOperand& rs);
679 void Ulhu(Register rd, const MemOperand& rs);
680 void Ush(Register rd, const MemOperand& rs, Register scratch);
681
682 void Ulw(Register rd, const MemOperand& rs);
683 void Ulwu(Register rd, const MemOperand& rs);
684 void Usw(Register rd, const MemOperand& rs);
685
686 void Uld(Register rd, const MemOperand& rs);
687 void Usd(Register rd, const MemOperand& rs);
688
689 void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
690 void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
691
692 void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
693 void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
694
695 void Lb(Register rd, const MemOperand& rs);
696 void Lbu(Register rd, const MemOperand& rs);
697 void Sb(Register rd, const MemOperand& rs);
698
699 void Lh(Register rd, const MemOperand& rs);
700 void Lhu(Register rd, const MemOperand& rs);
701 void Sh(Register rd, const MemOperand& rs);
702
703 void Lw(Register rd, const MemOperand& rs);
704 void Lwu(Register rd, const MemOperand& rs);
705 void Sw(Register rd, const MemOperand& rs);
706
707 void Lwc1(FPURegister fd, const MemOperand& src);
708 void Swc1(FPURegister fs, const MemOperand& dst);
709
710 void Ldc1(FPURegister fd, const MemOperand& src);
711 void Sdc1(FPURegister fs, const MemOperand& dst);
712
713 void Ll(Register rd, const MemOperand& rs);
714 void Sc(Register rd, const MemOperand& rs);
715
716 void Lld(Register rd, const MemOperand& rs);
717 void Scd(Register rd, const MemOperand& rs);
718
719 // Perform a floating-point min or max operation with the
720 // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
721 // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
722 // handled in out-of-line code. The specific behaviour depends on supported
723 // instructions.
724 //
725 // These functions assume (and assert) that src1!=src2. It is permitted
726 // for the result to alias either input register.
728 Label* out_of_line);
730 Label* out_of_line);
732 Label* out_of_line);
734 Label* out_of_line);
735
736 // Generate out-of-line cases for the macros above.
741
742 bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
743
745
746 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
747
748 inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
749 inline void Move(Register dst, Tagged<Smi> value) { li(dst, Operand(value)); }
750
751 inline void Move(Register dst, Register src) {
752 if (dst != src) {
753 mov(dst, src);
754 }
755 }
756
757 inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
758
759 inline void Move(Register dst_low, Register dst_high, FPURegister src) {
760 mfc1(dst_low, src);
761 mfhc1(dst_high, src);
762 }
763
764 inline void Move(Register dst, FPURegister src) { dmfc1(dst, src); }
765
766 inline void Move(FPURegister dst, Register src) { dmtc1(src, dst); }
767
768 inline void FmoveHigh(Register dst_high, FPURegister src) {
769 mfhc1(dst_high, src);
770 }
771
772 inline void FmoveHigh(FPURegister dst, Register src_high) {
773 mthc1(src_high, dst);
774 }
775
776 inline void FmoveLow(Register dst_low, FPURegister src) {
777 mfc1(dst_low, src);
778 }
779
780 void FmoveLow(FPURegister dst, Register src_low);
781
782 inline void Move(FPURegister dst, Register src_low, Register src_high) {
783 mtc1(src_low, dst);
784 mthc1(src_high, dst);
785 }
786
787 inline void Move_d(FPURegister dst, FPURegister src) {
788 if (dst != src) {
789 mov_d(dst, src);
790 }
791 }
792
793 inline void Move_s(FPURegister dst, FPURegister src) {
794 if (dst != src) {
795 mov_s(dst, src);
796 }
797 }
798
799 void Move(FPURegister dst, float imm) {
800 Move(dst, base::bit_cast<uint32_t>(imm));
801 }
802 void Move(FPURegister dst, double imm) {
803 Move(dst, base::bit_cast<uint64_t>(imm));
804 }
805 void Move(FPURegister dst, uint32_t src);
806 void Move(FPURegister dst, uint64_t src);
807
808 // DaddOverflow sets the overflow register to a negative value if
809 // overflow occurred; otherwise it is zero or positive.
810 void DaddOverflow(Register dst, Register left, const Operand& right,
811 Register overflow);
812 // DsubOverflow sets the overflow register to a negative value if
813 // overflow occurred; otherwise it is zero or positive.
814 void DsubOverflow(Register dst, Register left, const Operand& right,
815 Register overflow);
816 // [D]MulOverflow sets the overflow register to zero if no overflow occurred.
817 void MulOverflow(Register dst, Register left, const Operand& right,
818 Register overflow);
819 void DMulOverflow(Register dst, Register left, const Operand& right,
820 Register overflow);
821
822// Number of instructions needed for calculation of switch table entry address
823#ifdef _MIPS_ARCH_MIPS64R6
824 static const int kSwitchTablePrologueSize = 6;
825#else
826 static const int kSwitchTablePrologueSize = 11;
827#endif
828
829 // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
830 // functor/function with 'Label *func(size_t index)' declaration.
831 template <typename Func>
832 void GenerateSwitchTable(Register index, size_t case_count,
833 Func GetLabelFunction);
834
835 // Load an object from the root table.
838 Register src1, const Operand& src2);
839
841
842 void LoadFeedbackVector(Register dst, Register closure, Register scratch,
843 Label* fbv_undef);
844
845 // If the value is a NaN, canonicalize the value else, do nothing.
847
848 // ---------------------------------------------------------------------------
849 // FPU macros. These do not handle special cases like NaN or +- inf.
850
851 // Convert unsigned word to double.
853 void Cvt_d_uw(FPURegister fd, Register rs);
854
855 // Convert unsigned long to double.
858
859 // Convert unsigned word to float.
861 void Cvt_s_uw(FPURegister fd, Register rs);
862
863 // Convert unsigned long to float.
866
867 // Convert double to unsigned word.
870
871 // Convert double to unsigned long.
873 Register result = no_reg);
875 Register result = no_reg);
876
877 // Convert single to unsigned long.
879 Register result = no_reg);
881 Register result = no_reg);
882
883 // Round double functions
888
889 // Round float functions
894
895 void LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, MemOperand src);
896 void StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, MemOperand dst);
898 MSARegister src2);
900 MSARegister src2);
905
906 // Jump the register contains a smi.
907 void JumpIfSmi(Register value, Label* smi_label,
908 BranchDelaySlot bd = PROTECT);
909
// Jump to |dest| if |a| equals the immediate |b|. Clobbers kScratchReg.
910 void JumpIfEqual(Register a, int32_t b, Label* dest) {
911 li(kScratchReg, Operand(b));
912 Branch(dest, eq, a, Operand(kScratchReg));
913 }
914
// Jump to |dest| if |a| is (signed) less than the immediate |b|.
// Clobbers kScratchReg.
915 void JumpIfLessThan(Register a, int32_t b, Label* dest) {
916 li(kScratchReg, Operand(b));
917 Branch(dest, lt, a, Operand(kScratchReg));
918 }
919
920 // Push a standard frame, consisting of ra, fp, context and JS function.
921 void PushStandardFrame(Register function_reg);
922
923 // Get the actual activation frame alignment for target environment.
925
926 // Load Scaled Address instructions. Parameter sa (shift argument) must be
927 // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
928 // may be clobbered.
929 void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
930 Register scratch = at);
931 void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
932 Register scratch = at);
933
934 // Compute the start of the generated instruction stream from the current PC.
935 // This is an alternative to embedding the {CodeObject} handle as a reference.
937
938 // Control-flow integrity:
939
940 // Define a function entrypoint. This doesn't emit any code for this
941 // architecture, as control-flow integrity is not supported for it.
942 void CodeEntry() {}
943 // Define an exception handler.
945 // Define an exception handler and bind a label.
947
948 // Loads a field containing any tagged value and decompresses it if necessary.
949 // void LoadTaggedField(Register destination, const MemOperand&
950 // field_operand);
951
952 // It assumes that the arguments are located below the stack pointer.
953 void LoadReceiver(Register dest) { Ld(dest, MemOperand(sp, 0)); }
954 void StoreReceiver(Register rec) { Sd(rec, MemOperand(sp, 0)); }
955
956#ifdef V8_ENABLE_LEAPTIERING
957 // Load the entrypoint pointer of a JSDispatchTable entry.
958 void LoadEntrypointFromJSDispatchTable(Register destination,
959 Register dispatch_handle,
960 Register scratch);
961 void LoadParameterCountFromJSDispatchTable(Register destination,
962 Register dispatch_handle,
963 Register scratch);
964 void LoadEntrypointAndParameterCountFromJSDispatchTable(
965 Register entrypoint, Register parameter_count, Register dispatch_handle,
966 Register scratch);
967#endif // V8_ENABLE_LEAPTIERING
968
969 bool IsNear(Label* L, Condition cond, int rs_reg);
970
971 // Swap two registers. If the scratch register is omitted then a slightly
972 // less efficient form using xor instead of mov is emitted.
973 void Swap(Register reg1, Register reg2, Register scratch = no_reg);
974
976 Register scratch,
977 Condition cond, Label* target);
979
// Push the root-table value identified by |index| on the stack, staging it
// through an acquired scratch register.
980 void PushRoot(RootIndex index) {
981 UseScratchRegisterScope temps(this);
982 Register scratch = temps.Acquire();
983 LoadRoot(scratch, index);
984 Push(scratch);
985 }
986
987 // Compare the object in a register to a value and jump if they are equal.
// Acquires a scratch register to hold the root value for the comparison.
988 void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
989 UseScratchRegisterScope temps(this);
990 Register scratch = temps.Acquire();
991 LoadRoot(scratch, index);
992 Branch(if_equal, eq, with, Operand(scratch));
993 }
994
995 // Compare the object in a register to a value and jump if they are not equal.
// Acquires a scratch register to hold the root value for the comparison.
996 void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
997 UseScratchRegisterScope temps(this);
998 Register scratch = temps.Acquire();
999 LoadRoot(scratch, index);
1000 Branch(if_not_equal, ne, with, Operand(scratch));
1001 }
1002
1003 // Checks if value is in range [lower_limit, higher_limit] using a single
1004 // comparison.
1005 void JumpIfIsInRange(Register value, unsigned lower_limit,
1006 unsigned higher_limit, Label* on_in_range);
1007
1008 // ---------------------------------------------------------------------------
1009 // GC Support
1010
1011 // Notify the garbage collector that we wrote a pointer into an object.
1012 // |object| is the object being stored into, |value| is the object being
1013 // stored. value and scratch registers are clobbered by the operation.
1014 // The offset is the offset from the start of the object, not the offset from
1015 // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
1016 void RecordWriteField(Register object, int offset, Register value,
1017 Register scratch, RAStatus ra_status,
1018 SaveFPRegsMode save_fp,
1019 SmiCheck smi_check = SmiCheck::kInline);
1020
1021 // For a given |object| notify the garbage collector that the slot |address|
1022 // has been written. |value| is the object being stored. The value and
1023 // address registers are clobbered by the operation.
1024 void RecordWrite(Register object, Register address, Register value,
1025 RAStatus ra_status, SaveFPRegsMode save_fp,
1026 SmiCheck smi_check = SmiCheck::kInline);
1027
1028 void Pref(int32_t hint, const MemOperand& rs);
1029
1030 // ---------------------------------------------------------------------------
1031 // Pseudo-instructions.
1032
1033 void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
1034 void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
1035
1036 // Convert double to unsigned long.
1038
1043
1048
1050 FPURegister scratch);
1052 FPURegister scratch);
1054 FPURegister scratch);
1056 FPURegister scratch);
1057
1058 // Enter exit frame.
1059 // stack_space - extra stack space.
1060 void EnterExitFrame(Register scratch, int stack_space,
1061 StackFrame::Type frame_type);
1062
1063 // Leave the current exit frame.
1065
1066 // Make sure the stack is aligned. Only emits code in debug mode.
1068
1069 // Load the global proxy from the current context.
1070 void LoadGlobalProxy(Register dst) {
// Reads the native context's GLOBAL_PROXY_INDEX slot into |dst|.
1071 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
1072 }
1073
1074 void LoadNativeContextSlot(Register dst, int index);
1075
1076 // Load the initial map from the global function. The registers
1077 // function and map can be the same, function is then overwritten.
1079 Register scratch);
1080
1081 // -------------------------------------------------------------------------
1082 // JavaScript invokes.
1083 // On function call, call into the debugger.
1085 Register expected_parameter_count_or_dispatch_handle,
1086 Register actual_parameter_count);
1087
1088 // The way we invoke JSFunctions differs depending on whether leaptiering is
1089 // enabled. As such, these functions exist in two variants. In the future,
1090 // leaptiering will be used on all platforms. At that point, the
1091 // non-leaptiering variants will disappear.
1092
1093#ifdef V8_ENABLE_LEAPTIERING
1094 // Invoke the JavaScript function in the given register. Changes the
1095 // current context to the context in the function before invoking.
1096 void InvokeFunction(Register function, Register actual_parameter_count,
1097 InvokeType type,
1098 ArgumentAdaptionMode argument_adaption_mode =
1100 // Invoke the JavaScript function in the given register.
1101 // Changes the current context to the context in the function before invoking.
1102 void InvokeFunctionWithNewTarget(Register function, Register new_target,
1103 Register actual_parameter_count,
1104 InvokeType type);
1105 // Invoke the JavaScript function code by either calling or jumping.
1106 void InvokeFunctionCode(Register function, Register new_target,
1107 Register actual_parameter_count, InvokeType type,
1108 ArgumentAdaptionMode argument_adaption_mode =
1110#else
1111 void InvokeFunction(Register function, Register expected_parameter_count,
1112 Register actual_parameter_count, InvokeType type);
1113
1114 // Invoke the JavaScript function in the given register. Changes the
1115 // current context to the context in the function before invoking.
1117 Register actual_parameter_count,
1118 InvokeType type);
1119 // Invoke the JavaScript function code by either calling or jumping.
1121 Register expected_parameter_count,
1122 Register actual_parameter_count, InvokeType type);
1123#endif
1124 // On function call, call into the debugger if necessary.
1125 // void CheckDebugHook(Register fun, Register new_target,
1126 // Register expected_parameter_count,
1127 // Register actual_parameter_count);
1128
1129 // Exception handling.
1130
1131 // Push a new stack handler and link into stack handler chain.
1133
1134 // Unlink the stack handler on top of the stack from the stack handler chain.
1135 // Must preserve the result register.
1137
1138 // -------------------------------------------------------------------------
1139 // Support functions.
1140
1141 void GetObjectType(Register function, Register map, Register type_reg);
1142
1144 InstanceType lower_limit, Register range);
1145
1146 // -------------------------------------------------------------------------
1147 // Runtime calls.
1148
1149 // Call a runtime routine.
1150 void CallRuntime(const Runtime::Function* f, int num_arguments);
1151
1152 // Convenience function: Same as above, but takes the fid instead.
1154 const Runtime::Function* function = Runtime::FunctionForId(fid);
1155 CallRuntime(function, function->nargs);
1156 }
1157
1158 // Convenience function: Same as above, but takes the fid instead.
1159 void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
1160 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
1161 }
1162
1163 // Convenience function: tail call a runtime routine (jump).
1165
1166 // Jump to the builtin routine.
1168 bool builtin_exit_frame = false);
1169
1170 // ---------------------------------------------------------------------------
1171 // In-place weak references.
1172 void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1173
1174 // -------------------------------------------------------------------------
1175 // StatsCounter support.
1176
1177 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1178 Register scratch2) {
1179 if (!v8_flags.native_code_counters) return;
1180 EmitIncrementCounter(counter, value, scratch1, scratch2);
1181 }
1182 void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
1183 Register scratch2);
1184 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1185 Register scratch2) {
1186 if (!v8_flags.native_code_counters) return;
1187 EmitDecrementCounter(counter, value, scratch1, scratch2);
1188 }
1189 void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
1190 Register scratch2);
1191
1192 // -------------------------------------------------------------------------
1193 // Stack limit utilities
1194
1197 void StackOverflowCheck(Register num_args, Register scratch1,
1198 Register scratch2, Label* stack_overflow);
1199
1200 // ---------------------------------------------------------------------------
1201 // Smi utilities.
1202
1203 // Test if the register contains a smi.
1204 inline void SmiTst(Register value, Register scratch) {
// Leaves (value & kSmiTagMask) in |scratch|: zero iff |value| is a Smi.
// The caller branches on |scratch| afterwards.
1205 And(scratch, value, Operand(kSmiTagMask));
1206 }
1207
1208 // Jump if the register contains a non-smi.
1209 void JumpIfNotSmi(Register value, Label* not_smi_label,
1210 BranchDelaySlot bd = PROTECT);
1211
1212 // Abort execution if argument is not a Constructor, enabled via --debug-code.
1214
1215 // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1217
1218 // Abort execution if argument is not a callable JSFunction, enabled via
1219 // --debug-code.
1221
1222 // Abort execution if argument is not a JSBoundFunction,
1223 // enabled via --debug-code.
1225
1226 // Abort execution if argument is not a JSGeneratorObject (or subclass),
1227 // enabled via --debug-code.
1229
1230 // Abort execution if argument is not undefined or an AllocationSite, enabled
1231 // via --debug-code.
1234
1235 // ---------------------------------------------------------------------------
1236 // Tiering support.
1242 Register closure, Register scratch1,
1243 Register scratch2);
1245#ifndef V8_ENABLE_LEAPTIERING
1247 Register flags, Register feedback_vector, CodeKind current_code_kind,
1248 Label* flags_need_processing);
1250 Register feedback_vector);
1251#endif // !V8_ENABLE_LEAPTIERING
1252 template <typename Field>
1254 Ext(dst, src, Field::kShift, Field::kSize);
1255 }
1256
1257 template <typename Field>
1259 DecodeField<Field>(reg, reg);
1260 }
1261
1262 protected:
1263 inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
1264 inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
1265
1266 private:
1267 bool has_double_zero_reg_set_ = false;
1268
1269 // Helper functions for generating invokes.
1270 void InvokePrologue(Register expected_parameter_count,
1271 Register actual_parameter_count, InvokeType type);
1272 // Performs a truncating conversion of a floating point number as used by
1273 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
1274 // succeeds, otherwise falls through if result is saturated. On return
1275 // 'result' either holds answer, or is clobbered on fall through.
1277 Label* done);
1278
1280 FPURegister cmp2);
1281
1283 FPURegister cmp2);
1284
1286 MSARegister wt, BranchDelaySlot bd = PROTECT);
1287
1289 Register function, int num_reg_arguments, int num_double_arguments,
1290 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
1291 Label* return_location = nullptr);
1292
1293 // TODO(mips) Reorder parameters so out parameters come last.
1294 bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
1295 bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
1296 Register* scratch, const Operand& rt);
1297
1301 Register rs, const Operand& rt);
1303 const Operand& rt, BranchDelaySlot bdslot);
1304 bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
1305 const Operand& rt, BranchDelaySlot bdslot);
1306
1309 BranchDelaySlot bdslot);
1310 void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
1311 void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1313 Register rs, const Operand& rt);
1315 Register rs, const Operand& rt,
1316 BranchDelaySlot bdslot);
1318 Register rs, const Operand& rt,
1319 BranchDelaySlot bdslot);
1322
1323 template <typename RoundFunc>
1325 RoundFunc round);
1326
1327 template <typename RoundFunc>
1329 RoundFunc round);
1330
1331 // Push a fixed frame, consisting of ra, fp.
1332 void PushCommonFrame(Register marker_reg = no_reg);
1333
1335};
1336
1337template <typename Func>
1338void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
1339 Func GetLabelFunction) {
1340 // Ensure that dd-ed labels following this instruction use 8 bytes aligned
1341 // addresses.
1342 BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
1344 UseScratchRegisterScope temps(this);
1345 Register scratch = temps.Acquire();
1346 if (kArchVariant >= kMips64r6) {
1347 // Opposite of Align(8) as we have odd number of instructions in this case.
1348 if ((pc_offset() & 7) == 0) {
1349 nop();
1350 }
1351 addiupc(scratch, 5);
1352 Dlsa(scratch, scratch, index, kPointerSizeLog2);
1353 Ld(scratch, MemOperand(scratch));
1354 } else {
1355 Label here;
1356 Align(8);
1357 push(ra);
1358 bal(&here);
1359 dsll(scratch, index, kPointerSizeLog2); // Branch delay slot.
1360 bind(&here);
1361 daddu(scratch, scratch, ra);
1362 pop(ra);
1363 Ld(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
1364 }
1365 jr(scratch);
1366 nop(); // Branch delay slot nop.
1367 for (size_t index = 0; index < case_count; ++index) {
1368 dd(GetLabelFunction(index));
1369 }
1370}
1371
1372struct MoveCycleState {
1373 // List of scratch registers reserved for pending moves in a move cycle, and
1374 // which should therefore not be used as a temporary location by
1375 // {MoveToTempLocation}.
1377 // Available scratch registers during the move cycle resolution scope.
1378 std::optional<UseScratchRegisterScope> temps;
1379 // Scratch register picked by {MoveToTempLocation}.
1380 std::optional<Register> scratch_reg;
1381};
1382
1383// Provides access to exit frame parameters (GC-ed).
1385 // The slot at [sp] is reserved in all ExitFrames for storing the return
1386 // address before doing the actual call, it's necessary for frame iteration
1387 // (see StoreReturnAddressAndCall for details).
1388 static constexpr int kSPOffset = 1 * kSystemPointerSize;
1389 return MemOperand(sp, kSPOffset + offset);
1390}
1391
1392// Provides access to exit frame parameters (GC-ed).
1396}
1397
1398// Calls an API function. Allocates HandleScope, extracts returned value
1399// from handle and propagates exceptions. Clobbers C argument registers
1400// and C caller-saved registers. Restores context. On return removes
1401// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
1402// (GCed, includes the call JS arguments space and the additional space
1403// allocated for the fast call).
1404void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
1405 Register function_address,
1406 ExternalReference thunk_ref, Register thunk_arg,
1407 int slots_to_drop_on_return,
1408 MemOperand* argc_operand,
1409 MemOperand return_value_operand);
1410
1411} // namespace internal
1412} // namespace v8
1413
1414#define ACCESS_MASM(masm) masm->
1415
1416#endif // V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
int16_t parameter_count
Definition builtins.cc:67
interpreter::OperandScale scale
Definition builtins.cc:44
Builtins::Kind kind
Definition builtins.cc:40
SourcePosition pos
void bal(int16_t offset)
void jr(Register target)
Simd128Register Simd128Register ra
void addiupc(Register rs, int32_t imm19)
void BlockTrampolinePoolFor(int instructions)
void daddu(Register rd, Register rs, Register rt)
void dsll(Register rd, Register rt, uint16_t sa)
static constexpr int kFixedSlotCountAboveFp
void Clz(Register rd, Register rs)
void Trunc_ul_d(Register rd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void JumpIfRoot(Register with, RootIndex index, Label *if_equal)
void Push(Register src1, Register src2, Register src3, Register src4, Register src5)
void GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction)
void Ret(BranchDelaySlot bd, Condition cond=al, Register rs=zero_reg, const Operand &rt=Operand(zero_reg))
void Abort(AbortReason msg)
void CallCodeObject(Register code_data_container_object, CodeEntrypointTag tag)
void LoadStackLimit(Register destination, StackLimitKind kind)
void Cvt_s_ul(FPURegister fd, Register rs)
void Pop(Register src1, Register src2, Register src3)
void GetObjectType(Register function, Register map, Register type_reg)
void Trunc_ul_s(Register rd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void CallJSFunction(Register function_object, uint16_t argument_count)
void LiLower32BitHelper(Register rd, Operand j)
void Lbu(Register rd, const MemOperand &rs)
void BranchAndLinkShort(Label *L, BranchDelaySlot bdslot=PROTECT)
bool IsNear(Label *L, Condition cond, int rs_reg)
void BranchAndLinkLong(Label *L, BranchDelaySlot bdslot)
void LoadSplat(MSASize sz, MSARegister dst, MemOperand src)
int CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void JumpIfIsInRange(Register value, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend=false)
int CallCFunction(Register function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void Move(FPURegister dst, float imm)
void MultiPopFPU(DoubleRegList regs)
void RecordWriteField(Register object, int offset, Register value, Register scratch, RAStatus ra_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void Move(FPURegister dst, FPURegister src)
void Ctz(Register rd, Register rs)
void Swap(Register reg1, Register reg2, Register scratch=no_reg)
void Cvt_s_uw(FPURegister fd, FPURegister fs)
void Cvt_d_ul(FPURegister fd, Register rs)
void CallRuntime(Runtime::FunctionId fid)
void Move(FPURegister dst, Register src)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void MultiPushFPU(DoubleRegList regs)
bool BranchShortHelper(int16_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void LoadZeroIfFPUCondition(Register dest)
void Trunc_l_d(FPURegister fd, FPURegister fs)
void EnterFrame(StackFrame::Type type)
void Scd(Register rd, const MemOperand &rs)
void Dins(Register rt, Register rs, uint16_t pos, uint16_t size)
void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, FPURegister cmp2)
void Sh(Register rd, const MemOperand &rs)
void li(Register dst, Handle< HeapObject > value, LiFlags mode=OPTIMIZE_SIZE)
void Move(FPURegister dst, double imm)
void DecodeField(Register dst, Register src)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void Branch(Label *L, Condition cond, Register rs, RootIndex index, BranchDelaySlot bdslot=PROTECT)
void Call(Register target, COND_ARGS)
void Ceil_l_d(FPURegister fd, FPURegister fs)
void Uswc1(FPURegister fd, const MemOperand &rs, Register scratch)
void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
void JumpIfNotRoot(Register with, RootIndex index, Label *if_not_equal)
void Lb(Register rd, const MemOperand &rs)
void TailCallBuiltin(Builtin builtin, Condition cond, Register type, Operand range)
static int InstrCountForLi64Bit(int64_t value)
void Movz(Register rd, Register rs, Register rt)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void Neg_s(FPURegister fd, FPURegister fs)
void TestCodeIsMarkedForDeoptimizationAndJump(Register code_data_container, Register scratch, Condition cond, Label *target)
void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch)
void PushStandardFrame(Register function_reg)
void Floor_l_d(FPURegister fd, FPURegister fs)
void li(Register dst, ExternalReference value, LiFlags mode=OPTIMIZE_SIZE)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void Swc1(FPURegister fs, const MemOperand &dst)
void Move(Register dst, Register src)
void Move(Register dst, FPURegister src)
void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot=PROTECT)
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS)
void JumpIfEqual(Register a, int32_t b, Label *dest)
void li_optimized(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void mov(Register rd, Register rt)
void StoreReturnAddressAndCall(Register target)
void Move(Register dst, Tagged< Smi > value)
void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, FPURegister scratch)
void LoadZeroIfConditionZero(Register dest, Register condition)
void FmoveLow(FPURegister dst, Register src_low)
void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch)
void LeaveExitFrame(Register scratch)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
void LoadRootRelative(Register destination, int32_t offset) final
void Round_d_d(FPURegister fd, FPURegister fs)
static int ActivationFrameAlignment()
void Push(Handle< HeapObject > handle)
void PrepareCallCFunction(int num_reg_arguments, Register scratch)
void Cvt_s_ul(FPURegister fd, FPURegister fs)
void MultiPush(RegList regs)
void CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode)
void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2)
void Ceil_d_d(FPURegister fd, FPURegister fs)
void Lwu(Register rd, const MemOperand &rs)
void Jump(Register target, COND_ARGS)
int CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2)
void FmoveHigh(Register dst_high, FPURegister src)
void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE
void CheckDebugHook(Register fun, Register new_target, Register expected_parameter_count_or_dispatch_handle, Register actual_parameter_count)
void SmiUntag(Register dst, Register src)
void Move(FPURegister dst, Register src_low, Register src_high)
void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, FPURegister scratch)
void Neg_d(FPURegister fd, FPURegister fs)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void Trunc_w_d(FPURegister fd, FPURegister fs)
void Ulwu(Register rd, const MemOperand &rs)
void BranchTrueShortF(Label *target, BranchDelaySlot bd=PROTECT)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
bool BranchAndLinkShortCheck(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Cvt_d_ul(FPURegister fd, FPURegister fs)
void MultiPushMSA(DoubleRegList regs)
int CallCFunction(Register function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void TailCallBuiltin(Builtin builtin)
void Bnvc(Register rt, Register rs, Label *L)
void LoadAddressPCRelative(Register dst, Label *target)
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void Sd(Register rd, const MemOperand &rs)
void BranchShortHelper(int16_t offset, Label *L, BranchDelaySlot bdslot)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure, Register scratch1, Register scratch2)
void Lwc1(FPURegister fd, const MemOperand &src)
void RecordWrite(Register object, Register address, Register value, RAStatus ra_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void Move_d(FPURegister dst, FPURegister src)
void Jump(const ExternalReference &reference)
MemOperand ExternalReferenceAsOperand(IsolateFieldId id)
void Trunc_s_s(FPURegister fd, FPURegister fs)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void DaddOverflow(Register dst, Register left, const Operand &right, Register overflow)
void Uldc1(FPURegister fd, const MemOperand &rs, Register scratch)
void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, RoundFunc round)
void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, RoundFunc round)
void Push(Register src1, Register src2, Register src3)
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS)
void BranchLong(Label *L, BranchDelaySlot bdslot)
void Move(Register dst, Handle< HeapObject > handle)
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void Drop(int count, Condition cond=cc_always, Register reg=no_reg, const Operand &op=Operand(no_reg))
void SmiTag(Register dst, Register src)
void CompareWord(Condition cond, Register dst, Register lhs, const Operand &rhs)
void Check(Condition cc, AbortReason reason, Register rs, Operand rt)
void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void CallBuiltin(Builtin builtin)
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
bool BranchShortHelperR6(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt)
void Move(FPURegister dst, uint64_t src)
void BranchFalseF(Label *target, BranchDelaySlot bd=PROTECT)
void PushCommonFrame(Register marker_reg=no_reg)
void Lsa(Register rd, Register rs, Register rt, uint8_t sa, Register scratch=at)
void LeaveFrame(StackFrame::Type type)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void Movt(Register rd, Register rs, uint16_t cc=0)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void Sw(Register rd, const MemOperand &rs)
void MultiPopMSA(DoubleRegList regs)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
void LoadZeroIfNotFPUCondition(Register dest)
void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
Operand ClearedValue() const
void StackOverflowCheck(Register num_args, Register scratch1, Register scratch2, Label *stack_overflow)
void BranchAndLinkShortHelper(int16_t offset, Label *L, BranchDelaySlot bdslot)
void MulOverflow(Register dst, Register left, const Operand &right, Register overflow)
void Lhu(Register rd, const MemOperand &rs)
void InsertBits(Register dest, Register source, Register pos, int size)
void BranchLong(int32_t offset, BranchDelaySlot bdslot=PROTECT)
void FmoveHigh(FPURegister dst, Register src_high)
void BranchTrueF(Label *target, BranchDelaySlot bd=PROTECT)
void JumpIfNotSmi(Register value, Label *not_smi_label, BranchDelaySlot bd=PROTECT)
void MultiPop(RegList regs)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void li(Register rd, int64_t j, LiFlags mode=OPTIMIZE_SIZE)
bool BranchAndLinkShortHelper(int16_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void FmoveLow(Register dst_low, FPURegister src)
void Push(Register src, Condition cond, Register tst1, Register tst2)
void ExtMulHigh(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2)
void LoadRoot(Register destination, RootIndex index) final
void Trunc_d_d(FPURegister fd, FPURegister fs)
void BranchShortMSA(MSABranchDF df, Label *target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd=PROTECT)
void LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, MemOperand src)
void Popcnt(Register rd, Register rs)
void Pop(Register src1, Register src2)
void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void Floor_d_d(FPURegister fd, FPURegister fs)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DoubleRegister double_input, StubCallMode stub_mode)
void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch)
void Ceil_w_d(FPURegister fd, FPURegister fs)
void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void Round_s_s(FPURegister fd, FPURegister fs)
void PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order=kNormal)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size)
void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode)
void Call(Handle< Code > code, RelocInfo::Mode rmode=RelocInfo::CODE_TARGET, COND_ARGS)
void LoadZeroIfConditionNotZero(Register dest, Register condition)
void Dlsa(Register rd, Register rs, Register rt, uint8_t sa, Register scratch=at)
void EmitDecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void LoadFromConstantsTable(Register destination, int constant_index) final
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void Sdc1(FPURegister fs, const MemOperand &dst)
void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE
void PrepareCEntryFunction(const ExternalReference &ref)
void ComputeCodeStartAddress(Register dst)
void MaybeSaveRegisters(RegList registers)
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size)
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler)
void ExtAddPairwise(MSADataType type, MSARegister dst, MSARegister src)
void LoadWordPair(Register rd, const MemOperand &rs, Register scratch=at)
void Lld(Register rd, const MemOperand &rs)
void Jump(Handle< Code > code, RelocInfo::Mode rmode, COND_ARGS)
void SmiTst(Register value, Register scratch)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void SbxCheck(Condition cc, AbortReason reason, Register rj, Operand rk)
void Lw(Register rd, const MemOperand &rs)
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, FPURegister scratch)
void BranchShortHelperR6(int32_t offset, Label *L)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
bool BranchShortCheck(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt, BranchDelaySlot bdslot)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void ByteSwapUnsigned(Register dest, Register src, int operand_size)
void Lh(Register rd, const MemOperand &rs)
void Usdc1(FPURegister fd, const MemOperand &rs, Register scratch)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg)
void Assert(Condition cc, AbortReason reason, Register rs, Operand rt) NOOP_UNLESS_DEBUG_CODE
void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, FPURegister scratch)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void Push(Tagged< Smi > smi)
void Cvt_d_uw(FPURegister fd, FPURegister fs)
void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2)
void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label *out_of_line)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void Sc(Register rd, const MemOperand &rs)
void Call(Label *target)
void Bovc(Register rt, Register rs, Label *L)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2)
void MSARoundW(MSARegister dst, MSARegister src, FPURoundingMode mode)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void JumpIfLessThan(Register a, int32_t b, Label *dest)
void Move(Register output, MemOperand operand)
void Dpopcnt(Register rd, Register rs)
void Ll(Register rd, const MemOperand &rs)
void Ulwc1(FPURegister fd, const MemOperand &rs, Register scratch)
void Round_l_d(FPURegister fd, FPURegister fs)
void DMulOverflow(Register dst, Register left, const Operand &right, Register overflow)
void Push(Register src1, Register src2, Register src3, Register src4)
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void LoadAddress(Register dst, Label *target)
void StoreWordPair(Register rd, const MemOperand &rs, Register scratch=at)
void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch, Register result=no_reg)
void Sb(Register rd, const MemOperand &rs)
void Ld(Register rd, const MemOperand &rs)
void Ceil_s_s(FPURegister fd, FPURegister fs)
void GetInstanceTypeRange(Register map, Register type_reg, InstanceType lower_limit, Register range)
void Movn(Register rd, Register rs, Register rt)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void Movf(Register rd, Register rs, uint16_t cc=0)
void LoadIsolateField(Register dst, IsolateFieldId id)
void LoadRoot(Register destination, RootIndex index, Condition cond, Register src1, const Operand &src2)
void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch)
void JumpIfSmi(Register value, Label *smi_label, BranchDelaySlot bd=PROTECT)
void Move_s(FPURegister dst, FPURegister src)
void ByteSwapSigned(Register dest, Register src, int operand_size)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Floor_s_s(FPURegister fd, FPURegister fs)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, Register scratch)
void Dclz(Register rd, Register rs)
void BranchAndLinkShortHelperR6(int32_t offset, Label *L)
void Dctz(Register rd, Register rs)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, FPURegister cmp2)
void Pref(int32_t hint, const MemOperand &rs)
void Round_w_d(FPURegister fd, FPURegister fs)
void BranchMSA(Label *target, MSABranchDF df, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd=PROTECT)
void DsubOverflow(Register dst, Register left, const Operand &right, Register overflow)
void StubPrologue(StackFrame::Type type)
void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch)
void StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, MemOperand dst)
void StoreRootRelative(int32_t offset, Register value) final
void Move(FPURegister dst, uint32_t src)
void Push(Register src1, Register src2)
void LoadMap(Register destination, Register object)
void SmiUntag(Register dst, const MemOperand &src)
void AssertStackIsAligned() NOOP_UNLESS_DEBUG_CODE
void TailCallRuntime(Runtime::FunctionId fid)
void ExtMulLow(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2)
void BranchFalseShortF(Label *target, BranchDelaySlot bd=PROTECT)
void CallRuntime(Runtime::FunctionId fid, int num_arguments)
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size)
void LoadNativeContextSlot(Register dst, int index)
void CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, Label *done)
bool BranchAndLinkShortHelperR6(int32_t offset, Label *L, Condition cond, Register rs, const Operand &rt)
void Floor_w_d(FPURegister fd, FPURegister fs)
void Move(Register dst_low, Register dst_high, FPURegister src)
void Ldc1(FPURegister fd, const MemOperand &src)
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label *condition_met)
void JumpCodeObject(Register code_data_container_object, CodeEntrypointTag tag, JumpMode jump_mode=JumpMode::kJump)
void DropArguments(Register count)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void LoadCodeInstructionStart(Register destination, Register code_data_container_object, CodeEntrypointTag tag)
void SmiScale(Register dst, Register src, int scale)
void Ush(Register rd, const MemOperand &rs, Register scratch)
#define kScratchReg
#define NOOP_UNLESS_DEBUG_CODE
Definition assembler.h:628
@ kMips64r6
static const ArchVariants kArchVariant
DirectHandle< Object > new_target
Definition execution.cc:75
Label label
Isolate * isolate
int32_t offset
TNode< Object > receiver
ZoneVector< RpoNumber > & result
LiftoffRegister reg
uint32_t const mask
#define DEFINE_INSTRUCTION2(instr)
#define COND_ARGS
#define DEFINE_INSTRUCTION(instr)
#define DECLARE_BRANCH_PROTOTYPES(Name)
SmiCheck
ArgumentAdaptionMode
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
#define D(Name)
Definition maglev-ir.h:6426
InstructionOperand destination
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr int kPointerSizeLog2
Definition globals.h:600
MemOperand ExitFrameCallerStackSlotOperand(int index)
const int kSmiTagSize
Definition v8-internal.h:87
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr int kPointerSize
Definition globals.h:599
constexpr int L
constexpr int kSmiShift
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
Definition globals.h:410
MemOperand CFunctionArgumentOperand(int index)
constexpr int S
constexpr bool SmiValuesAre31Bits()
const int kCArgsSlotsSize
const int kHeapObjectTag
Definition v8-internal.h:72
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr bool SmiValuesAre32Bits()
const intptr_t kSmiTagMask
Definition v8-internal.h:88
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
constexpr uint8_t kInstrSize
const int kSmiTag
Definition v8-internal.h:86
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
MemOperand ExitFrameStackSlotOperand(int offset)
const int kCArgSlotCount
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_GT(v1, v2)
Definition logging.h:487
#define V8_EXPORT_PRIVATE
Definition macros.h:460
std::optional< CPURegister > scratch_reg
std::optional< UseScratchRegisterScope > temps