v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-arm64.h
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
6#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
7
8#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
9#error This header must be included via macro-assembler.h
10#endif
11
12#include <optional>
13
14#include "src/base/bits.h"
17#include "src/common/globals.h"
19
20// Simulator specific helpers.
21#if USE_SIMULATOR
22#if DEBUG
23#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
24#define ASM_LOCATION_IN_ASSEMBLER(message) \
25 Debug("LOCATION: " message, __LINE__, NO_PARAM)
26#else
27#define ASM_LOCATION(message)
28#define ASM_LOCATION_IN_ASSEMBLER(message)
29#endif
30#else
31#define ASM_LOCATION(message)
32#define ASM_LOCATION_IN_ASSEMBLER(message)
33#endif
34
35namespace v8 {
36namespace internal {
37
38namespace wasm {
39class JumpTableAssembler;
40}
41
42#define LS_MACRO_LIST(V) \
43 V(Ldrb, Register&, rt, LDRB_w) \
44 V(Strb, Register&, rt, STRB_w) \
45 V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
46 V(Ldrh, Register&, rt, LDRH_w) \
47 V(Strh, Register&, rt, STRH_w) \
48 V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
49 V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
50 V(Str, CPURegister&, rt, StoreOpFor(rt)) \
51 V(Ldrsw, Register&, rt, LDRSW_x)
52
53#define LSPAIR_MACRO_LIST(V) \
54 V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
55 V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
56 V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
57
58#define LDA_STL_MACRO_LIST(V) \
59 V(Ldarb, ldarb) \
60 V(Ldarh, ldarh) \
61 V(Ldar, ldar) \
62 V(Ldaxrb, ldaxrb) \
63 V(Ldaxrh, ldaxrh) \
64 V(Ldaxr, ldaxr) \
65 V(Stlrb, stlrb) \
66 V(Stlrh, stlrh) \
67 V(Stlr, stlr)
68
69#define STLX_MACRO_LIST(V) \
70 V(Stlxrb, stlxrb) \
71 V(Stlxrh, stlxrh) \
72 V(Stlxr, stlxr)
73
74#define CAS_SINGLE_MACRO_LIST(V) \
75 V(Cas, cas) \
76 V(Casa, casa) \
77 V(Casl, casl) \
78 V(Casal, casal) \
79 V(Casb, casb) \
80 V(Casab, casab) \
81 V(Caslb, caslb) \
82 V(Casalb, casalb) \
83 V(Cash, cash) \
84 V(Casah, casah) \
85 V(Caslh, caslh) \
86 V(Casalh, casalh)
87
88#define CAS_PAIR_MACRO_LIST(V) \
89 V(Casp, casp) \
90 V(Caspa, caspa) \
91 V(Caspl, caspl) \
92 V(Caspal, caspal)
93
94// These macros generate all the variations of the atomic memory operations,
95// e.g. ldadd, ldadda, ldaddb, staddl, etc.
96
97#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \
98 V(DEF, MASM_PRE##add, ASM_PRE##add) \
99 V(DEF, MASM_PRE##clr, ASM_PRE##clr) \
100 V(DEF, MASM_PRE##eor, ASM_PRE##eor) \
101 V(DEF, MASM_PRE##set, ASM_PRE##set) \
102 V(DEF, MASM_PRE##smax, ASM_PRE##smax) \
103 V(DEF, MASM_PRE##smin, ASM_PRE##smin) \
104 V(DEF, MASM_PRE##umax, ASM_PRE##umax) \
105 V(DEF, MASM_PRE##umin, ASM_PRE##umin)
106
107#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
108 V(MASM, ASM) \
109 V(MASM##l, ASM##l) \
110 V(MASM##b, ASM##b) \
111 V(MASM##lb, ASM##lb) \
112 V(MASM##h, ASM##h) \
113 V(MASM##lh, ASM##lh)
114
115#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \
116 ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
117 V(MASM##a, ASM##a) \
118 V(MASM##al, ASM##al) \
119 V(MASM##ab, ASM##ab) \
120 V(MASM##alb, ASM##alb) \
121 V(MASM##ah, ASM##ah) \
122 V(MASM##alh, ASM##alh)
123
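// As a worked illustration (editorial note, not part of the original header):
// instantiating ATOMIC_MEMORY_SIMPLE_MACRO_LIST with the LOAD modes and the
// Ld/ld prefixes expands the 'add' row alone into the macro-assembler helpers
// Ldadd, Ldaddl, Ldaddb, Ldaddlb, Ldaddh, Ldaddlh, Ldadda, Ldaddal, Ldaddab,
// Ldaddalb, Ldaddah and Ldaddalh, each forwarding to the matching assembler
// mnemonic (ldadd, ldaddl, ldaddb, ...). The STORE modes omit the acquire
// ('a'/'al') variants, since plain stores have no acquire semantics.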
124// ----------------------------------------------------------------------------
125// Static helper functions
126
127// Generate a MemOperand for loading a field from an object.
128inline MemOperand FieldMemOperand(Register object, int offset);
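// Illustrative sketch (an assumption, not quoted from this header): heap
// object pointers are tagged, so the returned operand compensates for the
// tag, conceptually:
//
//   MemOperand FieldMemOperand(Register object, int offset) {
//     return MemOperand(object, offset - kHeapObjectTag);
//   }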
129
130// ----------------------------------------------------------------------------
131// MacroAssembler
132
133enum BranchType {
134 // Copies of architectural conditions.
135 // The associated conditions can be used in place of those, the code will
136 // take care of reinterpreting them with the correct type.
137 integer_eq = eq,
138 integer_ne = ne,
139 integer_hs = hs,
140 integer_lo = lo,
141 integer_mi = mi,
142 integer_pl = pl,
143 integer_vs = vs,
144 integer_vc = vc,
145 integer_hi = hi,
146 integer_ls = ls,
147 integer_ge = ge,
148 integer_lt = lt,
149 integer_gt = gt,
150 integer_le = le,
151 integer_al = al,
152 integer_nv = nv,
153
154 // These two are *different* from the architectural codes al and nv.
155 // 'always' is used to generate unconditional branches.
156 // 'never' is used to not generate a branch (generally as the inverse
157 // branch type of 'always').
158 always,
159 never,
160 // cbz and cbnz
161 reg_zero,
162 reg_not_zero,
163 // tbz and tbnz
164 reg_bit_clear,
165 reg_bit_set,
166
167 // Aliases.
168 kBranchTypeFirstCondition = eq,
169 kBranchTypeLastCondition = nv,
170 kBranchTypeFirstUsingReg = reg_zero,
171 kBranchTypeFirstUsingBit = reg_bit_clear,
172};
173
174inline BranchType InvertBranchType(BranchType type) {
175 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
176 return static_cast<BranchType>(
177 NegateCondition(static_cast<Condition>(type)));
178 } else {
179 return static_cast<BranchType>(type ^ 1);
180 }
181}
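// For example, InvertBranchType(integer_eq) takes the condition path and
// yields integer_ne via NegateCondition, while the XOR path flips the
// adjacent non-condition pairs: always <-> never, reg_zero <-> reg_not_zero
// and reg_bit_clear <-> reg_bit_set.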
182
183enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
184enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
185
186// The macro assembler supports moving automatically pre-shifted immediates for
187// arithmetic and logical instructions, and then applying a post shift in the
188// instruction to undo the modification, in order to reduce the code emitted for
189// an operation. For example:
190//
191// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
192//
193// This optimisation can be only partially applied when the stack pointer is an
194// operand or destination, so this enumeration is used to control the shift.
195enum PreShiftImmMode {
196 kNoShift, // Don't pre-shift.
197 kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
198 kAnyShift // Allow any pre-shift.
199};
200
201// TODO(victorgomes): Move definition to macro-assembler.h, once all other
202// platforms are updated.
203enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
204
205class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
206 public:
207 using MacroAssemblerBase::MacroAssemblerBase;
208
209#if DEBUG
210 void set_allow_macro_instructions(bool value) {
211 allow_macro_instructions_ = value;
212 }
213 bool allow_macro_instructions() const { return allow_macro_instructions_; }
214#endif
215
216 // We should not use near calls or jumps for calls to external references,
217 // since the code spaces are not guaranteed to be close to each other.
218 static constexpr bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
219 return rmode != RelocInfo::EXTERNAL_REFERENCE;
220 }
221
222 static bool IsNearCallOffset(int64_t offset);
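// Near calls use the BL/B encodings, whose signed 26-bit immediate counts
// 4-byte instructions, giving a reach of +/-128MB. A plausible sketch of the
// check (an assumption, not quoted from this header):
//
//   bool MacroAssembler::IsNearCallOffset(int64_t offset) {
//     return is_int26(offset);  // offset measured in instructions
//   }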
223
224 // Activation support.
225 void EnterFrame(StackFrame::Type type);
226 void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
227 // Out-of-line constant pool not implemented on arm64.
228 UNREACHABLE();
229 }
230 void LeaveFrame(StackFrame::Type type);
231
233
234 void Mov(const Register& rd, const Operand& operand,
235 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
236 void Mov(const Register& rd, uint64_t imm);
237 void Mov(const Register& rd, ExternalReference reference);
238 void LoadIsolateField(const Register& rd, IsolateFieldId id);
239 void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
240 int vn_index) {
241 DCHECK(allow_macro_instructions());
242 mov(vd, vd_index, vn, vn_index);
243 }
244 void Mov(const Register& rd, Tagged<Smi> smi);
245 void Mov(const VRegister& vd, const VRegister& vn, int index) {
246 DCHECK(allow_macro_instructions());
247 mov(vd, vn, index);
248 }
249 void Mov(const VRegister& vd, int vd_index, const Register& rn) {
250 DCHECK(allow_macro_instructions());
251 mov(vd, vd_index, rn);
252 }
253 void Mov(const Register& rd, const VRegister& vn, int vn_index) {
254 DCHECK(allow_macro_instructions());
255 mov(rd, vn, vn_index);
256 }
257
258 // These are required for compatibility with architecture independent code.
259 // Remove if not needed.
260 void Move(Register dst, Tagged<Smi> src);
261 void Move(Register dst, MemOperand src);
262 void Move(Register dst, Register src);
263
264 // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
265 void MovePair(Register dst0, Register src0, Register dst1, Register src1);
266
267 // Register swap. Note that the register operands should be distinct.
268 void Swap(Register lhs, Register rhs);
269 void Swap(VRegister lhs, VRegister rhs);
270
271// NEON by element instructions.
272#define NEON_BYELEMENT_MACRO_LIST(V) \
273 V(fmla, Fmla) \
274 V(fmls, Fmls) \
275 V(fmul, Fmul) \
276 V(fmulx, Fmulx) \
277 V(mul, Mul) \
278 V(mla, Mla) \
279 V(mls, Mls) \
280 V(sqdmulh, Sqdmulh) \
281 V(sqrdmulh, Sqrdmulh) \
282 V(sqdmull, Sqdmull) \
283 V(sqdmull2, Sqdmull2) \
284 V(sqdmlal, Sqdmlal) \
285 V(sqdmlal2, Sqdmlal2) \
286 V(sqdmlsl, Sqdmlsl) \
287 V(sqdmlsl2, Sqdmlsl2) \
288 V(smull, Smull) \
289 V(smull2, Smull2) \
290 V(smlal, Smlal) \
291 V(smlal2, Smlal2) \
292 V(smlsl, Smlsl) \
293 V(smlsl2, Smlsl2) \
294 V(umull, Umull) \
295 V(umull2, Umull2) \
296 V(umlal, Umlal) \
297 V(umlal2, Umlal2) \
298 V(umlsl, Umlsl) \
299 V(umlsl2, Umlsl2)
300
301#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
302 void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
303 int vm_index) { \
304 DCHECK(allow_macro_instructions()); \
305 ASM(vd, vn, vm, vm_index); \
306 }
307 NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
308#undef DEFINE_MACRO_ASM_FUNC
309
310// NEON 2 vector register instructions.
311#define NEON_2VREG_MACRO_LIST(V) \
312 V(abs, Abs) \
313 V(addp, Addp) \
314 V(addv, Addv) \
315 V(cls, Cls) \
316 V(clz, Clz) \
317 V(cnt, Cnt) \
318 V(faddp, Faddp) \
319 V(fcvtas, Fcvtas) \
320 V(fcvtau, Fcvtau) \
321 V(fcvtl, Fcvtl) \
322 V(fcvtms, Fcvtms) \
323 V(fcvtmu, Fcvtmu) \
324 V(fcvtn, Fcvtn) \
325 V(fcvtns, Fcvtns) \
326 V(fcvtnu, Fcvtnu) \
327 V(fcvtps, Fcvtps) \
328 V(fcvtpu, Fcvtpu) \
329 V(fmaxnmp, Fmaxnmp) \
330 V(fmaxnmv, Fmaxnmv) \
331 V(fmaxp, Fmaxp) \
332 V(fmaxv, Fmaxv) \
333 V(fminnmp, Fminnmp) \
334 V(fminnmv, Fminnmv) \
335 V(fminp, Fminp) \
336 V(fminv, Fminv) \
337 V(fneg, Fneg) \
338 V(frecpe, Frecpe) \
339 V(frecpx, Frecpx) \
340 V(frinta, Frinta) \
341 V(frinti, Frinti) \
342 V(frintm, Frintm) \
343 V(frintn, Frintn) \
344 V(frintp, Frintp) \
345 V(frintx, Frintx) \
346 V(frintz, Frintz) \
347 V(frsqrte, Frsqrte) \
348 V(fsqrt, Fsqrt) \
349 V(mov, Mov) \
350 V(mvn, Mvn) \
351 V(neg, Neg) \
352 V(not_, Not) \
353 V(rbit, Rbit) \
354 V(rev16, Rev16) \
355 V(rev32, Rev32) \
356 V(rev64, Rev64) \
357 V(sadalp, Sadalp) \
358 V(saddlp, Saddlp) \
359 V(saddlv, Saddlv) \
360 V(smaxv, Smaxv) \
361 V(sminv, Sminv) \
362 V(sqabs, Sqabs) \
363 V(sqneg, Sqneg) \
364 V(sqxtn2, Sqxtn2) \
365 V(sqxtn, Sqxtn) \
366 V(sqxtun2, Sqxtun2) \
367 V(sqxtun, Sqxtun) \
368 V(suqadd, Suqadd) \
369 V(sxtl2, Sxtl2) \
370 V(sxtl, Sxtl) \
371 V(uadalp, Uadalp) \
372 V(uaddlp, Uaddlp) \
373 V(uaddlv, Uaddlv) \
374 V(umaxv, Umaxv) \
375 V(uminv, Uminv) \
376 V(uqxtn2, Uqxtn2) \
377 V(uqxtn, Uqxtn) \
378 V(urecpe, Urecpe) \
379 V(ursqrte, Ursqrte) \
380 V(usqadd, Usqadd) \
381 V(uxtl2, Uxtl2) \
382 V(uxtl, Uxtl) \
383 V(xtn2, Xtn2) \
384 V(xtn, Xtn)
385
386#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
387 void MASM(const VRegister& vd, const VRegister& vn) { \
388 DCHECK(allow_macro_instructions()); \
389 ASM(vd, vn); \
390 }
391 NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
392#undef DEFINE_MACRO_ASM_FUNC
393#undef NEON_2VREG_MACRO_LIST
394
395// NEON 2 vector register with immediate instructions.
396#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
397 V(fcmeq, Fcmeq) \
398 V(fcmge, Fcmge) \
399 V(fcmgt, Fcmgt) \
400 V(fcmle, Fcmle) \
401 V(fcmlt, Fcmlt)
402
403#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
404 void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
405 DCHECK(allow_macro_instructions()); \
406 ASM(vd, vn, imm); \
407 }
408 NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
409#undef DEFINE_MACRO_ASM_FUNC
410
411// NEON 3 vector register instructions.
412#define NEON_3VREG_MACRO_LIST(V) \
413 V(add, Add) \
414 V(addhn2, Addhn2) \
415 V(addhn, Addhn) \
416 V(addp, Addp) \
417 V(and_, And) \
418 V(bic, Bic) \
419 V(bif, Bif) \
420 V(bit, Bit) \
421 V(bsl, Bsl) \
422 V(cmeq, Cmeq) \
423 V(cmge, Cmge) \
424 V(cmgt, Cmgt) \
425 V(cmhi, Cmhi) \
426 V(cmhs, Cmhs) \
427 V(cmtst, Cmtst) \
428 V(eor, Eor) \
429 V(fabd, Fabd) \
430 V(facge, Facge) \
431 V(facgt, Facgt) \
432 V(faddp, Faddp) \
433 V(fcmeq, Fcmeq) \
434 V(fcmge, Fcmge) \
435 V(fcmgt, Fcmgt) \
436 V(fmaxnmp, Fmaxnmp) \
437 V(fmaxp, Fmaxp) \
438 V(fminnmp, Fminnmp) \
439 V(fminp, Fminp) \
440 V(fmla, Fmla) \
441 V(fmls, Fmls) \
442 V(fmulx, Fmulx) \
443 V(fnmul, Fnmul) \
444 V(frecps, Frecps) \
445 V(frsqrts, Frsqrts) \
446 V(mla, Mla) \
447 V(mls, Mls) \
448 V(mul, Mul) \
449 V(orn, Orn) \
450 V(orr, Orr) \
451 V(pmull2, Pmull2) \
452 V(pmull, Pmull) \
453 V(pmul, Pmul) \
454 V(raddhn2, Raddhn2) \
455 V(raddhn, Raddhn) \
456 V(rsubhn2, Rsubhn2) \
457 V(rsubhn, Rsubhn) \
458 V(sabal2, Sabal2) \
459 V(sabal, Sabal) \
460 V(saba, Saba) \
461 V(sabdl2, Sabdl2) \
462 V(sabdl, Sabdl) \
463 V(sabd, Sabd) \
464 V(saddl2, Saddl2) \
465 V(saddl, Saddl) \
466 V(saddw2, Saddw2) \
467 V(saddw, Saddw) \
468 V(sdot, Sdot) \
469 V(shadd, Shadd) \
470 V(shsub, Shsub) \
471 V(smaxp, Smaxp) \
472 V(smax, Smax) \
473 V(sminp, Sminp) \
474 V(smin, Smin) \
475 V(smlal2, Smlal2) \
476 V(smlal, Smlal) \
477 V(smlsl2, Smlsl2) \
478 V(smlsl, Smlsl) \
479 V(smull2, Smull2) \
480 V(smull, Smull) \
481 V(sqadd, Sqadd) \
482 V(sqdmlal2, Sqdmlal2) \
483 V(sqdmlal, Sqdmlal) \
484 V(sqdmlsl2, Sqdmlsl2) \
485 V(sqdmlsl, Sqdmlsl) \
486 V(sqdmulh, Sqdmulh) \
487 V(sqdmull2, Sqdmull2) \
488 V(sqdmull, Sqdmull) \
489 V(sqrdmulh, Sqrdmulh) \
490 V(sqrshl, Sqrshl) \
491 V(sqshl, Sqshl) \
492 V(sqsub, Sqsub) \
493 V(srhadd, Srhadd) \
494 V(srshl, Srshl) \
495 V(sshl, Sshl) \
496 V(ssubl2, Ssubl2) \
497 V(ssubl, Ssubl) \
498 V(ssubw2, Ssubw2) \
499 V(ssubw, Ssubw) \
500 V(subhn2, Subhn2) \
501 V(subhn, Subhn) \
502 V(sub, Sub) \
503 V(trn1, Trn1) \
504 V(trn2, Trn2) \
505 V(uabal2, Uabal2) \
506 V(uabal, Uabal) \
507 V(uaba, Uaba) \
508 V(uabdl2, Uabdl2) \
509 V(uabdl, Uabdl) \
510 V(uabd, Uabd) \
511 V(uaddl2, Uaddl2) \
512 V(uaddl, Uaddl) \
513 V(uaddw2, Uaddw2) \
514 V(uaddw, Uaddw) \
515 V(uhadd, Uhadd) \
516 V(uhsub, Uhsub) \
517 V(umaxp, Umaxp) \
518 V(umax, Umax) \
519 V(uminp, Uminp) \
520 V(umin, Umin) \
521 V(umlal2, Umlal2) \
522 V(umlal, Umlal) \
523 V(umlsl2, Umlsl2) \
524 V(umlsl, Umlsl) \
525 V(umull2, Umull2) \
526 V(umull, Umull) \
527 V(uqadd, Uqadd) \
528 V(uqrshl, Uqrshl) \
529 V(uqshl, Uqshl) \
530 V(uqsub, Uqsub) \
531 V(urhadd, Urhadd) \
532 V(urshl, Urshl) \
533 V(ushl, Ushl) \
534 V(usubl2, Usubl2) \
535 V(usubl, Usubl) \
536 V(usubw2, Usubw2) \
537 V(usubw, Usubw) \
538 V(uzp1, Uzp1) \
539 V(uzp2, Uzp2) \
540 V(zip1, Zip1) \
541 V(zip2, Zip2)
542
543#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
544 void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
545 DCHECK(allow_macro_instructions()); \
546 ASM(vd, vn, vm); \
547 }
548 NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
549#undef DEFINE_MACRO_ASM_FUNC
550
551 void Bcax(const VRegister& vd, const VRegister& vn, const VRegister& vm,
552 const VRegister& va) {
553 DCHECK(allow_macro_instructions());
554 bcax(vd, vn, vm, va);
555 }
556
557 void Eor3(const VRegister& vd, const VRegister& vn, const VRegister& vm,
558 const VRegister& va) {
559 DCHECK(allow_macro_instructions());
560 eor3(vd, vn, vm, va);
561 }
562
563 void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
564 DCHECK(allow_macro_instructions());
565 bic(vd, imm8, left_shift);
566 }
567
568 // This is required for compatibility in architecture independent code.
569 inline void jmp(Label* L);
570
571 void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
572 inline void B(Label* label);
573 inline void B(Condition cond, Label* label);
574 void B(Label* label, Condition cond);
575
576 void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
577 void Tbz(const Register& rt, unsigned bit_pos, Label* label);
578
579 void Cbnz(const Register& rt, Label* label);
580 void Cbz(const Register& rt, Label* label);
581
582 void Pacibsp() {
583 DCHECK(allow_macro_instructions_);
584 pacibsp();
585 }
586 void Autibsp() {
587 DCHECK(allow_macro_instructions_);
588 autibsp();
589 }
590
591 // The 1716 pac and aut instructions encourage people to use x16 and x17
592 // directly, perhaps without realising that this is forbidden. For example:
593 //
594 // UseScratchRegisterScope temps(&masm);
595 // Register temp = temps.AcquireX(); // temp will be x16
596 // __ Mov(x17, ptr);
597 // __ Mov(x16, modifier); // Will override temp!
598 // __ Pacib1716();
599 //
600 // To work around this issue, you must exclude x16 and x17 from the scratch
601 // register list. You may need to replace them with other registers:
602 //
603 // UseScratchRegisterScope temps(&masm);
604 // temps.Exclude(x16, x17);
605 // temps.Include(x10, x11);
606 // __ Mov(x17, ptr);
607 // __ Mov(x16, modifier);
608 // __ Pacib1716();
609 void Pacib1716() {
610 DCHECK(allow_macro_instructions_);
611 DCHECK(!TmpList()->IncludesAliasOf(x16));
612 DCHECK(!TmpList()->IncludesAliasOf(x17));
613 pacib1716();
614 }
615 void Autib1716() {
616 DCHECK(allow_macro_instructions_);
617 DCHECK(!TmpList()->IncludesAliasOf(x16));
618 DCHECK(!TmpList()->IncludesAliasOf(x17));
619 autib1716();
620 }
621
622 inline void Dmb(BarrierDomain domain, BarrierType type);
623 inline void Dsb(BarrierDomain domain, BarrierType type);
624 inline void Isb();
625 inline void Csdb();
626
627 inline void SmiUntag(Register dst, Register src);
628 inline void SmiUntag(Register dst, const MemOperand& src);
629 inline void SmiUntag(Register smi);
630
631 inline void SmiTag(Register dst, Register src);
632 inline void SmiTag(Register smi);
633
634 inline void SmiToInt32(Register smi);
635 inline void SmiToInt32(Register dst, Register smi);
636
637 // Calls Abort(msg) if the condition cond is not satisfied.
638 // Use --debug_code to enable.
639 void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
640
641 // Like Assert(), but without condition.
642 // Use --debug_code to enable.
643 void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE;
644
645 void AssertSmi(Register object,
646 AbortReason reason = AbortReason::kOperandIsNotASmi)
647 NOOP_UNLESS_DEBUG_CODE;
648
649 // Abort execution if argument is a smi, enabled via --debug-code.
650 void AssertNotSmi(Register object,
651 AbortReason reason = AbortReason::kOperandIsASmi)
652 NOOP_UNLESS_DEBUG_CODE;
653
654 // Abort execution if a 64 bit register containing a 32 bit payload does
655 // not have zeros in the top 32 bits, enabled via --debug-code.
656 void AssertZeroExtended(Register int32_register) NOOP_UNLESS_DEBUG_CODE;
657
658 void AssertJSAny(Register object, Register map_tmp, Register tmp,
659 AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE;
660
661 // Like Assert(), but always enabled.
662 void Check(Condition cond, AbortReason reason);
663
664 // Same as Check() but expresses that the check is needed for the sandbox.
665 void SbxCheck(Condition cc, AbortReason reason);
666
667 // Functions performing a check on a known or potential smi. Returns
668 // a condition that is satisfied if the check is successful.
669 Condition CheckSmi(Register src);
670
671 inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
672
673 void Trap();
674 void DebugBreak();
675
676 // Print a message to stderr and abort execution.
677 void Abort(AbortReason reason);
678
679 // Like printf, but print at run-time from generated code.
680 //
681 // The caller must ensure that arguments for floating-point placeholders
682 // (such as %e, %f or %g) are VRegisters, and that arguments for integer
683 // placeholders are Registers.
684 //
685 // Format placeholders that refer to more than one argument, or to a specific
686 // argument, are not supported. This includes formats like "%1$d" or "%.*d".
687 //
688 // This function automatically preserves caller-saved registers so that
689 // calling code can use Printf at any point without having to worry about
690 // corruption. The preservation mechanism generates a lot of code. If this is
691 // a problem, preserve the important registers manually and then call
692 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
693 // implicitly preserved.
694 void Printf(const char* format, CPURegister arg0 = NoCPUReg,
695 CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
696 CPURegister arg3 = NoCPUReg);
697
698 // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
699 //
700 // The return code from the system printf call will be returned in x0.
701 void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
702 const CPURegister& arg1 = NoCPUReg,
703 const CPURegister& arg2 = NoCPUReg,
704 const CPURegister& arg3 = NoCPUReg);
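// Illustrative usage (not part of the original header): integer placeholders
// take Registers, floating-point placeholders take VRegisters.
//
//   __ Mov(x0, 42);
//   __ Fmov(d0, 1.5);
//   __ Printf("x0: %" PRId64 ", d0: %f\n", x0, d0);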
705
706 // Remaining instructions are simple pass-through calls to the assembler.
707 inline void Asr(const Register& rd, const Register& rn, unsigned shift);
708 inline void Asr(const Register& rd, const Register& rn, const Register& rm);
709
710 // Try to move an immediate into the destination register in a single
711 // instruction. Returns true for success, and updates the contents of dst.
712 // Returns false, otherwise.
713 bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
714
715 inline void Bind(Label* label,
716 BranchTargetIdentifier id = BranchTargetIdentifier::kNone);
717
718 // Control-flow integrity:
719
720 // Define a function entrypoint.
721 inline void CodeEntry();
722 // Define an exception handler.
723 inline void ExceptionHandler();
724 // Define an exception handler and bind a label.
726
727 // Control-flow integrity:
728
729 // Define a jump (BR) target.
730 inline void JumpTarget();
731 // Define a jump (BR) target and bind a label.
732 inline void BindJumpTarget(Label* label);
733 // Define a call (BLR) target. The target also allows tail calls (via BR)
734 // when the target is x16 or x17.
735 inline void CallTarget();
736 // Define a jump/call target and bind a label.
737 inline void BindCallTarget(Label* label);
738 // Define a jump/call target.
739 inline void JumpOrCallTarget();
740 // Define a jump/call target and bind a label.
741 inline void BindJumpOrCallTarget(Label* label);
742
743 static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);
744
745 CPURegList* TmpList() { return &tmp_list_; }
746 CPURegList* FPTmpList() { return &fptmp_list_; }
747
748 static CPURegList DefaultTmpList();
749 static CPURegList DefaultFPTmpList();
750
751 // Move macros.
752 inline void Mvn(const Register& rd, uint64_t imm);
753 void Mvn(const Register& rd, const Operand& operand);
754 static bool IsImmMovn(uint64_t imm, unsigned reg_size);
755 static bool IsImmMovz(uint64_t imm, unsigned reg_size);
756
757 void LogicalMacro(const Register& rd, const Register& rn,
758 const Operand& operand, LogicalOp op);
759 void AddSubMacro(const Register& rd, const Register& rn,
760 const Operand& operand, FlagsUpdate S, AddSubOp op);
761 inline void Orr(const Register& rd, const Register& rn,
762 const Operand& operand);
763 void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
764 DCHECK(allow_macro_instructions());
765 orr(vd, imm8, left_shift);
766 }
767 inline void Orn(const Register& rd, const Register& rn,
768 const Operand& operand);
769 inline void Eor(const Register& rd, const Register& rn,
770 const Operand& operand);
771 inline void Eon(const Register& rd, const Register& rn,
772 const Operand& operand);
773 inline void And(const Register& rd, const Register& rn,
774 const Operand& operand);
775 inline void Ands(const Register& rd, const Register& rn,
776 const Operand& operand);
777 inline void Tst(const Register& rn, const Operand& operand);
778 inline void Bic(const Register& rd, const Register& rn,
779 const Operand& operand);
780 inline void Blr(const Register& xn);
781 inline void Cmp(const Register& rn, const Operand& operand);
782 inline void CmpTagged(const Register& rn, const Operand& operand);
783 inline void Subs(const Register& rd, const Register& rn,
784 const Operand& operand);
785 void Csel(const Register& rd, const Register& rn, const Operand& operand,
786 Condition cond);
787 inline void Fcsel(const VRegister& fd, const VRegister& fn,
788 const VRegister& fm, Condition cond);
789
790 // Checks if value is in range [lower_limit, higher_limit] using a single
791 // comparison. Condition `ls` indicates the value is in the range.
792 void CompareRange(Register value, Register scratch, unsigned lower_limit,
793 unsigned higher_limit);
794 void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit,
795 unsigned higher_limit, Label* on_in_range);
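// The single comparison works by unsigned wrap-around: scratch is set to
// value - lower_limit, which wraps to a huge unsigned number for any value
// below lower_limit, so comparing it against higher_limit - lower_limit
// makes `ls` hold exactly when lower_limit <= value <= higher_limit, e.g.:
//
//   __ CompareRange(w0, w1, 'a', 'z');
//   __ B(ls, &is_ascii_lower_case);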
796
797 // Emits a runtime assert that the stack pointer is aligned.
798 void AssertSpAligned() NOOP_UNLESS_DEBUG_CODE;
799
800 // Copy slot_count stack slots from the stack offset specified by src to
801 // the stack offset specified by dst. The offsets and count are expressed in
802 // slot-sized units. Offset dst must be less than src, or the gap between
803 // them must be greater than or equal to slot_count, otherwise the result is
804 // unpredictable. The function may corrupt its register arguments. The
805 // registers must not alias each other.
806 void CopySlots(int dst, Register src, Register slot_count);
807 void CopySlots(Register dst, Register src, Register slot_count);
808
809 // Copy count double words from the address in register src to the address
810 // in register dst. There are three modes for this function:
811 // 1) Address dst must be less than src, or the gap between them must be
812 // greater than or equal to count double words, otherwise the result is
813 // unpredictable. This is the default mode.
814 // 2) Address src must be less than dst, or the gap between them must be
815 // greater than or equal to count double words, otherwise the result is
816 // unpredictable. In this mode, src and dst specify the last (highest)
817 // address of the regions to copy from and to.
818 // 3) The same as mode 1, but the words are copied in the reversed order.
819 // The case where src == dst is not supported.
820 // The function may corrupt its register arguments. The registers must not
821 // alias each other.
822 enum CopyDoubleWordsMode {
823 kDstLessThanSrc,
824 kSrcLessThanDst,
825 kDstLessThanSrcAndReverse
826 };
827 void CopyDoubleWords(Register dst, Register src, Register count,
828 CopyDoubleWordsMode mode = kDstLessThanSrc);
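// Illustrative usage of the default mode (not part of the original header):
//
//   __ SlotAddress(x2, 0);           // dst: slot 0, the lowest address
//   __ SlotAddress(x3, 4);           // src: four slots higher
//   __ Mov(x4, 4);                   // count, in double words
//   __ CopyDoubleWords(x2, x3, x4);  // mode defaults to kDstLessThanSrc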
829
830 // Calculate the address of a double word-sized slot at slot_offset from the
831 // stack pointer, and write it to dst. Positive slot_offsets are at addresses
832 // greater than sp, with slot zero at sp.
833 void SlotAddress(Register dst, int slot_offset);
834 void SlotAddress(Register dst, Register slot_offset);
835
836 // Load a literal from the inline constant pool.
837 inline void Ldr(const CPURegister& rt, const Operand& imm);
838
839 // Claim or drop stack space.
840 //
841 // On Windows, Claim will write a value every 4k, as is required by the stack
842 // expansion mechanism.
843 //
844 // The stack pointer must be aligned to 16 bytes and the size claimed or
845 // dropped must be a multiple of 16 bytes.
846 //
847 // Note that unit_size must be specified in bytes. For variants which take a
848 // Register count, the unit size must be a power of two.
849 inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
850 inline void Claim(const Register& count, uint64_t unit_size = kXRegSize,
851 bool assume_sp_aligned = true);
852 inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
853 inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
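// Illustrative usage (not part of the original header):
//
//   __ Claim(2);  // reserve 2 * kXRegSize = 16 bytes; sp stays 16-byte
//                 // aligned
//   ...
//   __ Drop(2);   // release the same 16 bytes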
854
855 // Drop 'count' + 'extra_slots' arguments from the stack, rounded up to
856 // a multiple of two, without actually accessing memory.
857 // We assume the size of the arguments is the pointer size.
858 inline void DropArguments(const Register& count, int extra_slots = 0);
859 inline void DropArguments(int64_t count);
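// For example, DropArguments(3) removes three arguments but adjusts sp by
// four slots, since the slot count is rounded up to a multiple of two to
// keep sp 16-byte aligned.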
860
861 // Drop 'count' slots from stack, rounded up to a multiple of two, without
862 // actually accessing memory.
863 inline void DropSlots(int64_t count);
864
865 // Push a single argument, with padding, to the stack.
866 inline void PushArgument(const Register& arg);
867
868 // Add and sub macros.
869 inline void Add(const Register& rd, const Register& rn,
870 const Operand& operand);
871 inline void Adds(const Register& rd, const Register& rn,
872 const Operand& operand);
873 inline void Sub(const Register& rd, const Register& rn,
874 const Operand& operand);
875
876 // Abort execution if argument is not a positive or zero integer, enabled via
877 // --debug-code.
878 void AssertPositiveOrZero(Register value) NOOP_UNLESS_DEBUG_CODE;
879
880#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
881 inline void FN(const REGTYPE REG, const MemOperand& addr);
882 LS_MACRO_LIST(DECLARE_FUNCTION)
883#undef DECLARE_FUNCTION
884
885 // Caution: if {value} is a 32-bit negative int, it should be sign-extended
886 // to 64-bit before calling this function.
887 void Switch(Register scratch, Register value, int case_value_base,
888 Label** labels, int num_labels);
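// Illustrative usage (not part of the original header; the fall-through
// behaviour for out-of-range values is an assumption about this helper):
//
//   Label case0, case1, fallthrough;
//   Label* labels[] = {&case0, &case1};
//   __ Switch(x10, x0, 0, labels, 2);
//   // Values outside [0, 2) are assumed to fall through to here.
//   __ Bind(&fallthrough);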
889
890 // Push or pop up to 4 registers of the same width to or from the stack.
891 //
892 // If an argument register is 'NoReg', all further arguments are also assumed
893 // to be 'NoReg', and are thus not pushed or popped.
894 //
895 // Arguments are ordered such that "Push(a, b);" is functionally equivalent
896 // to "Push(a); Push(b);".
897 //
898 // It is valid to push the same register more than once, and there is no
899 // restriction on the order in which registers are specified.
900 //
901 // It is not valid to pop into the same register more than once in one
902 // operation, not even into the zero register.
903 //
904 // The stack pointer must be aligned to 16 bytes on entry and the total size
905 // of the specified registers must also be a multiple of 16 bytes.
906 //
907 // Other than the registers passed into Pop, the stack pointer, (possibly)
908 // the system stack pointer and (possibly) the link register, these methods
909 // do not modify any other registers.
910 //
911 // Some of the methods take an optional LoadLRMode or StoreLRMode template
912 // argument, which specifies whether we need to sign the link register at the
913 // start of the operation, or authenticate it at the end of the operation,
914 // when control flow integrity measures are enabled.
915 // When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
916 // argument to the operation.
917 enum LoadLRMode { kAuthLR, kDontLoadLR };
918 enum StoreLRMode { kSignLR, kDontStoreLR };
919 template <StoreLRMode lr_mode = kDontStoreLR>
920 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
921 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
922 void Push(const CPURegister& src0, const CPURegister& src1,
923 const CPURegister& src2, const CPURegister& src3,
924 const CPURegister& src4, const CPURegister& src5 = NoReg,
925 const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
926 template <LoadLRMode lr_mode = kDontLoadLR>
927 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
928 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
929 void Pop(const CPURegister& dst0, const CPURegister& dst1,
930 const CPURegister& dst2, const CPURegister& dst3,
931 const CPURegister& dst4, const CPURegister& dst5 = NoReg,
932 const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
933 template <StoreLRMode lr_mode = kDontStoreLR>
934 void Push(const Register& src0, const VRegister& src1);
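// Illustrative usage (not part of the original header):
//
//   __ Push(x0, x1);  // same as Push(x0); Push(x1); claims 16 bytes
//   __ Pop(x1, x0);   // pops in the reverse order of the Push above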
935
936 void MaybeSaveRegisters(RegList registers);
937 void MaybeRestoreRegisters(RegList registers);
938
939 void CallEphemeronKeyBarrier(Register object, Operand offset,
940 SaveFPRegsMode fp_mode);
941
942 void CallIndirectPointerBarrier(Register object, Operand offset,
943 SaveFPRegsMode fp_mode,
944 IndirectPointerTag tag);
945
946 void CallRecordWriteStubSaveRegisters(
947 Register object, Operand offset, SaveFPRegsMode fp_mode,
948 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
949 void CallRecordWriteStub(
950 Register object, Register slot_address, SaveFPRegsMode fp_mode,
951 StubCallMode mode = StubCallMode::kCallBuiltinPointer);
952
953 // For a given |object| and |offset|:
954 // - Move |object| to |dst_object|.
955 // - Compute the address of the slot pointed to by |offset| in |object| and
956 // write it to |dst_slot|.
957 // This method makes sure |object| and |offset| are allowed to overlap with
958 // the destination registers.
959 void MoveObjectAndSlot(Register dst_object, Register dst_slot,
960 Register object, Operand offset);
961
962 // Alternative forms of Push and Pop, taking a RegList or CPURegList that
963 // specifies the registers that are to be pushed or popped. Higher-numbered
964 // registers are associated with higher memory addresses (as in the A32 push
965 // and pop instructions).
966 //
967 // (Push|Pop)SizeRegList allow you to specify the register size as a
968 // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
969 // kSRegSizeInBits are supported.
970 //
971 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
972 void PushCPURegList(CPURegList registers);
973 void PopCPURegList(CPURegList registers);
974
975 // Calculate how much stack space (in bytes) is required to store caller
976 // registers, excluding those specified in the arguments.
977 int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
978 Register exclusion) const;
979
980 // Push caller saved registers on the stack, and return the number of bytes
981 // stack pointer is adjusted.
982 int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
983
984 // Restore caller saved registers from the stack, and return the number of
985 // bytes stack pointer is adjusted.
986 int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
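// Illustrative pairing (not part of the original header), preserving
// caller-saved registers around code that may clobber them:
//
//   __ PushCallerSaved(SaveFPRegsMode::kSave);
//   // ... code that clobbers caller-saved registers ...
//   __ PopCallerSaved(SaveFPRegsMode::kSave);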
987
988 // Move an immediate into register dst, and return an Operand object for use
989 // with a subsequent instruction that accepts a shift. The value moved into
990 // dst is not necessarily equal to imm; it may have had a shifting operation
991 // applied to it that will be subsequently undone by the shift applied in the
992 // Operand.
993 Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
994 PreShiftImmMode mode);
995
996 void CheckPageFlag(const Register& object, int mask, Condition cc,
997 Label* condition_met);
998
999 void CheckPageFlag(const Register& object, Register scratch, int mask,
1000 Condition cc, Label* condition_met) {
1001 CheckPageFlag(object, mask, cc, condition_met);
1002 }
1003
1004 // Compare a register with an operand, and branch to label depending on the
1005 // condition. May corrupt the status flags.
1006 inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
1007 Condition cond, Label* label);
1008 inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
1009 Condition cond, Label* label);
1010
1011 // Test the bits of register defined by bit_pattern, and branch if ANY of
1012 // those bits are set. May corrupt the status flags.
1013 inline void TestAndBranchIfAnySet(const Register& reg,
1014 const uint64_t bit_pattern, Label* label);
1015
1016 // Test the bits of register defined by bit_pattern, and branch if ALL of
1017 // those bits are clear (ie. not set.) May corrupt the status flags.
1018 inline void TestAndBranchIfAllClear(const Register& reg,
1019 const uint64_t bit_pattern, Label* label);
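// Illustrative usage (not part of the original header):
//
//   // Taken if bit 0 or bit 1 of x0 is set.
//   __ TestAndBranchIfAnySet(x0, 0x3, &any_low_bit_set);
//   // Taken if both bits are clear.
//   __ TestAndBranchIfAllClear(x0, 0x3, &low_bits_clear);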
1020
1021 inline void Brk(int code);
1022
1023 inline void JumpIfSmi(Register value, Label* smi_label,
1024 Label* not_smi_label = nullptr);
1025
1026 inline void JumpIf(Condition cond, Register x, int32_t y, Label* dest);
1027 inline void JumpIfEqual(Register x, int32_t y, Label* dest);
1028 inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
1029 inline void JumpIfUnsignedLessThan(Register x, int32_t y, Label* dest);
1030
1031 void JumpIfMarking(Label* is_marking,
1032 Label::Distance condition_met_distance = Label::kFar);
1033 void JumpIfNotMarking(Label* not_marking,
1034 Label::Distance condition_met_distance = Label::kFar);
1035
1036 void LoadMap(Register dst, Register object);
1037 void LoadCompressedMap(Register dst, Register object);
1038
1039 void LoadFeedbackVector(Register dst, Register closure, Register scratch,
1040 Label* fbv_undef);
1041
1042 inline void Fmov(VRegister fd, VRegister fn);
1043 inline void Fmov(VRegister fd, Register rn);
1044 // Provide explicit double and float interfaces for FP immediate moves, rather
1045 // than relying on implicit C++ casts. This allows signalling NaNs to be
1046 // preserved when the immediate matches the format of fd. Most systems convert
1047 // signalling NaNs to quiet NaNs when converting between float and double.
1048 inline void Fmov(VRegister fd, double imm);
1049 inline void Fmov(VRegister fd, float imm);
1050 // Provide a template to allow other types to be converted automatically.
1051 template <typename T>
1052 void Fmov(VRegister fd, T imm) {
1053 DCHECK(allow_macro_instructions());
1054 Fmov(fd, static_cast<double>(imm));
1055 }
1056 inline void Fmov(Register rd, VRegister fn);
1057
1058 void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
1059 int shift_amount = 0);
1060 void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
1061
1062 void LoadFromConstantsTable(Register destination, int constant_index) final;
1063 void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
1064 void LoadRootRelative(Register destination, int32_t offset) final;
1065 void StoreRootRelative(int32_t offset, Register value) final;
1066
1067 // Operand pointing to an external reference.
1068 // May emit code to set up the scratch register. The operand is
1069 // only guaranteed to be correct as long as the scratch register
1070 // isn't changed.
1071 // If the operand is used more than once, use a scratch register
1072 // that is guaranteed not to be clobbered.
1073 MemOperand ExternalReferenceAsOperand(ExternalReference reference,
1074 Register scratch);
1075 MemOperand ExternalReferenceAsOperand(IsolateFieldId id) {
1076 return ExternalReferenceAsOperand(ExternalReference::Create(id), no_reg);
1077 }
1078
1079 void Jump(Register target, Condition cond = al);
1080 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
1081 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
1082 void Jump(const ExternalReference& reference);
1083
1084 void Call(Register target);
1085 void Call(Address target, RelocInfo::Mode rmode);
1086 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
1087 void Call(ExternalReference target);
1088
1089 // Generate an indirect call (for when a direct call's range is not adequate).
1090 void IndirectCall(Address target, RelocInfo::Mode rmode);
1091
1092 // Load the builtin given by the Smi in |builtin| into |target|.
1093 void LoadEntryFromBuiltinIndex(Register builtin, Register target);
1094 void LoadEntryFromBuiltin(Builtin builtin, Register destination);
1095 MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
1096 void CallBuiltinByIndex(Register builtin, Register target);
1097 void CallBuiltin(Builtin builtin);
1098 void TailCallBuiltin(Builtin builtin, Condition cond = al);
1099
1100 // Load code entry point from the Code object.
1101 void LoadCodeInstructionStart(Register destination, Register code_object,
1102 CodeEntrypointTag tag);
1103 void CallCodeObject(Register code_object, CodeEntrypointTag tag);
1104 void JumpCodeObject(Register code_object, CodeEntrypointTag tag,
1105 JumpMode jump_mode = JumpMode::kJump);
1106
1107 // Convenience functions to call/jmp to the code of a JSFunction object.
1108 // TODO(42204201): These don't work properly with leaptiering as we need to
1109 // validate the parameter count at runtime. Instead, we should replace them
1110 // with CallJSDispatchEntry that generates a call to a given (compile-time
1111 // constant) JSDispatchHandle.
1112 void CallJSFunction(Register function_object, uint16_t argument_count);
1113 void JumpJSFunction(Register function_object,
1114 JumpMode jump_mode = JumpMode::kJump);
1115#ifdef V8_ENABLE_LEAPTIERING
1116 void CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
1117 uint16_t argument_count);
1118#endif
1119#ifdef V8_ENABLE_WEBASSEMBLY
1120 void ResolveWasmCodePointer(Register target, uint64_t signature_hash);
1121 void CallWasmCodePointer(Register target, uint64_t signature_hash,
1122 CallJumpMode call_jump_mode = CallJumpMode::kCall);
1123 void CallWasmCodePointerNoSignatureCheck(Register target);
1124 void LoadWasmCodePointer(Register dst, MemOperand src);
1125#endif
1126
1127 // Generates an instruction sequence s.t. the return address points to the
1128 // instruction following the call.
1129 // The return address on the stack is used by frame iteration.
1130 void StoreReturnAddressAndCall(Register target);
1131
1132 // TODO(olivf, 42204201) Rename this to AssertNotDeoptimized once
1133 // non-leaptiering is removed from the codebase.
1134 void BailoutIfDeoptimized();
1135 void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
1136 DeoptimizeKind kind, Label* ret,
1137 Label* jump_deoptimization_entry_label);
1138
1139 // Calls a C function and cleans up the space for arguments allocated
1140 // by PrepareCallCFunction. The called function is not allowed to trigger a
1141 // garbage collection, since that might move the code and invalidate the
1142 // return address (unless this is somehow accounted for by the called
1143 // function).
1144 int CallCFunction(
1145 ExternalReference function, int num_reg_arguments,
1146 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
1147 Label* return_location = nullptr);
1148 int CallCFunction(
1149 ExternalReference function, int num_reg_arguments,
1150 int num_double_arguments,
1151 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
1152 Label* return_location = nullptr);
1153 int CallCFunction(
1154 Register function, int num_reg_arguments, int num_double_arguments,
1155 SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes,
1156 Label* return_location = nullptr);
1157
1158 // Performs a truncating conversion of a floating point number as used by
1159 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1160 // Exits with 'result' holding the answer.
1161 void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
1162 DoubleRegister double_input, StubCallMode stub_mode,
1163 LinkRegisterStatus lr_status);
1164
1165 inline void Mul(const Register& rd, const Register& rn, const Register& rm);
1166
1167 inline void Fcvtzs(const Register& rd, const VRegister& fn);
1168 void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
1169 DCHECK(allow_macro_instructions());
1170 fcvtzs(vd, vn, fbits);
1171 }
1172
1173 void Fjcvtzs(const Register& rd, const VRegister& vn) {
1174 DCHECK(allow_macro_instructions());
1175 DCHECK(!rd.IsZero());
1176 fjcvtzs(rd, vn);
1177 }
1178
1179 inline void Fcvtzu(const Register& rd, const VRegister& fn);
1180 void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
1181 DCHECK(allow_macro_instructions());
1182 fcvtzu(vd, vn, fbits);
1183 }
1184
1185 inline void Madd(const Register& rd, const Register& rn, const Register& rm,
1186 const Register& ra);
1187 inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
1188 inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
1189 inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
1190 inline void Msub(const Register& rd, const Register& rn, const Register& rm,
1191 const Register& ra);
1192
1193 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
1194 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
1195 inline void Umull(const Register& rd, const Register& rn, const Register& rm);
1196 inline void Umulh(const Register& rd, const Register& rn, const Register& rm);
1197 inline void Smull(const Register& rd, const Register& rn, const Register& rm);
1198 inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
1199
1200 inline void Sxtb(const Register& rd, const Register& rn);
1201 inline void Sxth(const Register& rd, const Register& rn);
1202 inline void Sxtw(const Register& rd, const Register& rn);
1203 inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
1204 unsigned width);
1205 inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
1206 unsigned width);
1207 inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
1208 unsigned width);
1209 inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
1210 inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
1211 inline void Ror(const Register& rd, const Register& rs, unsigned shift);
1212 inline void Ror(const Register& rd, const Register& rn, const Register& rm);
1213 inline void Cmn(const Register& rn, const Operand& operand);
1214 inline void Fadd(const VRegister& fd, const VRegister& fn,
1215 const VRegister& fm);
1216 inline void Fcmp(const VRegister& fn, const VRegister& fm);
1217 inline void Fcmp(const VRegister& fn, double value);
1218 inline void Fabs(const VRegister& fd, const VRegister& fn);
1219 inline void Fmul(const VRegister& fd, const VRegister& fn,
1220 const VRegister& fm);
1221 inline void Fsub(const VRegister& fd, const VRegister& fn,
1222 const VRegister& fm);
1223 inline void Fdiv(const VRegister& fd, const VRegister& fn,
1224 const VRegister& fm);
1225 inline void Fmax(const VRegister& fd, const VRegister& fn,
1226 const VRegister& fm);
1227 inline void Fmin(const VRegister& fd, const VRegister& fn,
1228 const VRegister& fm);
1229 inline void Rbit(const Register& rd, const Register& rn);
1230 inline void Rev(const Register& rd, const Register& rn);
1231
1232 enum AdrHint {
1233 // The target must be within the immediate range of adr.
1234 kAdrNear,
1235 // The target may be outside of the immediate range of adr. Additional
1236 // instructions may be emitted.
1237 kAdrFar
1238 };
1239 void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
1240
1241 // Add/sub with carry macros.
1242 inline void Adc(const Register& rd, const Register& rn,
1243 const Operand& operand);
1244
1245 // Conditional macros.
1246 inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
1247 Condition cond);
1248 inline void CcmpTagged(const Register& rn, const Operand& operand,
1249 StatusFlags nzcv, Condition cond);
1250 inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
1251 Condition cond);
1252
1253 inline void Clz(const Register& rd, const Register& rn);
1254
1255 // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
1256 // be 16 byte aligned.
1257 // When the optional template argument is kSignLR and control flow integrity
1258 // measures are enabled, we sign the link register before poking it onto the
1259 // stack. 'src' must be lr in this case.
1260 template <StoreLRMode lr_mode = kDontStoreLR>
1261 void Poke(const CPURegister& src, const Operand& offset);
1262
1263 // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
1264 // The stack pointer must be aligned to 16 bytes.
1265 // When the optional template argument is kAuthLR and control flow integrity
1266 // measures are enabled, we authenticate the link register after peeking the
1267 // value. 'dst' must be lr in this case.
1268 template <LoadLRMode lr_mode = kDontLoadLR>
1269 void Peek(const CPURegister& dst, const Operand& offset);
1270
1271 // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
1272 // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
1273 // stack pointer must be 16 byte aligned.
1274 void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
1275
1276 inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
1277 unsigned width);
1278
1279 inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
1280 unsigned width);
1281
1282 inline void Scvtf(const VRegister& fd, const Register& rn,
1283 unsigned fbits = 0);
1284 void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
1285 DCHECK(allow_macro_instructions());
1286 scvtf(vd, vn, fbits);
1287 }
1288 inline void Ucvtf(const VRegister& fd, const Register& rn,
1289 unsigned fbits = 0);
1290 void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
1291 DCHECK(allow_macro_instructions());
1292 ucvtf(vd, vn, fbits);
1293 }
1294
1295 // If the input is a NaN, canonicalize it; otherwise leave the value as is.
1296 void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
1297 void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
1298
1299 inline void CmovX(const Register& rd, const Register& rn, Condition cond);
1300 inline void Cset(const Register& rd, Condition cond);
1301 inline void Csetm(const Register& rd, Condition cond);
1302 inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
1303 Condition cond);
1304 inline void Fccmp(const VRegister& fn, const double value, StatusFlags nzcv,
1305 Condition cond);
1306 inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
1307 Condition cond);
1308
1309 inline void Fcvt(const VRegister& fd, const VRegister& fn);
1310
1312
1313 void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
1314 int vn_index) {
1315 DCHECK(allow_macro_instructions());
1316 ins(vd, vd_index, vn, vn_index);
1317 }
1318 void Ins(const VRegister& vd, int vd_index, const Register& rn) {
1319 DCHECK(allow_macro_instructions());
1320 ins(vd, vd_index, rn);
1321 }
1322
1323 inline void Bl(Label* label);
1324 inline void Br(const Register& xn);
1325
1326 inline void Uxtb(const Register& rd, const Register& rn);
1327 inline void Uxth(const Register& rd, const Register& rn);
1328 inline void Uxtw(const Register& rd, const Register& rn);
1329
1330 void Dup(const VRegister& vd, const VRegister& vn, int index) {
1331 DCHECK(allow_macro_instructions());
1332 dup(vd, vn, index);
1333 }
1334 void Dup(const VRegister& vd, const Register& rn) {
1335 DCHECK(allow_macro_instructions());
1336 dup(vd, rn);
1337 }
1338
1339#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
1340 inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
1341 LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
1342#undef DECLARE_FUNCTION
1343
1344 void St1(const VRegister& vt, const MemOperand& dst) {
1345 DCHECK(allow_macro_instructions());
1346 st1(vt, dst);
1347 }
1348 void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
1349 DCHECK(allow_macro_instructions());
1350 st1(vt, vt2, dst);
1351 }
1352 void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1353 const MemOperand& dst) {
1354 DCHECK(allow_macro_instructions());
1355 st1(vt, vt2, vt3, dst);
1356 }
1357 void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1358 const VRegister& vt4, const MemOperand& dst) {
1359 DCHECK(allow_macro_instructions());
1360 st1(vt, vt2, vt3, vt4, dst);
1361 }
1362 void St1(const VRegister& vt, int lane, const MemOperand& dst) {
1363 DCHECK(allow_macro_instructions());
1364 st1(vt, lane, dst);
1365 }
1366
1367#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
1368 V(rshrn, Rshrn) \
1369 V(rshrn2, Rshrn2) \
1370 V(shl, Shl) \
1371 V(shll, Shll) \
1372 V(shll2, Shll2) \
1373 V(shrn, Shrn) \
1374 V(shrn2, Shrn2) \
1375 V(sli, Sli) \
1376 V(sqrshrn, Sqrshrn) \
1377 V(sqrshrn2, Sqrshrn2) \
1378 V(sqrshrun, Sqrshrun) \
1379 V(sqrshrun2, Sqrshrun2) \
1380 V(sqshl, Sqshl) \
1381 V(sqshlu, Sqshlu) \
1382 V(sqshrn, Sqshrn) \
1383 V(sqshrn2, Sqshrn2) \
1384 V(sqshrun, Sqshrun) \
1385 V(sqshrun2, Sqshrun2) \
1386 V(sri, Sri) \
1387 V(srshr, Srshr) \
1388 V(srsra, Srsra) \
1389 V(sshll, Sshll) \
1390 V(sshll2, Sshll2) \
1391 V(sshr, Sshr) \
1392 V(ssra, Ssra) \
1393 V(uqrshrn, Uqrshrn) \
1394 V(uqrshrn2, Uqrshrn2) \
1395 V(uqshl, Uqshl) \
1396 V(uqshrn, Uqshrn) \
1397 V(uqshrn2, Uqshrn2) \
1398 V(urshr, Urshr) \
1399 V(ursra, Ursra) \
1400 V(ushll, Ushll) \
1401 V(ushll2, Ushll2) \
1402 V(ushr, Ushr) \
1403 V(usra, Usra)
1404
1405#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
1406 void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
1407 DCHECK(allow_macro_instructions()); \
1408 ASM(vd, vn, shift); \
1409 }
1410 NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
1411#undef DEFINE_MACRO_ASM_FUNC
1412
1413 void Umov(const Register& rd, const VRegister& vn, int vn_index) {
1414 DCHECK(allow_macro_instructions());
1415 umov(rd, vn, vn_index);
1416 }
1417 void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1418 DCHECK(allow_macro_instructions());
1419 tbl(vd, vn, vm);
1420 }
1421 void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1422 const VRegister& vm) {
1423 DCHECK(allow_macro_instructions());
1424 tbl(vd, vn, vn2, vm);
1425 }
1426 void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1427 const VRegister& vn3, const VRegister& vm) {
1428 DCHECK(allow_macro_instructions());
1429 tbl(vd, vn, vn2, vn3, vm);
1430 }
1431 void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1432 const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
1433 DCHECK(allow_macro_instructions());
1434 tbl(vd, vn, vn2, vn3, vn4, vm);
1435 }
1436 void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
1437 int index) {
1438 DCHECK(allow_macro_instructions());
1439 ext(vd, vn, vm, index);
1440 }
1441
1442 void Smov(const Register& rd, const VRegister& vn, int vn_index) {
1443 DCHECK(allow_macro_instructions());
1444 smov(rd, vn, vn_index);
1445 }
1446
1447// Load-acquire/store-release macros.
1448#define DECLARE_FUNCTION(FN, OP) \
1449 inline void FN(const Register& rt, const Register& rn);
1450 LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
1451#undef DECLARE_FUNCTION
1452
1453#define DECLARE_FUNCTION(FN, OP) \
1454 inline void FN(const Register& rs, const Register& rt, const MemOperand& src);
1455 CAS_SINGLE_MACRO_LIST(DECLARE_FUNCTION)
1456#undef DECLARE_FUNCTION
1457
1458#define DECLARE_FUNCTION(FN, OP) \
1459 inline void FN(const Register& rs, const Register& rs2, const Register& rt, \
1460 const Register& rt2, const MemOperand& src);
1461 CAS_PAIR_MACRO_LIST(DECLARE_FUNCTION)
1462#undef DECLARE_FUNCTION
1463
1464#define DECLARE_LOAD_FUNCTION(FN, OP) \
1465 inline void FN(const Register& rs, const Register& rt, const MemOperand& src);
1466#define DECLARE_STORE_FUNCTION(FN, OP) \
1467 inline void FN(const Register& rs, const MemOperand& src);
1468
1469 ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
1470 DECLARE_LOAD_FUNCTION, Ld, ld)
1471 ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
1472 DECLARE_STORE_FUNCTION, St, st)
1473
1474#define DECLARE_SWP_FUNCTION(FN, OP) \
1475 inline void FN(const Register& rs, const Register& rt, const MemOperand& src);
1476
1477 DECLARE_SWP_FUNCTION(Swp, swp)
1478
1479#undef DECLARE_LOAD_FUNCTION
1480#undef DECLARE_STORE_FUNCTION
1481#undef DECLARE_SWP_FUNCTION
1482
1483 // Load an object from the root table.
1484 void LoadRoot(Register destination, RootIndex index) final;
1485 void LoadTaggedRoot(Register destination, RootIndex index);
1486 void PushRoot(RootIndex index);
1487
1488 inline void Ret(const Register& xn = lr);
1489
1490 // Perform a conversion from a double to a signed int64. If the input fits in
1491 // range of the 64-bit result, execution branches to done. Otherwise,
1492 // execution falls through, and the sign of the result can be used to
1493 // determine if overflow was towards positive or negative infinity.
1494 //
1495 // On successful conversion, the least significant 32 bits of the result are
1496 // equivalent to the ECMA-262 operation "ToInt32".
1497 void TryConvertDoubleToInt64(Register result, DoubleRegister input,
1498 Label* done);
1499
1500 inline void Mrs(const Register& rt, SystemRegister sysreg);
1501 inline void Msr(SystemRegister sysreg, const Register& rt);
1502
1503 // Prologue claims an extra slot due to arm64's alignment constraints.
1504 static constexpr int kExtraSlotClaimedByPrologue = 1;
1505 // Generates function prologue code.
1506 void Prologue();
1507
1508 void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
1509 DCHECK(allow_macro_instructions());
1510 cmgt(vd, vn, imm);
1511 }
1512 void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
1513 DCHECK(allow_macro_instructions());
1514 cmge(vd, vn, imm);
1515 }
1516 void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
1517 DCHECK(allow_macro_instructions());
1518 cmeq(vd, vn, imm);
1519 }
1520 void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
1521 DCHECK(allow_macro_instructions());
1522 cmlt(vd, vn, imm);
1523 }
1524 void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
1525 DCHECK(allow_macro_instructions());
1526 cmle(vd, vn, imm);
1527 }
1528
1529 inline void Neg(const Register& rd, const Operand& operand);
1530 inline void Negs(const Register& rd, const Operand& operand);
1531
1532 // Compute rd = abs(rm).
1533 // This function clobbers the condition flags. On output the overflow flag is
1534 // set iff the negation overflowed.
1535 //
1536 // If rm is the minimum representable value, the result is not representable.
1537 // Handlers for each case can be specified using the relevant labels.
1538 void Abs(const Register& rd, const Register& rm,
1539 Label* is_not_representable = nullptr,
1540 Label* is_representable = nullptr);
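// For example (illustrative, not part of the original header): negating the
// minimum 64-bit value overflows, since +2^63 is not representable, so the
// is_not_representable label is taken here:
//
//   __ Mov(x1, 0x8000000000000000);  // INT64_MIN
//   __ Abs(x0, x1, &not_representable);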
1541
1542 inline void Cls(const Register& rd, const Register& rn);
1543 inline void Cneg(const Register& rd, const Register& rn, Condition cond);
1544 inline void Rev16(const Register& rd, const Register& rn);
1545 inline void Rev32(const Register& rd, const Register& rn);
1546 inline void Fcvtns(const Register& rd, const VRegister& fn);
1547 inline void Fcvtnu(const Register& rd, const VRegister& fn);
1548 inline void Fcvtms(const Register& rd, const VRegister& fn);
1549 inline void Fcvtmu(const Register& rd, const VRegister& fn);
1550 inline void Fcvtas(const Register& rd, const VRegister& fn);
1551 inline void Fcvtau(const Register& rd, const VRegister& fn);
1552
1553 // Compute the start of the generated instruction stream from the current PC.
1554 // This is an alternative to embedding the {CodeObject} handle as a reference.
1555 void ComputeCodeStartAddress(const Register& rd);
1556
1557 // ---------------------------------------------------------------------------
1558 // Pointer compression Support
1559
1560 // Loads a field containing any tagged value and decompresses it if necessary.
1561 void LoadTaggedField(const Register& destination,
1562 const MemOperand& field_operand);
1563
1564 // Loads a field containing any tagged value but never decompresses it.
1565 void LoadTaggedFieldWithoutDecompressing(const Register& destination,
1566 const MemOperand& field_operand);
1567
1568 // Loads a field containing a tagged signed value and decompresses it if
1569 // necessary.
1570 void LoadTaggedSignedField(const Register& destination,
1571 const MemOperand& field_operand);
1572
1573 // Loads a field containing smi value and untags it.
1574 void SmiUntagField(Register dst, const MemOperand& src);
1575
1576 // Compresses and stores tagged value to given on-heap location.
1577 void StoreTaggedField(const Register& value,
1578 const MemOperand& dst_field_operand);
1579 void StoreTwoTaggedFields(const Register& value,
1580 const MemOperand& dst_field_operand);
1581
1582 // For compatibility with platform-independent code.
1583 void StoreTaggedField(const MemOperand& dst_field_operand,
1584 const Register& value) {
1585 StoreTaggedField(value, dst_field_operand);
1586 }
1587
1588 void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
1589 const Register& dst_index, const Register& temp);
1590
1591 void DecompressTaggedSigned(const Register& destination,
1592 const MemOperand& field_operand);
1593 void DecompressTagged(const Register& destination,
1594 const MemOperand& field_operand);
1595 void DecompressTagged(const Register& destination, const Register& source);
1596 void DecompressTagged(const Register& destination, Tagged_t immediate);
1597 void DecompressProtected(const Register& destination,
1598 const MemOperand& field_operand);
1599
1600 void AtomicDecompressTaggedSigned(const Register& destination,
1601 const Register& base, const Register& index,
1602 const Register& temp);
1603 void AtomicDecompressTagged(const Register& destination, const Register& base,
1604 const Register& index, const Register& temp);
1605
1606 // Restore FP and LR from the values stored in the current frame. This will
1607 // authenticate the LR when pointer authentication is enabled.
1608 void RestoreFPAndLR();
1609
1610#if V8_ENABLE_WEBASSEMBLY
1611 void StoreReturnAddressInWasmExitFrame(Label* return_location);
1612#endif // V8_ENABLE_WEBASSEMBLY
1613
1614 // Wasm helpers. These instructions don't have direct lowering
1615 // to native instructions. These helpers allow us to define the optimal code
1616 // sequence, which can be used in both TurboFan and Liftoff.
1617 void PopcntHelper(Register dst, Register src);
1618 void I8x16BitMask(Register dst, VRegister src, VRegister temp = NoVReg);
1619 void I16x8BitMask(Register dst, VRegister src);
1620 void I32x4BitMask(Register dst, VRegister src);
1621 void I64x2BitMask(Register dst, VRegister src);
1622 void I64x2AllTrue(Register dst, VRegister src);
1623
1624 // ---------------------------------------------------------------------------
1625 // V8 Sandbox support
1626
1627 // Transform a SandboxedPointer from/to its encoded form, which is used when
1628 // the pointer is stored on the heap and ensures that the pointer will always
1629 // point into the sandbox.
1630 void DecodeSandboxedPointer(Register value);
1631 void LoadSandboxedPointerField(Register destination,
1632 MemOperand field_operand);
1633 void StoreSandboxedPointerField(Register value, MemOperand dst_field_operand);
1634
1635 // Loads a field containing an off-heap ("external") pointer and does
1636 // necessary decoding if the sandbox is enabled.
1637 void LoadExternalPointerField(Register destination, MemOperand field_operand,
1638 ExternalPointerTagRange tag_range,
1639 Register isolate_root = Register::no_reg());
1640
1641 // Load a trusted pointer field.
1642 // When the sandbox is enabled, these are indirect pointers using the trusted
1643 // pointer table. Otherwise they are regular tagged fields.
1644 void LoadTrustedPointerField(Register destination, MemOperand field_operand,
1645 IndirectPointerTag tag);
1646 // Store a trusted pointer field.
1647 void StoreTrustedPointerField(Register value, MemOperand dst_field_operand);
1648
1649 // Load a code pointer field.
1650 // These are special versions of trusted pointers that, when the sandbox is
1651 // enabled, reference code objects through the code pointer table.
1652 void LoadCodePointerField(Register destination, MemOperand field_operand) {
1653 LoadTrustedPointerField(destination, field_operand,
1654 kCodeIndirectPointerTag);
1655 }
1656 // Store a code pointer field.
1657 void StoreCodePointerField(Register value, MemOperand dst_field_operand) {
1658 StoreTrustedPointerField(value, dst_field_operand);
1659 }
1660
1661 // Load an indirect pointer field.
1662 // Only available when the sandbox is enabled, but always visible to avoid
1663 // having to place the #ifdefs into the caller.
1664 void LoadIndirectPointerField(Register destination, MemOperand field_operand,
1665 IndirectPointerTag tag);
1666
1667 // Store an indirect pointer field.
1668 // Only available when the sandbox is enabled, but always visible to avoid
1669 // having to place the #ifdefs into the caller.
1670 void StoreIndirectPointerField(Register value, MemOperand dst_field_operand);
1671
1672#ifdef V8_ENABLE_SANDBOX
1673 // Retrieve the heap object referenced by the given indirect pointer handle,
1674 // which can either be a trusted pointer handle or a code pointer handle.
1675 void ResolveIndirectPointerHandle(Register destination, Register handle,
1676 IndirectPointerTag tag);
1677
1678 // Retrieve the heap object referenced by the given trusted pointer handle.
1679 void ResolveTrustedPointerHandle(Register destination, Register handle,
1680 IndirectPointerTag tag);
1681
1682 // Retrieve the Code object referenced by the given code pointer handle.
1683 void ResolveCodePointerHandle(Register destination, Register handle);
1684
1685 // Load the pointer to a Code's entrypoint via a code pointer.
1686 // Only available when the sandbox is enabled as it requires the code pointer
1687 // table.
1688 void LoadCodeEntrypointViaCodePointer(Register destination,
1689 MemOperand field_operand,
1690 CodeEntrypointTag tag);
1691
1692 // Load the value of Code pointer table corresponding to
1693 // IsolateGroup::current()->code_pointer_table_.
1694 // Only available when the sandbox is enabled.
1695 void LoadCodePointerTableBase(Register destination);
1696#endif
1697
1698#ifdef V8_ENABLE_LEAPTIERING
1699 void LoadEntrypointFromJSDispatchTable(Register destination,
1700 Register dispatch_handle,
1701 Register scratch);
1702 void LoadEntrypointFromJSDispatchTable(Register destination,
1703 JSDispatchHandle dispatch_handle,
1704 Register scratch);
1705 void LoadParameterCountFromJSDispatchTable(Register destination,
1706 Register dispatch_handle,
1707 Register scratch);
1708 void LoadEntrypointAndParameterCountFromJSDispatchTable(
1709 Register entrypoint, Register parameter_count, Register dispatch_handle,
1710 Register scratch);
1711#endif // V8_ENABLE_LEAPTIERING
1712
1713 // Load a protected pointer field.
1714 void LoadProtectedPointerField(Register destination,
1715 MemOperand field_operand);
1716
1717 // Instruction set functions ------------------------------------------------
1718 // Logical macros.
1719 inline void Bics(const Register& rd, const Register& rn,
1720 const Operand& operand);
1721
1722 inline void Adcs(const Register& rd, const Register& rn,
1723 const Operand& operand);
1724 inline void Sbc(const Register& rd, const Register& rn,
1725 const Operand& operand);
1726 inline void Sbcs(const Register& rd, const Register& rn,
1727 const Operand& operand);
1728 inline void Ngc(const Register& rd, const Operand& operand);
1729 inline void Ngcs(const Register& rd, const Operand& operand);
1730
1731#define DECLARE_FUNCTION(FN, OP) \
1732 inline void FN(const Register& rs, const Register& rt, const Register& rn);
1733 STLX_MACRO_LIST(DECLARE_FUNCTION)
1734#undef DECLARE_FUNCTION
1735
1736 // Branch type inversion relies on these relations.
1737 static_assert((reg_zero == (reg_not_zero ^ 1)) &&
1738 (reg_bit_clear == (reg_bit_set ^ 1)) &&
1739 (always == (never ^ 1)));
1740
1741 inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
1742 unsigned width);
1743 inline void Cinc(const Register& rd, const Register& rn, Condition cond);
1744 inline void Cinv(const Register& rd, const Register& rn, Condition cond);
1745 inline void CzeroX(const Register& rd, Condition cond);
1746 inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
1747 Condition cond);
1748 inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
1749 Condition cond);
1750 inline void Extr(const Register& rd, const Register& rn, const Register& rm,
1751 unsigned lsb);
1752 void Fcvtl2(const VRegister& vd, const VRegister& vn) {
1753 DCHECK(allow_macro_instructions());
1754 fcvtl2(vd, vn);
1755 }
1756 void Fcvtn2(const VRegister& vd, const VRegister& vn) {
1757 DCHECK(allow_macro_instructions());
1758 fcvtn2(vd, vn);
1759 }
1760 void Fcvtxn(const VRegister& vd, const VRegister& vn) {
1761 DCHECK(allow_macro_instructions());
1762 fcvtxn(vd, vn);
1763 }
1764 void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
1765 DCHECK(allow_macro_instructions());
1766 fcvtxn2(vd, vn);
1767 }
1768 inline void Fmadd(const VRegister& fd, const VRegister& fn,
1769 const VRegister& fm, const VRegister& fa);
1770 inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
1771 const VRegister& fm);
1772 inline void Fminnm(const VRegister& fd, const VRegister& fn,
1773 const VRegister& fm);
1774 inline void Fmsub(const VRegister& fd, const VRegister& fn,
1775 const VRegister& fm, const VRegister& fa);
1776 inline void Fnmadd(const VRegister& fd, const VRegister& fn,
1777 const VRegister& fm, const VRegister& fa);
1778 inline void Fnmsub(const VRegister& fd, const VRegister& fn,
1779 const VRegister& fm, const VRegister& fa);
1780 inline void Hint(SystemHint code);
1781 inline void Hlt(int code);
1782 inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
1783 const MemOperand& src);
1784 inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
1785 inline void Nop() { nop(); }
1786 void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
1787 const int shift_amount = 0) {
1788 DCHECK(allow_macro_instructions());
1789 mvni(vd, imm8, shift, shift_amount);
1790 }
1791 inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
1792 const Register& ra);
1793 inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
1794 const Register& ra);
1795 inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
1796 const MemOperand& dst);
1797 inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
1798 const Register& ra);
1799 inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
1800 const Register& ra);
1801
1802 void Ld1(const VRegister& vt, const MemOperand& src) {
1803 DCHECK(allow_macro_instructions());
1804 ld1(vt, src);
1805 }
1806 void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
1807 DCHECK(allow_macro_instructions());
1808 ld1(vt, vt2, src);
1809 }
1810 void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1811 const MemOperand& src) {
1812 DCHECK(allow_macro_instructions());
1813 ld1(vt, vt2, vt3, src);
1814 }
1815 void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1816 const VRegister& vt4, const MemOperand& src) {
1817 DCHECK(allow_macro_instructions());
1818 ld1(vt, vt2, vt3, vt4, src);
1819 }
1820 void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
1821 DCHECK(allow_macro_instructions());
1822 ld1(vt, lane, src);
1823 }
1824 void Ld1r(const VRegister& vt, const MemOperand& src) {
1825 DCHECK(allow_macro_instructions());
1826 ld1r(vt, src);
1827 }
1828 void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
1829 DCHECK(allow_macro_instructions());
1830 ld2(vt, vt2, src);
1831 }
1832 void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
1833 const MemOperand& src) {
1834 DCHECK(allow_macro_instructions());
1835 ld2(vt, vt2, lane, src);
1836 }
1837 void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
1838 DCHECK(allow_macro_instructions());
1839 ld2r(vt, vt2, src);
1840 }
1841 void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1842 const MemOperand& src) {
1843 DCHECK(allow_macro_instructions());
1844 ld3(vt, vt2, vt3, src);
1845 }
1846 void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1847 int lane, const MemOperand& src) {
1848 DCHECK(allow_macro_instructions());
1849 ld3(vt, vt2, vt3, lane, src);
1850 }
1851 void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1852 const MemOperand& src) {
1853 DCHECK(allow_macro_instructions());
1854 ld3r(vt, vt2, vt3, src);
1855 }
1856 void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1857 const VRegister& vt4, const MemOperand& src) {
1858 DCHECK(allow_macro_instructions());
1859 ld4(vt, vt2, vt3, vt4, src);
1860 }
1861 void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1862 const VRegister& vt4, int lane, const MemOperand& src) {
1863 DCHECK(allow_macro_instructions());
1864 ld4(vt, vt2, vt3, vt4, lane, src);
1865 }
1866 void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1867 const VRegister& vt4, const MemOperand& src) {
1868 DCHECK(allow_macro_instructions());
1869 ld4r(vt, vt2, vt3, vt4, src);
1870 }
1871 void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
1872 DCHECK(allow_macro_instructions());
1873 st2(vt, vt2, dst);
1874 }
1875 void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1876 const MemOperand& dst) {
1877 DCHECK(allow_macro_instructions());
1878 st3(vt, vt2, vt3, dst);
1879 }
1880 void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1881 const VRegister& vt4, const MemOperand& dst) {
1882 DCHECK(allow_macro_instructions());
1883 st4(vt, vt2, vt3, vt4, dst);
1884 }
1885 void St2(const VRegister& vt, const VRegister& vt2, int lane,
1886 const MemOperand& dst) {
1887 DCHECK(allow_macro_instructions());
1888 st2(vt, vt2, lane, dst);
1889 }
1890 void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1891 int lane, const MemOperand& dst) {
1892 DCHECK(allow_macro_instructions());
1893 st3(vt, vt2, vt3, lane, dst);
1894 }
1895 void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
1896 const VRegister& vt4, int lane, const MemOperand& dst) {
1897 DCHECK(allow_macro_instructions());
1898 st4(vt, vt2, vt3, vt4, lane, dst);
1899 }
1900 void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1901 DCHECK(allow_macro_instructions());
1902 tbx(vd, vn, vm);
1903 }
1904 void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1905 const VRegister& vm) {
1906 DCHECK(allow_macro_instructions());
1907 tbx(vd, vn, vn2, vm);
1908 }
1909 void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1910 const VRegister& vn3, const VRegister& vm) {
1911 DCHECK(allow_macro_instructions());
1912 tbx(vd, vn, vn2, vn3, vm);
1913 }
1914 void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1915 const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
1916 DCHECK(allow_macro_instructions());
1917 tbx(vd, vn, vn2, vn3, vn4, vm);
1918 }
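 // Illustrative usage sketch (not part of this header): Tbl zeroes lanes
 // whose index is out of range, while Tbx leaves those lanes of vd unchanged:
 //
 //   __ Tbl(v0.V16B(), v1.V16B(), v2.V16B());  // out-of-range lanes -> 0
 //   __ Tbx(v0.V16B(), v1.V16B(), v2.V16B());  // out-of-range lanes keep v0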
1919
1920 inline void PushSizeRegList(RegList registers, unsigned reg_size) {
1921 PushCPURegList(CPURegList(reg_size, registers));
1922 }
1923 inline void PushSizeRegList(DoubleRegList registers, unsigned reg_size) {
1924 PushCPURegList(CPURegList(reg_size, registers));
1925 }
1926 inline void PopSizeRegList(RegList registers, unsigned reg_size) {
1927 PopCPURegList(CPURegList(reg_size, registers));
1928 }
1929 inline void PopSizeRegList(DoubleRegList registers, unsigned reg_size) {
1930 PopCPURegList(CPURegList(reg_size, registers));
1931 }
1932 inline void PushXRegList(RegList regs) {
1933 PushSizeRegList(regs, kXRegSizeInBits);
1934 }
1935 inline void PopXRegList(RegList regs) {
1936 PopSizeRegList(regs, kXRegSizeInBits);
1937 }
1938 inline void PushWRegList(RegList regs) {
1939 PushSizeRegList(regs, kWRegSizeInBits);
1940 }
1941 inline void PopWRegList(RegList regs) {
1942 PopSizeRegList(regs, kWRegSizeInBits);
1943 }
1944 inline void PushQRegList(DoubleRegList regs) {
1945 PushSizeRegList(regs, kQRegSizeInBits);
1946 }
1947 inline void PopQRegList(DoubleRegList regs) {
1948 PopSizeRegList(regs, kQRegSizeInBits);
1949 }
1950 inline void PushDRegList(DoubleRegList regs) {
1951 PushSizeRegList(regs, kDRegSizeInBits);
1952 }
1953 inline void PopDRegList(DoubleRegList regs) {
1954 PopSizeRegList(regs, kDRegSizeInBits);
1955 }
1956 inline void PushSRegList(DoubleRegList regs) {
1957 PushSizeRegList(regs, kSRegSizeInBits);
1958 }
1959 inline void PopSRegList(DoubleRegList regs) {
1960 PopSizeRegList(regs, kSRegSizeInBits);
1961 }
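 // Illustrative usage sketch (not part of this header): spill and reload a
 // pair of x-registers around clobbering code:
 //
 //   RegList saved = {x0, x1};
 //   __ PushXRegList(saved);
 //   // ... code that clobbers x0 and x1 ...
 //   __ PopXRegList(saved);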
1962
1963 // These PushAll/PopAll respect the order of the registers in the stack from
1964 // low index to high.
1965 void PushAll(RegList registers);
1966 void PopAll(RegList registers);
1967
1968 void PushAll(DoubleRegList registers,
1969 int stack_slot_size = kDoubleSize) {
1970 if (registers.Count() % 2 != 0) {
1971 DCHECK(!registers.has(fp_zero));
1972 registers.set(fp_zero);
1973 }
1974 PushDRegList(registers);
1975 }
1976 void PopAll(DoubleRegList registers,
1977 int stack_slot_size = kDoubleSize) {
1978 if (registers.Count() % 2 != 0) {
1979 DCHECK(!registers.has(fp_zero));
1980 registers.set(fp_zero);
1981 }
1982 PopDRegList(registers);
1983 }
1984
1985 // Push the specified register 'count' times.
1986 void PushMultipleTimes(CPURegister src, Register count);
1987
1988 // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
1989 // values peeked will be adjacent, with the value in 'dst2' being from a
1990 // higher address than 'dst1'. The offset is in bytes. The stack pointer must
1991 // be aligned to 16 bytes.
1992 void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
1993
1994 // Preserve the callee-saved registers (as defined by AAPCS64).
1995 //
1996 // Higher-numbered registers are pushed before lower-numbered registers, and
1997 // thus get higher addresses.
1998 // Floating-point registers are pushed before general-purpose registers, and
1999 // thus get higher addresses.
2000 //
2001 // When control flow integrity measures are enabled, this method signs the
2002 // link register before pushing it.
2003 //
2004 // Note that registers are not checked for invalid values. Use this method
2005 // only if you know that the GC won't try to examine the values on the stack.
2006 void PushCalleeSavedRegisters();
2007
2008 // Restore the callee-saved registers (as defined by AAPCS64).
2009 //
2010 // Higher-numbered registers are popped after lower-numbered registers, and
2011 // thus come from higher addresses.
2012 // Floating-point registers are popped after general-purpose registers, and
2013 // thus come from higher addresses.
2014 //
2015 // When control flow integrity measures are enabled, this method
2016 // authenticates the link register after popping it.
2017 void PopCalleeSavedRegisters();
2018
2019 // Tiering support.
2020 inline void AssertFeedbackCell(Register object,
2021 Register scratch) NOOP_UNLESS_DEBUG_CODE;
2022 inline void AssertFeedbackVector(Register object);
2023 void AssertFeedbackVector(Register object,
2024 Register scratch) NOOP_UNLESS_DEBUG_CODE;
2025 void ReplaceClosureCodeWithOptimizedCode(Register optimized_code,
2026 Register closure);
2027 void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
2028#ifndef V8_ENABLE_LEAPTIERING
2029 Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
2030 Register flags, Register feedback_vector, CodeKind current_code_kind);
2031 void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
2032 Register flags, Register feedback_vector, CodeKind current_code_kind,
2033 Label* flags_need_processing);
2034 void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags,
2035 Register feedback_vector);
2036#endif // !V8_ENABLE_LEAPTIERING
2037
2038 // Helpers ------------------------------------------------------------------
2039
2040 template <typename Field>
2041 void DecodeField(Register dst, Register src) {
2042 static const int shift = Field::kShift;
2043 static const int setbits = CountSetBits(Field::kMask, 32);
2044 Ubfx(dst, src, shift, setbits);
2045 }
2046
2047 template <typename Field>
2048 void DecodeField(Register reg) {
2049 DecodeField<Field>(reg, reg);
2050 }
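 // Illustrative usage sketch (not part of this header): with Field a
 // base::BitField specialization, DecodeField extracts that field's bits,
 // e.g. (register names for illustration only):
 //
 //   masm->DecodeField<Map::Bits3::ElementsKindBits>(kind_reg, bitfield3_reg);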
2051
2052 void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch,
2053 Label* if_marked_for_deoptimization);
2054 void JumpIfCodeIsTurbofanned(Register code, Register scratch,
2055 Label* if_marked_for_deoptimization);
2056 Operand ClearedValue() const;
2057
2059
2060 // ---- SMI and Number Utilities ----
2061
2062 inline void JumpIfNotSmi(Register value, Label* not_smi_label);
2063
2064 // Abort execution if argument is not a Map, enabled via
2065 // --debug-code.
2066 void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE;
2067
2068 // Abort execution if argument is not a Code, enabled via
2069 // --debug-code.
2070 void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE;
2071
2072 // Abort execution if argument is not a Constructor, enabled via
2073 // --debug-code.
2074 void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
2075
2076 // Abort execution if argument is not a JSFunction, enabled via
2077 // --debug-code.
2078 void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
2079
2080 // Abort execution if argument is not a callable JSFunction, enabled via
2081 // --debug-code.
2082 void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
2083
2084 // Abort execution if argument is not a JSGeneratorObject (or subclass),
2085 // enabled via --debug-code.
2086 void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE;
2087
2088 // Abort execution if argument is not a JSBoundFunction,
2089 // enabled via --debug-code.
2090 void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
2091
2092 // Abort execution if argument is not undefined or an AllocationSite,
2093 // enabled via --debug-code.
2094 void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE;
2095
2096 // Abort execution if argument is not a smi nor in the pointer compression
2097 // cage, enabled via --debug-code.
2098 void AssertJSAny(Register object, Register map_tmp, Register tmp,
2099 AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE;
2100
2101 // ---- Calling / Jumping helpers ----
2102
2103 void CallRuntime(const Runtime::Function* f, int num_arguments);
2104
2105 // Convenience function: Same as above, but takes the fid instead.
2106 void CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2107 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2108 }
2109
2110 // Convenience function: Same as above, but takes the fid instead.
2112 const Runtime::Function* function = Runtime::FunctionForId(fid);
2113 CallRuntime(function, function->nargs);
2114 }
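 // Illustrative usage sketch (not part of this header): the fid-only variant
 // reads the argument count from the runtime function table, e.g.
 //
 //   __ CallRuntime(Runtime::kStackGuard);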
2115
2116 void TailCallRuntime(Runtime::FunctionId fid);
2117
2118 // Jump to a runtime routine.
2119 void JumpToExternalReference(const ExternalReference& builtin,
2120 bool builtin_exit_frame = false);
2121
2122 // Registers used through the invocation chain are hard-coded.
2123 // We force passing the parameters to ensure the contracts are correctly
2124 // honoured by the caller.
2125 // 'function' must be x1.
2126 // 'actual' must use an immediate or x0.
2127 // 'expected' must use an immediate or x2.
2128 // 'call_kind' must be x5.
2129 void InvokePrologue(Register expected_parameter_count,
2130 Register actual_parameter_count, InvokeType type);
2131
2132 // On function call, call into the debugger.
2133 void CallDebugOnFunctionCall(
2134 Register fun, Register new_target,
2135 Register expected_parameter_count_or_dispatch_handle,
2136 Register actual_parameter_count);
2137
2138 // The way we invoke JSFunctions differs depending on whether leaptiering is
2139 // enabled. As such, these functions exist in two variants. In the future,
2140 // leaptiering will be used on all platforms. At that point, the
2141 // non-leaptiering variants will disappear.
2142
2143#ifdef V8_ENABLE_LEAPTIERING
2144 // Invoke the JavaScript function in the given register. Changes the
2145 // current context to the context in the function before invoking.
2146 void InvokeFunction(Register function, Register actual_parameter_count,
2147 InvokeType type,
2148 ArgumentAdaptionMode argument_adaption_mode =
2149 ArgumentAdaptionMode::kAdaptArguments);
2150 // Invoke the JavaScript function in the given register.
2151 // Changes the current context to the context in the function before invoking.
2152 void InvokeFunctionWithNewTarget(Register function, Register new_target,
2153 Register actual_parameter_count,
2154 InvokeType type);
2155 // Invoke the JavaScript function code by either calling or jumping.
2156 void InvokeFunctionCode(Register function, Register new_target,
2157 Register actual_parameter_count, InvokeType type,
2158 ArgumentAdaptionMode argument_adaption_mode =
2159 ArgumentAdaptionMode::kAdaptArguments);
2160#else
2161 void InvokeFunctionCode(Register function, Register new_target,
2162 Register expected_parameter_count,
2163 Register actual_parameter_count, InvokeType type);
2164 // Invoke the JavaScript function in the given register.
2165 // Changes the current context to the context in the function before invoking.
2166 void InvokeFunctionWithNewTarget(Register function, Register new_target,
2167 Register actual_parameter_count,
2168 InvokeType type);
2169 void InvokeFunction(Register function, Register expected_parameter_count,
2170 Register actual_parameter_count, InvokeType type);
2171#endif
2172
2173 // ---- InstructionStream generation helpers ----
2174
2175 // ---------------------------------------------------------------------------
2176 // Support functions.
2177
2178 // Compare object type for heap object. heap_object contains a non-Smi
2179 // whose object type should be compared with the given type. This both
2180 // sets the flags and leaves the object type in the type_reg register.
2181 // It leaves the map in the map register (unless the type_reg and map register
2182 // are the same register). It leaves the heap object in the heap_object
2183 // register unless the heap_object register is the same register as one of the
2184 // other registers.
2185 void CompareObjectType(Register heap_object, Register map, Register type_reg,
2186 InstanceType type);
2187 // Variant of the above, which only guarantees to set the correct eq/ne flag.
2188 // Neither map nor type_reg is guaranteed to hold any particular value.
2189 void IsObjectType(Register heap_object, Register scratch1, Register scratch2,
2190 InstanceType type);
2191 // Variant of the above, which compares against a type range rather than a
2192 // single type (lower_limit and higher_limit are inclusive).
2193 //
2194 // Always use unsigned comparisons: ls for a positive result.
2195 void IsObjectTypeInRange(Register heap_object, Register scratch,
2196 InstanceType lower_limit, InstanceType higher_limit);
2197#if V8_STATIC_ROOTS_BOOL
2198 // Fast variant which is guaranteed to not actually load the instance type
2199 // from the map.
2200 void IsObjectTypeFast(Register heap_object, Register compressed_map_scratch,
2201 InstanceType type);
2202 void CompareInstanceTypeWithUniqueCompressedMap(Register map,
2203 Register scratch,
2204 InstanceType type);
2205#endif // V8_STATIC_ROOTS_BOOL
2206
2207 // Compare object type for heap object, and branch if equal (or not).
2208 // heap_object contains a non-Smi whose object type should be compared with
2209 // the given type. This both sets the flags and leaves the object type in
2210 // the type_reg register. It leaves the map in the map register (unless the
2211 // type_reg and map register are the same register). It leaves the heap
2212 // object in the heap_object register unless the heap_object register is the
2213 // same register as one of the other registers.
2214 void JumpIfObjectType(Register object, Register map, Register type_reg,
2215 InstanceType type, Label* if_cond_pass,
2216 Condition cond = eq);
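 // Illustrative usage sketch (not part of this header):
 //
 //   Label is_array;
 //   __ JumpIfObjectType(x0, x2, x3, JS_ARRAY_TYPE, &is_array);  // cond == eq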
2217
2218 // Fast check if the object is a js receiver type. Assumes only primitive
2219 // objects or js receivers are passed.
2220 void JumpIfJSAnyIsNotPrimitive(
2221 Register heap_object, Register scratch, Label* target,
2222 Label::Distance distance = Label::kFar,
2223 Condition condition = Condition::kUnsignedGreaterThanEqual);
2224 void JumpIfJSAnyIsPrimitive(Register heap_object, Register scratch,
2225 Label* target,
2226 Label::Distance distance = Label::kFar) {
2227 return JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target, distance,
2228 Condition::kUnsignedLessThan);
2229 }
2230
2231 // Compare instance type in a map. map contains a valid map object whose
2232 // object type should be compared with the given type. This both
2233 // sets the flags and leaves the object type in the type_reg register.
2234 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
2235
2236 // Compare instance type ranges for a map (lower_limit and higher_limit
2237 // inclusive).
2238 //
2239 // Always use unsigned comparisons: ls for a positive result.
2240 void CompareInstanceTypeRange(Register map, Register type_reg,
2241 InstanceType lower_limit,
2242 InstanceType higher_limit);
2243
2244 // Load the elements kind field from a map, and return it in the result
2245 // register.
2246 void LoadElementsKindFromMap(Register result, Register map);
2247
2248 // Compare the object in a register to a value from the root list.
2249 void CompareRoot(const Register& obj, RootIndex index,
2250 ComparisonMode mode = ComparisonMode::kDefault);
2251 void CompareTaggedRoot(const Register& with, RootIndex index);
2252
2253 // Compare the object in a register to a value and jump if they are equal.
2254 void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);
2255
2256 // Compare the object in a register to a value and jump if they are not equal.
2257 void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
2258
2259 // Checks if value is in range [lower_limit, higher_limit] using a single
2260 // comparison.
2261 void JumpIfIsInRange(const Register& value, unsigned lower_limit,
2262 unsigned higher_limit, Label* on_in_range);
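 // Illustrative usage sketch (not part of this header): one unsigned
 // compare-and-branch covers both limits, e.g. for an instance-type range:
 //
 //   __ JumpIfIsInRange(type_reg, FIRST_JS_RECEIVER_TYPE,
 //                      LAST_JS_RECEIVER_TYPE, &is_receiver);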
2263
2264 // ---------------------------------------------------------------------------
2265 // Frames.
2266
2267 // Enter exit frame. Exit frames are used when calling C code from generated
2268 // (JavaScript) code.
2269 //
2270 // The only registers modified by this function are the provided scratch
2271 // register, the frame pointer and the stack pointer.
2272 //
2273 // The 'extra_space' argument can be used to allocate some space in the exit
2274 // frame that will be ignored by the GC. This space will be reserved in the
2275 // bottom of the frame immediately above the return address slot.
2276 //
2277 // Set up a stack frame and registers as follows:
2278 // fp[8]: CallerPC (lr)
2279 // fp -> fp[0]: CallerFP (old fp)
2280 // fp[-8]: SPOffset (new sp)
2281 // fp[-16]: CodeObject()
2282 // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
2283 // sp[8]: Memory reserved for the caller if extra_space != 0.
2284 // Alignment padding, if necessary.
2285 // sp -> sp[0]: Space reserved for the return address.
2286 //
2287 // This function also stores the new frame information in the top frame, so
2288 // that the new frame becomes the current frame.
2289 void EnterExitFrame(const Register& scratch, int extra_space,
2290 StackFrame::Type frame_type);
2291
2292 // Leave the current exit frame, after a C function has returned to generated
2293 // (JavaScript) code.
2294 //
2295 // This effectively unwinds the operation of EnterExitFrame:
2296 // * The frame information is removed from the top frame.
2297 // * The exit frame is dropped.
2298 void LeaveExitFrame(const Register& scratch, const Register& scratch2);
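 // Illustrative usage sketch (not part of this header): the two calls pair up
 // around a call into C++ (scratch register choices are arbitrary):
 //
 //   __ EnterExitFrame(x10, 0, StackFrame::EXIT);
 //   // ... set up arguments and call the C function ...
 //   __ LeaveExitFrame(x10, x11);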
2299
2300 // Load the global proxy from the current context.
2301 void LoadGlobalProxy(Register dst);
2302
2303 // ---------------------------------------------------------------------------
2304 // In-place weak references.
2305 void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
2306
2307 // ---------------------------------------------------------------------------
2308 // StatsCounter support
2309
2310 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
2311 Register scratch2) {
2312 if (!v8_flags.native_code_counters) return;
2313 EmitIncrementCounter(counter, value, scratch1, scratch2);
2314 }
2315 void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
2316 Register scratch2);
2317 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
2318 Register scratch2) {
2319 if (!v8_flags.native_code_counters) return;
2320 EmitIncrementCounter(counter, -value, scratch1, scratch2);
2321 }
2322
2323 // ---------------------------------------------------------------------------
2324 // Stack limit utilities
2325 void LoadStackLimit(Register destination, StackLimitKind kind);
2326 void StackOverflowCheck(Register num_args, Label* stack_overflow);
2327
2328 // ---------------------------------------------------------------------------
2329 // Garbage collector support (GC).
2330
2331 // Notify the garbage collector that we wrote a pointer into an object.
2332 // |object| is the object being stored into, |value| is the object being
2333 // stored.
2334 // The offset is the offset from the start of the object, not the offset from
2335 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
2335 void RecordWriteField(
2336 Register object, int offset, Register value, LinkRegisterStatus lr_status,
2337 SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline,
2338 ReadOnlyCheck ro_check = ReadOnlyCheck::kInline,
2339 SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot());
2341
2342 // For a given |object| notify the garbage collector that the slot at |offset|
2343 // has been written. |value| is the object being stored.
2344 void RecordWrite(
2345 Register object, Operand offset, Register value,
2346 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
2347 SmiCheck smi_check = SmiCheck::kInline,
2348 ReadOnlyCheck ro_check = ReadOnlyCheck::kInline,
2349 SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot());
2350
2351 // ---------------------------------------------------------------------------
2352 // Debugging.
2353
2354 void LoadNativeContextSlot(Register dst, int index);
2355
2356 // Falls through and sets scratch_and_result to 0 on failure, jumps to
2357 // on_result on success.
2358 void TryLoadOptimizedOsrCode(Register scratch_and_result,
2359 CodeKind min_opt_level, Register feedback_vector,
2360 FeedbackSlot slot, Label* on_result,
2361 Label::Distance distance);
2362
2363 protected:
2364 // The actual Push and Pop implementations. These don't generate any code
2365 // other than that required for the push or pop. This allows
2366 // (Push|Pop)CPURegList to bundle together run-time assertions for a large
2367 // block of registers.
2368 //
2369 // Note that size is per register, and is specified in bytes.
2370 void PushHelper(int count, int size, const CPURegister& src0,
2371 const CPURegister& src1, const CPURegister& src2,
2372 const CPURegister& src3);
2373 void PopHelper(int count, int size, const CPURegister& dst0,
2374 const CPURegister& dst1, const CPURegister& dst2,
2375 const CPURegister& dst3);
2376
2377 void ConditionalCompareMacro(const Register& rn, const Operand& operand,
2378 StatusFlags nzcv, Condition cond,
2379 ConditionalCompareOp op);
2380
2381 void AddSubWithCarryMacro(const Register& rd, const Register& rn,
2382 const Operand& operand, FlagsUpdate S,
2383 AddSubWithCarryOp op);
2384
2385 // Call Printf. On a native build, a simple call will be generated, but if the
2386 // simulator is being used then a suitable pseudo-instruction is used. The
2387 // arguments and stack must be prepared by the caller as for a normal AAPCS64
2388 // call to 'printf'.
2389 //
2390 // The 'args' argument should point to an array of variable arguments in their
2391 // proper PCS registers (and in calling order). The argument registers can
2392 // have mixed types. The format string (x0) should not be included.
2393 void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
2394
2395 private:
2396#if DEBUG
2397 // Tell whether any of the macro instruction can be used. When false the
2398 // MacroAssembler will assert if a method which can emit a variable number
2399 // of instructions is called.
2400 bool allow_macro_instructions_ = true;
2401#endif
2402
2403 // Scratch registers available for use by the MacroAssembler.
2404 CPURegList tmp_list_ = DefaultTmpList();
2405 CPURegList fptmp_list_ = DefaultFPTmpList();
2406
2407 // Helps resolve branching to labels potentially out of range.
2408 // If the label is not bound, it registers the information necessary to later
2409 // be able to emit a veneer for this branch if necessary.
2410 // If the label is bound, it returns true if the label (or the previous link
2411 // in the label chain) is out of range. In that case the caller is responsible
2412 // for generating appropriate code.
2413 // Otherwise it returns false.
2414 // This function also checks whether veneers need to be emitted.
2415 template <ImmBranchType branch_type>
2416 bool NeedExtraInstructionsOrRegisterBranch(Label* label) {
2417 static_assert((branch_type == CondBranchType) ||
2418 (branch_type == CompareBranchType) ||
2419 (branch_type == TestBranchType));
2420
2421 bool need_longer_range = false;
2422 // There are two situations in which we care about the offset being out of
2423 // range:
2424 // - The label is bound but too far away.
2425 // - The label is not bound but linked, and the previous branch
2426 // instruction in the chain is too far away.
2427 if (label->is_bound() || label->is_linked()) {
2428 need_longer_range = !Instruction::IsValidImmPCOffset(
2429 branch_type, label->pos() - pc_offset());
2430 }
2431 if (!need_longer_range && !label->is_bound()) {
2432 int max_reachable_pc =
2433 pc_offset() + Instruction::ImmBranchRange(branch_type);
2434
2435 // Use the LSB of the max_reachable_pc (always four-byte aligned) to
2436 // encode the branch type. We need only distinguish between TB[N]Z and
2437 // CB[N]Z/conditional branch, as the ranges for the latter are the same.
2438 int branch_type_tag = (branch_type == TestBranchType) ? 1 : 0;
2439
2440 unresolved_branches_.insert(
2441 std::pair<int, Label*>(max_reachable_pc + branch_type_tag, label));
2442 // Also maintain the next pool check.
2443 next_veneer_pool_check_ =
2444 std::min(next_veneer_pool_check_,
2445 max_reachable_pc - kVeneerDistanceCheckMargin);
2446 }
2447 return need_longer_range;
2448 }
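 // For reference (not in the original source): TB[N]Z encodes a 14-bit
 // immediate branch offset (+/-32KB), while CB[N]Z and conditional branches
 // encode 19 bits (+/-1MB). CB[N]Z and B.cond therefore share a range, so a
 // single tag bit in max_reachable_pc is enough to recover the branch type.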
2449
2450 void Movi16bitHelper(const VRegister& vd, uint64_t imm);
2451 void Movi32bitHelper(const VRegister& vd, uint64_t imm);
2452 void Movi64bitHelper(const VRegister& vd, uint64_t imm);
2453
2454 void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
2455 LoadStoreOp op);
2456 void LoadStoreMacroComplex(const CPURegister& rt, const MemOperand& addr,
2457 LoadStoreOp op);
2458
2459 void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
2460 const MemOperand& addr, LoadStorePairOp op);
2461
2462 static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
2463 uint8_t* pc);
2464
2465 void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
2466
2467 friend class wasm::JumpTableAssembler;
2468
2469 DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
2470};
2471
2472// Use this scope when you need a one-to-one mapping between methods and
2473// instructions. This scope prevents the MacroAssembler from being called and
2474// literal pools from being emitted. It also asserts the number of instructions
2475// emitted is what you specified when creating the scope.
2476class V8_NODISCARD InstructionAccurateScope {
2477 public:
2478 explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
2479 : masm_(masm),
2480 block_pool_(masm, count * kInstrSize)
2481#ifdef DEBUG
2482 ,
2483 size_(count * kInstrSize)
2484#endif
2485 {
2486 masm_->CheckVeneerPool(false, true, count * kInstrSize);
2487 masm_->StartBlockVeneerPool();
2488#ifdef DEBUG
2489 if (count != 0) {
2490 masm_->bind(&start_);
2491 }
2492 previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
2493 masm_->set_allow_macro_instructions(false);
2494#endif
2495 }
2496
2497 ~InstructionAccurateScope() {
2498 masm_->EndBlockVeneerPool();
2499#ifdef DEBUG
2500 if (start_.is_bound()) {
2501 DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
2502 }
2503 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
2504#endif
2505 }
2506
2507 private:
2508 MacroAssembler* masm_;
2509 MacroAssembler::BlockConstPoolScope block_pool_;
2510#ifdef DEBUG
2511 size_t size_;
2512 Label start_;
2513 bool previous_allow_macro_instructions_;
2514#endif
2515};
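 // Illustrative usage sketch (not part of this header): emit exactly two
 // instructions, with pools and macro expansion blocked for the duration:
 //
 //   {
 //     InstructionAccurateScope scope(masm, 2);
 //     masm->ldr(x0, MemOperand(x1));
 //     masm->add(x0, x0, Operand(1));
 //   }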
2516
2517// This scope utility allows scratch registers to be managed safely. The
2518// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
2519// registers. These registers can be allocated on demand, and will be returned
2520// at the end of the scope.
2521//
2522// When the scope ends, the MacroAssembler's lists will be restored to their
2523// original state, even if the lists were modified by some other means. Note
2524// that this scope can be nested, but the destructors must run in the reverse
2525// order of the constructors. We do not have assertions for this.
2526class V8_NODISCARD UseScratchRegisterScope {
2527 public:
2528 explicit UseScratchRegisterScope(MacroAssembler* masm)
2529 : available_(masm->TmpList()),
2530 availablefp_(masm->FPTmpList()),
2531 old_available_(available_->bits()),
2532 old_availablefp_(availablefp_->bits()) {
2533 DCHECK_EQ(available_->type(), CPURegister::kRegister);
2534 DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
2535 }
2536
2537 ~UseScratchRegisterScope() {
2538 available_->set_bits(old_available_);
2539 availablefp_->set_bits(old_availablefp_);
2540 }
2541
2542 // Take a register from the appropriate temps list. It will be returned
2543 // automatically when the scope ends.
2544 Register AcquireW() { return AcquireNextAvailable(available_).W(); }
2545 Register AcquireX() { return AcquireNextAvailable(available_).X(); }
2546 VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
2547 VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
2548 VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
2549 VRegister AcquireV(VectorFormat format) {
2550 return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
2551 }
2552
2553 bool CanAcquire() const { return !available_->IsEmpty(); }
2554 bool CanAcquireFP() const { return !availablefp_->IsEmpty(); }
2555
2556 Register AcquireSameSizeAs(const Register& reg) {
2557 int code = AcquireNextAvailable(available_).code();
2558 return Register::Create(code, reg.SizeInBits());
2559 }
2560
2561 VRegister AcquireSameSizeAs(const VRegister& reg) {
2562 int code = AcquireNextAvailable(availablefp_).code();
2563 return VRegister::Create(code, reg.SizeInBits());
2564 }
2565
2566 void Include(const CPURegList& list) { available_->Combine(list); }
2567 void IncludeFP(const CPURegList& list) { availablefp_->Combine(list); }
2568 void Exclude(const CPURegList& list) {
2569#if DEBUG
2570 CPURegList copy(list);
2571 while (!copy.IsEmpty()) {
2572 const CPURegister& reg = copy.PopHighestIndex();
2573 DCHECK(available_->IncludesAliasOf(reg));
2574 }
2575#endif
2576 available_->Remove(list);
2577 }
2578 void ExcludeFP(const CPURegList& list) {
2579#if DEBUG
2580 CPURegList copy(list);
2581 while (!copy.IsEmpty()) {
2582 const CPURegister& reg = copy.PopHighestIndex();
2583 DCHECK(availablefp_->IncludesAliasOf(reg));
2584 }
2585#endif
2586 availablefp_->Remove(list);
2587 }
2588 void Include(const Register& reg1, const Register& reg2 = NoReg) {
2589 CPURegList list(reg1, reg2);
2590 Include(list);
2591 }
2592 void Exclude(const Register& reg1, const Register& reg2 = NoReg) {
2593 CPURegList list(reg1, reg2);
2594 Exclude(list);
2595 }
2597
2598 CPURegList* Available() { return available_; }
2599 void SetAvailable(const CPURegList& list) { *available_ = list; }
2600
2601 CPURegList* AvailableFP() { return availablefp_; }
2602 void SetAvailableFP(const CPURegList& list) { *availablefp_ = list; }
2603
2604 private:
2605 static CPURegister AcquireNextAvailable(
2606 CPURegList* available) {
2607 CHECK(!available->IsEmpty());
2608 CPURegister result = available->PopLowestIndex();
2609 DCHECK(!AreAliased(result, xzr, sp));
2610 return result;
2611 }
2612
2613 // Available scratch registers.
2614 CPURegList* available_; // kRegister
2615 CPURegList* availablefp_; // kVRegister
2616
2617 // The state of the available lists at the start of this scope.
2618 uint64_t old_available_; // kRegister
2619 uint64_t old_availablefp_; // kVRegister
2620};
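 // Illustrative usage sketch (not part of this header):
 //
 //   {
 //     UseScratchRegisterScope temps(masm);
 //     Register scratch = temps.AcquireX();
 //     masm->Mov(scratch, 42);
 //   }  // scratch is returned to the temp list here.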
2621
2622struct MoveCycleState {
2623 // List of scratch registers reserved for pending moves in a move cycle, and
2624 // which should therefore not be used as a temporary location by
2625 // {MoveToTempLocation}.
2626 RegList scratch_regs;
2627 DoubleRegList scratch_fp_regs;
2628 // Available scratch registers during the move cycle resolution scope.
2629 std::optional<UseScratchRegisterScope> temps;
2630 // Scratch register picked by {MoveToTempLocation}.
2631 std::optional<CPURegister> scratch_reg;
2632};
2633
2634// Provides access to exit frame parameters (GC-ed).
2635 inline MemOperand ExitFrameStackSlotOperand(int offset);
2636
2637// Provides access to exit frame parameters (GC-ed).
2638 inline MemOperand ExitFrameCallerStackSlotOperand(int index);
2639
2640// Calls an API function. Allocates HandleScope, extracts returned value
2641// from handle and propagates exceptions. Clobbers C argument registers
2642// and C caller-saved registers. Restores context. On return removes
2643// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
2644// (GCed, includes the call JS arguments space and the additional space
2645// allocated for the fast call).
2646void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
2647 Register function_address,
2648 ExternalReference thunk_ref, Register thunk_arg,
2649 int slots_to_drop_on_return,
2650 MemOperand* argc_operand,
2651 MemOperand return_value_operand);
2652
2653} // namespace internal
2654} // namespace v8
2655
2656#define ACCESS_MASM(masm) masm->
2657
2658#endif // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define BREAK
RegList available_
int16_t parameter_count
Definition builtins.cc:67
Builtins::Kind kind
Definition builtins.cc:40
CPURegister PopHighestIndex()
InstructionAccurateScope(MacroAssembler *masm, size_t count=0)
MacroAssembler::BlockConstPoolScope block_pool_
void JumpIfJSAnyIsPrimitive(Register heap_object, Register scratch, Label *target, Label::Distance distance=Label::kFar)
ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES, DECLARE_LOAD_FUNCTION, Ld, ld) ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES
void Ucvtf(const VRegister &vd, const VRegister &vn, int fbits=0)
bool NeedExtraInstructionsOrRegisterBranch(Label *label)
void Abort(AbortReason reason)
void PushAll(RegList registers)
void LoadStackLimit(Register destination, StackLimitKind kind)
void St4(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const VRegister &vt4, const MemOperand &dst)
void Stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void CallJSFunction(Register function_object, uint16_t argument_count)
void AddSubWithCarryMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void Tbx(const VRegister &vd, const VRegister &vn, const VRegister &vm)
void DecompressTagged(const Register &destination, const Register &source)
void St1(const VRegister &vt, const MemOperand &dst)
void CallBuiltinByIndex(Register builtin, Register target)
int CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE
void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void AddSubMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
int CallCFunction(Register function, int num_reg_arguments, int num_double_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion=no_reg)
void Cmge(const VRegister &vd, const VRegister &vn, int imm)
void DecompressTaggedSigned(const Register &destination, const MemOperand &field_operand)
void St3(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const MemOperand &dst)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void CallRuntime(Runtime::FunctionId fid)
void Push(const CPURegister &src0, const CPURegister &src1, const CPURegister &src2, const CPURegister &src3, const CPURegister &src4, const CPURegister &src5=NoReg, const CPURegister &src6=NoReg, const CPURegister &src7=NoReg)
void IsObjectType(Register heap_object, Register scratch1, Register scratch2, InstanceType type)
void DecodeField(Register dst, Register src)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void PushSizeRegList(RegList registers, unsigned reg_size)
void SlotAddress(Register dst, int slot_offset)
void AssertNotSmi(Register object, AbortReason reason=AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE
void Ext(const VRegister &vd, const VRegister &vn, const VRegister &vm, int index)
void LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTagRange tag_range, Register isolate_root=Register::no_reg())
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
void PushMultipleTimes(CPURegister src, Register count)
void Ld2(const VRegister &vt, const VRegister &vt2, const MemOperand &src)
void Bic(const VRegister &vd, const int imm8, const int left_shift=0)
void I64x2BitMask(Register dst, VRegister src)
static CPURegList DefaultTmpList()
void AssertPositiveOrZero(Register value) NOOP_UNLESS_DEBUG_CODE
void LoadStoreMacroComplex(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
void Tbx(const VRegister &vd, const VRegister &vn, const VRegister &vn2, const VRegister &vm)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void Pop(const CPURegister &dst0, const CPURegister &dst1, const CPURegister &dst2, const CPURegister &dst3, const CPURegister &dst4, const CPURegister &dst5=NoReg, const CPURegister &dst6=NoReg, const CPURegister &dst7=NoReg)
void St1(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const VRegister &vt4, const MemOperand &dst)
void Tbl(const VRegister &vd, const VRegister &vn, const VRegister &vn2, const VRegister &vn3, const VRegister &vn4, const VRegister &vm)
void Move(Register dst, Register src)
void Tbx(const VRegister &vd, const VRegister &vn, const VRegister &vn2, const VRegister &vn3, const VRegister &vn4, const VRegister &vm)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void Bcax(const VRegister &vd, const VRegister &vn, const VRegister &vm, const VRegister &va)
void CanonicalizeNaN(const VRegister &reg)
void I32x4BitMask(Register dst, VRegister src)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void AtomicDecompressTaggedSigned(const Register &destination, const Register &base, const Register &index, const Register &temp)
void StoreReturnAddressAndCall(Register target)
void PopSRegList(DoubleRegList regs)
void CopyDoubleWords(Register dst, Register src, Register count, CopyDoubleWordsMode mode=kDstLessThanSrc)
void Tbl(const VRegister &vd, const VRegister &vn, const VRegister &vn2, const VRegister &vn3, const VRegister &vm)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void LoadRootRelative(Register destination, int32_t offset) final
void Smov(const Register &rd, const VRegister &vn, int vn_index)
void PopHelper(int count, int size, const CPURegister &dst0, const CPURegister &dst1, const CPURegister &dst2, const CPURegister &dst3)
void Ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
void CheckPageFlag(const Register &object, int mask, Condition cc, Label *condition_met)
void LoadSandboxedPointerField(Register destination, MemOperand field_operand)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void Ld3r(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const MemOperand &src)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void Swap(Register lhs, Register rhs)
void PopAll(DoubleRegList registers, int stack_slot_size=kDoubleSize)
void Movi32bitHelper(const VRegister &vd, uint64_t imm)
void St1(const VRegister &vt, int lane, const MemOperand &dst)
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline, ReadOnlyCheck ro_check=ReadOnlyCheck::kInline, SlotDescriptor slot=SlotDescriptor::ForDirectPointerSlot())
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
void AtomicDecompressTagged(const Register &destination, const Register &base, const Register &index, const Register &temp)
void Dup(const VRegister &vd, const Register &rn)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void Ld4(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const VRegister &vt4, int lane, const MemOperand &src)
void CallPrintf(int arg_count=0, const CPURegister *args=nullptr)
void St2(const VRegister &vt, const VRegister &vt2, int lane, const MemOperand &dst)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
void Ins(const VRegister &vd, int vd_index, const Register &rn)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE
void TryConvertDoubleToInt64(Register result, DoubleRegister input, Label *done)
void LoadMap(Register dst, Register object)
void Movi(const VRegister &vd, uint64_t hi, uint64_t lo)
void Fcvtzs(const VRegister &vd, const VRegister &vn, int fbits=0)
void Orr(const VRegister &vd, const int imm8, const int left_shift=0)
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void Scvtf(const VRegister &vd, const VRegister &vn, int fbits=0)
void Fcvtzu(const VRegister &vd, const VRegister &vn, int fbits=0)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void PopcntHelper(Register dst, Register src)
void IndirectCall(Address target, RelocInfo::Mode rmode)
void DecodeSandboxedPointer(Register value)
void Jump(const ExternalReference &reference)
void Ld4r(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const VRegister &vt4, const MemOperand &src)
MemOperand ExternalReferenceAsOperand(IsolateFieldId id)
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag)
bool CanUseNearCallOrJump(RelocInfo::Mode rmode)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void StoreTaggedField(const MemOperand &dst_field_operand, const Register &value)
int CallCFunction(ExternalReference function, int num_reg_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_location=nullptr)
void Call(Handle< Code > code, RelocInfo::Mode rmode=RelocInfo::CODE_TARGET)
void SbxCheck(Condition cc, AbortReason reason)
void Mov(const VRegister &vd, const VRegister &vn, int index)
void PopQRegList(DoubleRegList regs)
void PushAll(DoubleRegList registers, int stack_slot_size=kDoubleSize)
void Cmeq(const VRegister &vd, const VRegister &vn, int imm)
void Fjcvtzs(const Register &rd, const VRegister &vn)
void Mov(const Register &rd, ExternalReference reference)
void Ld1(const VRegister &vt, const VRegister &vt2, const MemOperand &src)
void Mvn(const Register &rd, const Operand &operand)
void Ins(const VRegister &vd, int vd_index, const VRegister &vn, int vn_index)
void JumpIfCodeIsTurbofanned(Register code, Register scratch, Label *if_marked_for_deoptimization)
void Tbx(const VRegister &vd, const VRegister &vn, const VRegister &vn2, const VRegister &vn3, const VRegister &vm)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Ld4(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, const VRegister &vt4, const MemOperand &src)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void EnterExitFrame(const Register &scratch, int extra_space, StackFrame::Type frame_type)
void St3(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, int lane, const MemOperand &dst)
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)
void LoadIndirectPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void JumpIfEqual(Register x, int32_t y, Label *dest)
void LeaveFrame(StackFrame::Type type)
void CompareRoot(const Register &obj, RootIndex index, ComparisonMode mode=ComparisonMode::kDefault)
void LoadGlobalProxy(Register dst)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion=no_reg)
void PopSizeRegList(DoubleRegList registers, unsigned reg_size)
static bool IsImmMovz(uint64_t imm, unsigned reg_size)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
Operand ClearedValue() const
void StoreCodePointerField(Register value, MemOperand dst_field_operand)
void IsObjectTypeInRange(Register heap_object, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void Mov(const Register &rd, const VRegister &vn, int vn_index)
void Fcvtn2(const VRegister &vd, const VRegister &vn)
void Umov(const Register &rd, const VRegister &vn, int vn_index)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void Ld2(const VRegister &vt, const VRegister &vt2, int lane, const MemOperand &src)
void Jump(Register target, Condition cond=al)
void BindExceptionHandler(Label *label)
void PushSRegList(DoubleRegList regs)
void LoadRoot(Register destination, RootIndex index) final
void DecompressProtected(const Register &destination, const MemOperand &field_operand)
void PushRoot(RootIndex index)
void CheckPageFlag(const Register &object, Register scratch, int mask, Condition cc, Label *condition_met)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void Ld1(const VRegister &vt, int lane, const MemOperand &src)
static bool IsImmMovn(uint64_t imm, unsigned reg_size)
void CanonicalizeNaN(const VRegister &dst, const VRegister &src)
void Csel(const Register &rd, const Register &rn, const Operand &operand, Condition cond)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void CompareRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void St1(const VRegister &vt, const VRegister &vt2, const MemOperand &dst)
void Ld3(const VRegister &vt, const VRegister &vt2, const VRegister &vt3, int lane, const MemOperand &src)
void Swap(VRegister lhs, VRegister rhs)
void LoadFromConstantsTable(Register destination, int constant_index) final
void Ld1(const VRegister &vt, const MemOperand &src)
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void I64x2AllTrue(Register dst, VRegister src)
void PokePair(const CPURegister &src1, const CPURegister &src2, int offset)
void LeaveExitFrame(const Register &scratch, const Register &scratch2)
void MaybeSaveRegisters(RegList registers)
void LoadStorePairMacro(const CPURegister &rt, const CPURegister &rt2, const MemOperand &addr, LoadStorePairOp op)
void AssertZeroExtended(Register int32_register) NOOP_UNLESS_DEBUG_CODE
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
void LoadTaggedRoot(Register destination, RootIndex index)
void PushCPURegList(CPURegList registers)
void Cmgt(const VRegister &vd, const VRegister &vn, int imm)
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler)
void PopSizeRegList(RegList registers, unsigned reg_size)
void Movi(const VRegister &vd, uint64_t imm, Shift shift=LSL, int shift_amount=0)
void I16x8BitMask(Register dst, VRegister src)
void Printf(const char *format, CPURegister arg0=NoCPUReg, CPURegister arg1=NoCPUReg, CPURegister arg2=NoCPUReg, CPURegister arg3=NoCPUReg)
void JumpIfNotSmi(Register value, Label* not_smi_label);
void StoreIndirectPointerField(Register value, MemOperand dst_field_operand);
void SmiToInt32(Register smi);
void MovePair(Register dst0, Register src0, Register dst1, Register src1);
void JumpJSFunction(Register function_object, JumpMode jump_mode = JumpMode::kJump);
void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg, const CPURegister& arg1 = NoCPUReg, const CPURegister& arg2 = NoCPUReg, const CPURegister& arg3 = NoCPUReg);
void Move(Register dst, Tagged<Smi> src);
void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op);
void Dup(const VRegister& vd, const VRegister& vn, int index);
void MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset);
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE;
Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind);
void Call(ExternalReference target);
void CallRuntime(const Runtime::Function* f, int num_arguments);
void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
void Fcvtxn(const VRegister& vd, const VRegister& vn);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void PopCPURegList(CPURegList registers);
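PushCPURegList and PopCPURegList spill and reload an entire register list while keeping sp 16-byte aligned. A sketch; the register selection is arbitrary and the CPURegList constructor form shown is one of several:

// Save x19-x22 around some inline code, then restore them.
CPURegList saved(x19, x20, x21, x22);
masm->PushCPURegList(saved);
// ... code that clobbers x19-x22 ...
masm->PopCPURegList(saved);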
void CompareInstanceTypeRange(Register map, Register type_reg, InstanceType lower_limit, InstanceType higher_limit);
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check = SmiCheck::kInline, ReadOnlyCheck ro_check = ReadOnlyCheck::kInline, SlotDescriptor slot = SlotDescriptor::ForDirectPointerSlot());
void LoadCodePointerField(Register destination, MemOperand field_operand);
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE;
void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode);
void Check(Condition cond, AbortReason reason);
void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE;
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label);
void StoreSandboxedPointerField(Register value, MemOperand dst_field_operand);
void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, Label* if_marked_for_deoptimization);
void SlotAddress(Register dst, Register slot_offset);
void Move(Register dst, MemOperand src);
static bool IsNearCallOffset(int64_t offset);
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode, LinkRegisterStatus lr_status);
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure);
void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
void JumpIfLessThan(Register x, int32_t y, Label* dest);
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
void ComputeCodeStartAddress(const Register& rd);
void Mov(const VRegister& vd, int vd_index, const Register& rn);
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode = StubCallMode::kCallBuiltinPointer);
void Cmlt(const VRegister& vd, const VRegister& vn, int imm);
void I8x16BitMask(Register dst, VRegister src, VRegister temp = NoVReg);
Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm, PreShiftImmMode mode);
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector);
void StackOverflowCheck(Register num_args, Label* stack_overflow);
void LoadElementsKindFromMap(Register result, Register map);
void Mov(const Register& rd, uint64_t imm);
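Mov with a 64-bit immediate selects a short movz/movn/movk sequence; helpers such as IsImmMovn and CountSetHalfWords in this class feed that choice, and TryOneInstrMoveImmediate reports whether a single instruction sufficed. A sketch:

masm->Mov(x0, 0xdeadbeef00000000ULL);  // the macro picks the encoding
if (masm->TryOneInstrMoveImmediate(x1, 0xffff)) {
  // 0xffff fits a single movz, so exactly one instruction was emitted.
}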
void ConditionalCompareMacro(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op);
void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, const VRegister& vm);
void Cbnz(const Register& rt, Label* label);
void MaybeRestoreRegisters(RegList registers);
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode = StubCallMode::kCallBuiltinPointer);
void Fcvtxn2(const VRegister& vd, const VRegister& vn);
void CallDebugOnFunctionCall(Register fun, Register new_target, Register expected_parameter_count_or_dispatch_handle, Register actual_parameter_count);
void Cbz(const Register& rt, Label* label);
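Cbz/Cbnz and Tbz/Tbnz are the compare-and-branch and test-bit-and-branch macros; unlike the raw instructions, the macro versions transparently insert veneers when a target is out of the instruction's branch range. A sketch (Bind is declared elsewhere in this class):

Label done;
masm->Cbz(x0, &done);      // skip ahead if x0 == 0
masm->Tbnz(x0, 3, &done);  // skip ahead if bit 3 of x0 is set
// ... fall-through work ...
masm->Bind(&done);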
void Call(Register target);
void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, const MemOperand& dst);
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label* on_result, Label::Distance distance);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE;
void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, const MemOperand& src);
void JumpIfIsInRange(const Register& value, unsigned lower_limit, unsigned higher_limit, Label* on_in_range);
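JumpIfIsInRange (and CompareRange above) implement the usual unsigned range trick: subtract lower_limit so that a single unsigned comparison against higher_limit - lower_limit covers the whole interval. A sketch with hypothetical limits:

Label is_digit;
// Branch if w0 holds an ASCII digit, i.e. '0' <= w0 <= '9'.
masm->JumpIfIsInRange(w0, '0', '9', &is_digit);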
void Abs(const Register& rd, const Register& rm, Label* is_not_representable = nullptr, Label* is_representable = nullptr);
void AssertSpAligned() NOOP_UNLESS_DEBUG_CODE;
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Mov(const VRegister& vd, int vd_index, const VRegister& vn, int vn_index);
void Movi16bitHelper(const VRegister& vd, uint64_t imm);
bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, const VRegister& vt4, const MemOperand& src);
void StoreTwoTaggedFields(const Register& value, const MemOperand& dst_field_operand);
void CompareTaggedRoot(const Register& with, RootIndex index);
void PopDRegList(DoubleRegList regs);
void LoadEntryFromBuiltinIndex(Register builtin, Register target);
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const;
void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst);
void SmiUntagField(Register dst, const MemOperand& src);
void PopAll(RegList registers);
void Movi64bitHelper(const VRegister& vd, uint64_t imm);
void DecompressTagged(const Register& destination, Tagged_t immediate);
void DecompressTagged(const Register& destination, const MemOperand& field_operand);
Condition CheckSmi(Register src);
void Call(Address target, RelocInfo::Mode rmode);
void JumpIf(Condition cond, Register x, int32_t y, Label* dest);
void SmiToInt32(Register dst, Register smi);
void StoreRootRelative(int32_t offset, Register value) final;
void Ld1r(const VRegister& vt, const MemOperand& src);
void B(Label* label, Condition cond);
void LoadTaggedSignedField(const Register& destination, const MemOperand& field_operand);
void AtomicStoreTaggedField(const Register& value, const Register& dst_base, const Register& dst_index, const Register& temp);
void AssertFPCRState(Register fpcr = NoReg) NOOP_UNLESS_DEBUG_CODE;
void TailCallRuntime(Runtime::FunctionId fid);
void PushHelper(int count, int size, const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3);
static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
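CallRuntime sets up the argument count and calls through the runtime entry machinery; TailCallRuntime above is the tail-calling variant. A sketch, where the particular runtime function id is just an example:

// Call the runtime stack-guard check with zero arguments.
masm->CallRuntime(Runtime::kStackGuard, 0);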
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
void Eor3(const VRegister& vd, const VRegister& vn, const VRegister& vm, const VRegister& va);
void PushDRegList(DoubleRegList regs);
void LoadNativeContextSlot(Register dst, int index);
void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
static CPURegList DefaultFPTmpList();
void LoadTaggedFieldWithoutDecompressing(const Register& destination, const MemOperand& field_operand);
void TailCallBuiltin(Builtin builtin, Condition cond = al);
void Cmle(const VRegister& vd, const VRegister& vn, int imm);
void Mov(const Register& rd, Tagged<Smi> smi);
void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, const VRegister& vt4, int lane, const MemOperand& dst);
void PushSizeRegList(DoubleRegList registers, unsigned reg_size);
void Switch(Register scratch, Register value, int case_value_base, Label** labels, int num_labels);
void Fcvtl2(const VRegister& vd, const VRegister& vn);
void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL, const int shift_amount = 0);
void AssertSmiOrHeapObjectInMainCompressionCage(Register object) NOOP_UNLESS_DEBUG_CODE;
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE;
void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, const MemOperand& src);
void PushQRegList(DoubleRegList regs);
constexpr int8_t code() const;
Register AcquireSameSizeAs(const Register& reg);
static V8_EXPORT_PRIVATE CPURegister AcquireNextAvailable(CPURegList* available);
void Include(const Register& reg1, const Register& reg2 = NoReg);
VRegister AcquireV(VectorFormat format);
void Exclude(const Register& reg1, const Register& reg2 = NoReg);
V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);
void SetAvailableFP(const CPURegList& list);
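The last few entries belong to UseScratchRegisterScope, the RAII helper that hands out temporary registers and returns them when the scope closes. A typical usage sketch, assuming a MacroAssembler* masm:

{
  UseScratchRegisterScope temps(masm);
  // Borrow a scratch register the same width as x0 (64-bit here).
  Register scratch = temps.AcquireSameSizeAs(x0);
  masm->Mov(scratch, 42);
  masm->Add(x0, x0, scratch);
}  // scratch is released here and may be reused by later macros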