v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-arm64.cc
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_ARM64
6
7#include <optional>
8
9#include "src/base/bits.h"
20#include "src/debug/debug.h"
27#include "src/runtime/runtime.h"
29
30// Satisfy cpplint check, but don't include platform-specific header. It is
31// included recursively via macro-assembler.h.
32#if 0
34#endif
35
36#define __ ACCESS_MASM(masm)
37
38namespace v8 {
39namespace internal {
40
41CPURegList MacroAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
42
43CPURegList MacroAssembler::DefaultFPTmpList() {
44 return CPURegList(fp_scratch1, fp_scratch2);
45}
46
47namespace {
48
49// For WebAssembly we care about the full 128-bit floating-point registers. If we
50// are not running Wasm, saving only the low 64 bits of each register is enough.
51#if V8_ENABLE_WEBASSEMBLY
52constexpr int kStackSavedSavedFPSizeInBits = kQRegSizeInBits;
53#else
54constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits;
55#endif // V8_ENABLE_WEBASSEMBLY
56
57} // namespace
58
59void MacroAssembler::PushCPURegList(CPURegList registers) {
60 // If LR was stored here, we would need to sign it if
61 // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
62 DCHECK(!registers.IncludesAliasOf(lr));
63
64 int size = registers.RegisterSizeInBytes();
65 DCHECK_EQ(0, (size * registers.Count()) % 16);
66
67 // Push up to four registers at a time.
68 while (!registers.IsEmpty()) {
69 int count_before = registers.Count();
70 const CPURegister& src0 = registers.PopHighestIndex();
71 const CPURegister& src1 = registers.PopHighestIndex();
72 const CPURegister& src2 = registers.PopHighestIndex();
73 const CPURegister& src3 = registers.PopHighestIndex();
74 int count = count_before - registers.Count();
75 PushHelper(count, size, src0, src1, src2, src3);
76 }
77}
78
79void MacroAssembler::PopCPURegList(CPURegList registers) {
80 int size = registers.RegisterSizeInBytes();
81 DCHECK_EQ(0, (size * registers.Count()) % 16);
82
83 // If LR was loaded here, we would need to authenticate it if
84 // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
85 DCHECK(!registers.IncludesAliasOf(lr));
86
87 // Pop up to four registers at a time.
88 while (!registers.IsEmpty()) {
89 int count_before = registers.Count();
90 const CPURegister& dst0 = registers.PopLowestIndex();
91 const CPURegister& dst1 = registers.PopLowestIndex();
92 const CPURegister& dst2 = registers.PopLowestIndex();
93 const CPURegister& dst3 = registers.PopLowestIndex();
94 int count = count_before - registers.Count();
95 PopHelper(count, size, dst0, dst1, dst2, dst3);
96 }
97}
98
99void MacroAssembler::PushAll(RegList reglist) {
100 if (reglist.Count() % 2 != 0) {
101 DCHECK(!reglist.has(xzr));
102 reglist.set(xzr);
103 }
104
105 CPURegList registers(kXRegSizeInBits, reglist);
106 int size = registers.RegisterSizeInBytes();
107 DCHECK_EQ(0, (size * registers.Count()) % 16);
108
109 // If LR was stored here, we would need to sign it if
110 // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
111 DCHECK(!registers.IncludesAliasOf(lr));
112
113 while (!registers.IsEmpty()) {
114 const CPURegister& src0 = registers.PopLowestIndex();
115 const CPURegister& src1 = registers.PopLowestIndex();
116 stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
117 }
118}
119
120void MacroAssembler::PopAll(RegList reglist) {
121 if (reglist.Count() % 2 != 0) {
122 DCHECK(!reglist.has(xzr));
123 reglist.set(xzr);
124 }
125
126 CPURegList registers(kXRegSizeInBits, reglist);
127 int size = registers.RegisterSizeInBytes();
128 DCHECK_EQ(0, (size * registers.Count()) % 16);
129
130 // If LR was loaded here, we would need to authenticate it if
131 // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
132 DCHECK(!registers.IncludesAliasOf(lr));
133
134 while (!registers.IsEmpty()) {
135 const CPURegister& dst0 = registers.PopHighestIndex();
136 const CPURegister& dst1 = registers.PopHighestIndex();
137 ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
138 }
139}
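
// Editor's note (illustrative sketch, not part of the original file): an odd
// register count is padded with xzr so every transfer is a full 16-byte pair
// and sp stays 16-byte aligned. A hypothetical call site:
//   masm->PushAll({x0, x1, x2});  // padded to {x0, x1, x2, xzr}: two stp stores
//   ...
//   masm->PopAll({x0, x1, x2});   // two ldp loads; the xzr slot is discarded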
140
141int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
142 Register exclusion) const {
143 auto list = kCallerSaved;
144 list.Remove(exclusion);
145 list.Align();
146
147 int bytes = list.TotalSizeInBytes();
148
149 if (fp_mode == SaveFPRegsMode::kSave) {
150 auto fp_list = CPURegList::GetCallerSavedV(kStackSavedSavedFPSizeInBits);
151 DCHECK_EQ(fp_list.Count() % 2, 0);
152 bytes += fp_list.TotalSizeInBytes();
153 }
154 return bytes;
155}
156
157int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
158 Register exclusion) {
159 ASM_CODE_COMMENT(this);
160 auto list = kCallerSaved;
161 list.Remove(exclusion);
162 list.Align();
163
164 PushCPURegList(list);
165
166 int bytes = list.TotalSizeInBytes();
167
168 if (fp_mode == SaveFPRegsMode::kSave) {
169 auto fp_list = CPURegList::GetCallerSavedV(kStackSavedSavedFPSizeInBits);
170 DCHECK_EQ(fp_list.Count() % 2, 0);
171 PushCPURegList(fp_list);
172 bytes += fp_list.TotalSizeInBytes();
173 }
174 return bytes;
175}
176
177int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
178 ASM_CODE_COMMENT(this);
179 int bytes = 0;
180 if (fp_mode == SaveFPRegsMode::kSave) {
181 auto fp_list = CPURegList::GetCallerSavedV(kStackSavedSavedFPSizeInBits);
182 DCHECK_EQ(fp_list.Count() % 2, 0);
183 PopCPURegList(fp_list);
184 bytes += fp_list.TotalSizeInBytes();
185 }
186
187 auto list = kCallerSaved;
188 list.Remove(exclusion);
189 list.Align();
190
191 PopCPURegList(list);
192 bytes += list.TotalSizeInBytes();
193
194 return bytes;
195}
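
// Editor's note (illustrative usage, not from this file): PushCallerSaved and
// PopCallerSaved are intended to bracket a call, and both return the number of
// stack bytes they moved so the caller can account for them.
//   int bytes = masm->PushCallerSaved(SaveFPRegsMode::kSave, x10);  // keep x10
//   // ... code that may clobber caller-saved registers ...
//   masm->PopCallerSaved(SaveFPRegsMode::kSave, x10);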
196
197void MacroAssembler::LogicalMacro(const Register& rd, const Register& rn,
198 const Operand& operand, LogicalOp op) {
199 ASM_CODE_COMMENT(this);
200 UseScratchRegisterScope temps(this);
201
202 if (operand.NeedsRelocation(this)) {
203 Register temp = temps.AcquireX();
204 Ldr(temp, operand.immediate());
205 Logical(rd, rn, temp, op);
206
207 } else if (operand.IsImmediate()) {
208 int64_t immediate = operand.ImmediateValue();
209 unsigned reg_size = rd.SizeInBits();
210
211 // If the operation is NOT, invert the operation and immediate.
212 if ((op & NOT) == NOT) {
213 op = static_cast<LogicalOp>(op & ~NOT);
214 immediate = ~immediate;
215 }
216
217 // Ignore the top 32 bits of an immediate if we're moving to a W register.
218 if (rd.Is32Bits()) {
219 immediate &= kWRegMask;
220 }
221
222 DCHECK(rd.Is64Bits() || is_uint32(immediate));
223
224 // Special cases for all set or all clear immediates.
225 if (immediate == 0) {
226 switch (op) {
227 case AND:
228 Mov(rd, 0);
229 return;
230 case ORR: // Fall through.
231 case EOR:
232 Mov(rd, rn);
233 return;
234 case ANDS: // Fall through.
235 case BICS:
236 break;
237 default:
238 UNREACHABLE();
239 }
240 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
241 (rd.Is32Bits() && (immediate == 0xFFFFFFFFL))) {
242 switch (op) {
243 case AND:
244 Mov(rd, rn);
245 return;
246 case ORR:
247 Mov(rd, immediate);
248 return;
249 case EOR:
250 Mvn(rd, rn);
251 return;
252 case ANDS: // Fall through.
253 case BICS:
254 break;
255 default:
256 UNREACHABLE();
257 }
258 }
259
260 unsigned n, imm_s, imm_r;
261 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
262 // Immediate can be encoded in the instruction.
263 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
264 } else {
265 // Immediate can't be encoded: synthesize using move immediate.
266 Register temp = temps.AcquireSameSizeAs(rn);
267
268 // If the left-hand input is the stack pointer, we can't pre-shift the
269 // immediate, as the encoding won't allow the subsequent post shift.
270 PreShiftImmMode mode = rn == sp ? kNoShift : kAnyShift;
271 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
272
273 if (rd.IsSP()) {
274 // If rd is the stack pointer we cannot use it as the destination
275 // register so we use the temp register as an intermediate again.
276 Logical(temp, rn, imm_operand, op);
277 Mov(sp, temp);
278 } else {
279 Logical(rd, rn, imm_operand, op);
280 }
281 }
282
283 } else if (operand.IsExtendedRegister()) {
284 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
285 // Add/sub extended supports shift <= 4. We want to support exactly the
286 // same modes here.
287 DCHECK_LE(operand.shift_amount(), 4);
288 DCHECK(operand.reg().Is64Bits() ||
289 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
290 Register temp = temps.AcquireSameSizeAs(rn);
291 EmitExtendShift(temp, operand.reg(), operand.extend(),
292 operand.shift_amount());
293 Logical(rd, rn, temp, op);
294
295 } else {
296 // The operand can be encoded in the instruction.
297 DCHECK(operand.IsShiftedRegister());
298 Logical(rd, rn, operand, op);
299 }
300}
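
// Editor's note (illustrative expansions, assuming the And/Orr/Eor front-end
// macros that route here; not part of the original file):
//   And(w0, w1, Operand(0));           // all-clear special case: mov w0, #0
//   And(x0, x1, Operand(0xFF));        // encodable bitmask: and x0, x1, #0xff
//   And(x0, x1, Operand(0x12345678));  // not encodable: materialized via Mov
//                                      // into a scratch register, then a
//                                      // register-form and.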
301
302void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
303 DCHECK(allow_macro_instructions());
304 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
305 DCHECK(!rd.IsZero());
306
307 // TODO(all) extend to support more immediates.
308 //
309 // Immediates on AArch64 can be produced using an initial value, and zero to
310 // three move-keep operations.
311 //
312 // Initial values can be generated with:
313 // 1. 64-bit move zero (movz).
314 // 2. 32-bit move inverted (movn).
315 // 3. 64-bit move inverted.
316 // 4. 32-bit orr immediate.
317 // 5. 64-bit orr immediate.
318 // Move-keep may then be used to modify each of the 16-bit half-words.
319 //
320 // The code below supports all five initial value generators, and
321 // applying move-keep operations to move-zero and move-inverted initial
322 // values.
323
324 // Try to move the immediate in one instruction, and if that fails, switch to
325 // using multiple instructions.
326 if (!TryOneInstrMoveImmediate(rd, imm)) {
327 unsigned reg_size = rd.SizeInBits();
328
329 // Generic immediate case. Imm will be represented by
330 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
331 // A move-zero or move-inverted is generated for the first non-zero or
332 // non-0xFFFF immX, and a move-keep for subsequent non-zero immX.
333
334 uint64_t ignored_halfword = 0;
335 bool invert_move = false;
336 // If the number of 0xFFFF halfwords is greater than the number of 0x0000
337 // halfwords, it's more efficient to use move-inverted.
338 if (CountSetHalfWords(imm, reg_size) > CountSetHalfWords(~imm, reg_size)) {
339 ignored_halfword = 0xFFFFL;
340 invert_move = true;
341 }
342
343 // Mov instructions can't move immediate values into the stack pointer, so
344 // set up a temporary register, if needed.
345 UseScratchRegisterScope temps(this);
346 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
347
348 // Iterate through the halfwords. Use movn/movz for the first non-ignored
349 // halfword, and movk for subsequent halfwords.
350 DCHECK_EQ(reg_size % 16, 0);
351 bool first_mov_done = false;
352 for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
353 uint64_t imm16 = (imm >> (16 * i)) & 0xFFFFL;
354 if (imm16 != ignored_halfword) {
355 if (!first_mov_done) {
356 if (invert_move) {
357 movn(temp, (~imm16) & 0xFFFFL, 16 * i);
358 } else {
359 movz(temp, imm16, 16 * i);
360 }
361 first_mov_done = true;
362 } else {
363 // Construct a wider constant.
364 movk(temp, imm16, 16 * i);
365 }
366 }
367 }
368 DCHECK(first_mov_done);
369
370 // Move the temporary if the original destination register was the stack
371 // pointer.
372 if (rd.IsSP()) {
373 mov(rd, temp);
374 }
375 }
376}
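
// Editor's note (worked example, not part of the original file): the constant
// 0x0000CAFE00001234 has two non-zero halfwords and no single-instruction
// encoding, so the loop above emits roughly:
//   movz x0, #0x1234              // first non-ignored halfword
//   movk x0, #0xcafe, lsl #32     // subsequent non-ignored halfword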
377
378void MacroAssembler::Mov(const Register& rd, ExternalReference reference) {
379 if (root_array_available()) {
380 if (reference.IsIsolateFieldId()) {
381 Add(rd, kRootRegister, Operand(reference.offset_from_root_register()));
382 return;
383 }
384 }
385 // External references should not get created with IDs if
386 // `!root_array_available()`.
387 CHECK(!reference.IsIsolateFieldId());
388 Mov(rd, Operand(reference));
389}
390
391void MacroAssembler::LoadIsolateField(const Register& rd, IsolateFieldId id) {
393}
394
395void MacroAssembler::Mov(const Register& rd, const Operand& operand,
396 DiscardMoveMode discard_mode) {
397 DCHECK(allow_macro_instructions());
398 DCHECK(!rd.IsZero());
399
400 // Provide a swap register for instructions that need to write into the
401 // system stack pointer (and can't do this inherently).
402 UseScratchRegisterScope temps(this);
403 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
404
405 if (operand.NeedsRelocation(this)) {
406 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
407 // non-isolate-independent code. In many cases it might be cheaper than
408 // embedding the relocatable value.
409 if (root_array_available_ && options().isolate_independent_code) {
410 if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
411 Address addr = static_cast<Address>(operand.ImmediateValue());
412 ExternalReference reference = base::bit_cast<ExternalReference>(addr);
413 IndirectLoadExternalReference(rd, reference);
414 return;
415 } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
416 Handle<HeapObject> x(
417 reinterpret_cast<Address*>(operand.ImmediateValue()));
418 // TODO(v8:9706): Fix-it! This load will always uncompress the value
419 // even when we are loading a compressed embedded object.
420 IndirectLoadConstant(rd.X(), x);
421 return;
422 }
423 }
424 Ldr(dst, operand);
425 } else if (operand.IsImmediate()) {
426 // Call the macro assembler for generic immediates.
427 Mov(dst, operand.ImmediateValue());
428 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
429 // Emit a shift instruction if moving a shifted register. This operation
430 // could also be achieved using an orr instruction (like orn used by Mvn),
431 // but using a shift instruction makes the disassembly clearer.
432 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
433 } else if (operand.IsExtendedRegister()) {
434 // Emit an extend instruction if moving an extended register. This handles
435 // extend with post-shift operations, too.
436 EmitExtendShift(dst, operand.reg(), operand.extend(),
437 operand.shift_amount());
438 } else {
439 // Otherwise, emit a register move only if the registers are distinct, or
440 // if they are not X registers.
441 //
442 // Note that mov(w0, w0) is not a no-op because it clears the top word of
443 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
444 // registers is not required to clear the top word of the X register. In
445 // this case, the instruction is discarded.
446 //
447 // If sp is an operand, add #0 is emitted, otherwise, orr #0.
448 if (rd != operand.reg() ||
449 (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
450 Assembler::mov(rd, operand.reg());
451 }
452 // This case can handle writes into the system stack pointer directly.
453 dst = rd;
454 }
455
456 // Copy the result to the system stack pointer.
457 if (dst != rd) {
458 DCHECK(rd.IsSP());
459 Assembler::mov(rd, dst);
460 }
461}
462
463void MacroAssembler::Mov(const Register& rd, Tagged<Smi> smi) {
464 return Mov(rd, Operand(smi));
465}
466
467void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
468 DCHECK(is_uint16(imm));
469 int byte1 = (imm & 0xFF);
470 int byte2 = ((imm >> 8) & 0xFF);
471 if (byte1 == byte2) {
472 movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
473 } else if (byte1 == 0) {
474 movi(vd, byte2, LSL, 8);
475 } else if (byte2 == 0) {
476 movi(vd, byte1);
477 } else if (byte1 == 0xFF) {
478 mvni(vd, ~byte2 & 0xFF, LSL, 8);
479 } else if (byte2 == 0xFF) {
480 mvni(vd, ~byte1 & 0xFF);
481 } else {
482 UseScratchRegisterScope temps(this);
483 Register temp = temps.AcquireW();
484 movz(temp, imm);
485 dup(vd, temp);
486 }
487}
488
489void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
490 DCHECK(is_uint32(imm));
491
492 uint8_t bytes[sizeof(imm)];
493 memcpy(bytes, &imm, sizeof(imm));
494
495 // All bytes are either 0x00 or 0xFF.
496 {
497 bool all0orff = true;
498 for (int i = 0; i < 4; ++i) {
499 if ((bytes[i] != 0) && (bytes[i] != 0xFF)) {
500 all0orff = false;
501 break;
502 }
503 }
504
505 if (all0orff == true) {
506 movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
507 return;
508 }
509 }
510
511 // Of the 4 bytes, only one byte is non-zero.
512 for (int i = 0; i < 4; i++) {
513 if ((imm & (0xFF << (i * 8))) == imm) {
514 movi(vd, bytes[i], LSL, i * 8);
515 return;
516 }
517 }
518
519 // Of the 4 bytes, only one byte is not 0xFF.
520 for (int i = 0; i < 4; i++) {
521 uint32_t mask = ~(0xFF << (i * 8));
522 if ((imm & mask) == mask) {
523 mvni(vd, ~bytes[i] & 0xFF, LSL, i * 8);
524 return;
525 }
526 }
527
528 // Immediate is of the form 0x00MMFFFF.
529 if ((imm & 0xFF00FFFF) == 0x0000FFFF) {
530 movi(vd, bytes[2], MSL, 16);
531 return;
532 }
533
534 // Immediate is of the form 0x0000MMFF.
535 if ((imm & 0xFFFF00FF) == 0x000000FF) {
536 movi(vd, bytes[1], MSL, 8);
537 return;
538 }
539
540 // Immediate is of the form 0xFFMM0000.
541 if ((imm & 0xFF00FFFF) == 0xFF000000) {
542 mvni(vd, ~bytes[2] & 0xFF, MSL, 16);
543 return;
544 }
545 // Immediate is of the form 0xFFFFMM00.
546 if ((imm & 0xFFFF00FF) == 0xFFFF0000) {
547 mvni(vd, ~bytes[1] & 0xFF, MSL, 8);
548 return;
549 }
550
551 // Top and bottom 16-bits are equal.
552 if (((imm >> 16) & 0xFFFF) == (imm & 0xFFFF)) {
553 Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);
554 return;
555 }
556
557 // Default case.
558 {
559 UseScratchRegisterScope temps(this);
560 Register temp = temps.AcquireW();
561 Mov(temp, imm);
562 dup(vd, temp);
563 }
564}
565
566void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
567 // All bytes are either 0x00 or 0xFF.
568 {
569 bool all0orff = true;
570 for (int i = 0; i < 8; ++i) {
571 int byteval = (imm >> (i * 8)) & 0xFF;
572 if (byteval != 0 && byteval != 0xFF) {
573 all0orff = false;
574 break;
575 }
576 }
577 if (all0orff == true) {
578 movi(vd, imm);
579 return;
580 }
581 }
582
583 // Top and bottom 32-bits are equal.
584 if (((imm >> 32) & 0xFFFFFFFF) == (imm & 0xFFFFFFFF)) {
585 Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);
586 return;
587 }
588
589 // Default case.
590 {
591 UseScratchRegisterScope temps(this);
592 Register temp = temps.AcquireX();
593 Mov(temp, imm);
594 if (vd.Is1D()) {
595 fmov(vd.D(), temp);
596 } else {
597 dup(vd.V2D(), temp);
598 }
599 }
600}
601
602void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
603 int shift_amount) {
604 DCHECK(allow_macro_instructions());
605 if (shift_amount != 0 || shift != LSL) {
606 movi(vd, imm, shift, shift_amount);
607 } else if (vd.Is8B() || vd.Is16B()) {
608 // 8-bit immediate.
609 DCHECK(is_uint8(imm));
610 movi(vd, imm);
611 } else if (vd.Is4H() || vd.Is8H()) {
612 // 16-bit immediate.
613 Movi16bitHelper(vd, imm);
614 } else if (vd.Is2S() || vd.Is4S()) {
615 // 32-bit immediate.
616 Movi32bitHelper(vd, imm);
617 } else {
618 // 64-bit immediate.
619 Movi64bitHelper(vd, imm);
620 }
621}
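
// Editor's note (illustrative expansions, not part of the original file):
//   Movi(v0.V16B(), 0xAB);                  // 8-bit lanes: one movi
//   Movi(v0.V2D(), 0x00FF00FF00FF00FFULL);  // bytes all 0x00/0xFF: one movi
//   Movi(v0.V4S(), 0x12345678);             // no short form: constant is built
//                                           // in a W scratch register and
//                                           // broadcast with dup.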
622
623void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
624 // TODO(v8:11033): Move 128-bit values in a more efficient way.
625 DCHECK(vd.Is128Bits());
626 if (hi == lo) {
627 Movi(vd.V2D(), lo);
628 return;
629 }
630
631 Movi(vd.V1D(), lo);
632
633 if (hi != 0) {
634 UseScratchRegisterScope temps(this);
635 Register temp = temps.AcquireX();
636 Mov(temp, hi);
637 Ins(vd.V2D(), 1, temp);
638 }
639}
640
641void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
642 DCHECK(allow_macro_instructions());
643
644 if (operand.NeedsRelocation(this)) {
645 Ldr(rd, operand.immediate());
646 mvn(rd, rd);
647
648 } else if (operand.IsImmediate()) {
649 // Call the macro assembler for generic immediates.
650 Mov(rd, ~operand.ImmediateValue());
651
652 } else if (operand.IsExtendedRegister()) {
653 // Emit two instructions for the extend case. This differs from Mov, as
654 // the extend and invert can't be achieved in one instruction.
655 EmitExtendShift(rd, operand.reg(), operand.extend(),
656 operand.shift_amount());
657 mvn(rd, rd);
658
659 } else {
660 mvn(rd, operand);
661 }
662}
663
664unsigned MacroAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) {
665 DCHECK_EQ(reg_size % 16, 0);
666
667#define HALFWORD(idx) (((imm >> ((idx)*16)) & 0xFFFF) ? 1u : 0u)
668 switch (reg_size / 16) {
669 case 1:
670 return HALFWORD(0);
671 case 2:
672 return HALFWORD(0) + HALFWORD(1);
673 case 4:
674 return HALFWORD(0) + HALFWORD(1) + HALFWORD(2) + HALFWORD(3);
675 }
676#undef HALFWORD
677 UNREACHABLE();
678}
679
680// The movz instruction can generate immediates containing an arbitrary 16-bit
681// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
682bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
683 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
684 return CountSetHalfWords(imm, reg_size) <= 1;
685}
686
687// The movn instruction can generate immediates containing an arbitrary 16-bit
688// half-word, with remaining bits set, e.g. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
689bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
690 return IsImmMovz(~imm, reg_size);
691}
692
693void MacroAssembler::ConditionalCompareMacro(const Register& rn,
694 const Operand& operand,
695 StatusFlags nzcv, Condition cond,
696 ConditionalCompareOp op) {
697 DCHECK((cond != al) && (cond != nv));
698 if (operand.NeedsRelocation(this)) {
699 UseScratchRegisterScope temps(this);
700 Register temp = temps.AcquireX();
701 Ldr(temp, operand.immediate());
702 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
703
704 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
705 (operand.IsImmediate() &&
706 IsImmConditionalCompare(operand.ImmediateValue()))) {
707 // The immediate can be encoded in the instruction, or the operand is an
708 // unshifted register: call the assembler.
709 ConditionalCompare(rn, operand, nzcv, cond, op);
710
711 } else {
712 // The operand isn't directly supported by the instruction: perform the
713 // operation on a temporary register.
714 UseScratchRegisterScope temps(this);
715 Register temp = temps.AcquireSameSizeAs(rn);
716 Mov(temp, operand);
717 ConditionalCompare(rn, temp, nzcv, cond, op);
718 }
719}
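
// Editor's note (illustrative, assuming the usual Ccmp front-end macro that
// routes here; not part of the original file):
//   Ccmp(x0, 5, ZFlag, eq);    // 5 fits the 5-bit immediate: a single ccmp
//   Ccmp(x0, 100, ZFlag, eq);  // too large: the value is moved to a scratch
//                              // register and the register form is used.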
720
721void MacroAssembler::Csel(const Register& rd, const Register& rn,
722 const Operand& operand, Condition cond) {
723 DCHECK(allow_macro_instructions());
724 DCHECK(!rd.IsZero());
725 DCHECK((cond != al) && (cond != nv));
726 if (operand.IsImmediate()) {
727 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
728 // register.
729 int64_t imm = operand.ImmediateValue();
730 Register zr = AppropriateZeroRegFor(rn);
731 if (imm == 0) {
732 csel(rd, rn, zr, cond);
733 } else if (imm == 1) {
734 csinc(rd, rn, zr, cond);
735 } else if (imm == -1) {
736 csinv(rd, rn, zr, cond);
737 } else {
738 UseScratchRegisterScope temps(this);
739 Register temp = temps.AcquireSameSizeAs(rn);
740 Mov(temp, imm);
741 csel(rd, rn, temp, cond);
742 }
743 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
744 // Unshifted register argument.
745 csel(rd, rn, operand.reg(), cond);
746 } else {
747 // All other arguments.
748 UseScratchRegisterScope temps(this);
749 Register temp = temps.AcquireSameSizeAs(rn);
750 Mov(temp, operand);
751 csel(rd, rn, temp, cond);
752 }
753}
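
// Editor's note (illustrative expansions of the special cases above; not part
// of the original file):
//   Csel(x0, x1, 0, eq);   // csel  x0, x1, xzr, eq
//   Csel(x0, x1, 1, eq);   // csinc x0, x1, xzr, eq
//   Csel(x0, x1, -1, eq);  // csinv x0, x1, xzr, eq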
754
755bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
756 int64_t imm) {
757 unsigned n, imm_s, imm_r;
758 int reg_size = dst.SizeInBits();
759 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
760 // Immediate can be represented in a move zero instruction. Movz can't write
761 // to the stack pointer.
762 movz(dst, imm);
763 return true;
764 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
765 // Immediate can be represented in a move not instruction. Movn can't write
766 // to the stack pointer.
767 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
768 return true;
769 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
770 // Immediate can be represented in a logical orr instruction.
771 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
772 return true;
773 }
774 return false;
775}
776
777Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
778 int64_t imm,
779 PreShiftImmMode mode) {
780 int reg_size = dst.SizeInBits();
781 // Encode the immediate in a single move instruction, if possible.
782 if (TryOneInstrMoveImmediate(dst, imm)) {
783 // The move was successful; nothing to do here.
784 } else {
785 // Pre-shift the immediate to the least-significant bits of the register.
786 int shift_low;
787 if (reg_size == 64) {
788 shift_low = base::bits::CountTrailingZeros(imm);
789 } else {
790 DCHECK_EQ(reg_size, 32);
791 shift_low = base::bits::CountTrailingZeros(static_cast<uint32_t>(imm));
792 }
793
794 if (mode == kLimitShiftForSP) {
795 // When applied to the stack pointer, the subsequent arithmetic operation
796 // can use the extend form to shift left by a maximum of four bits. Right
797 // shifts are not allowed, so we filter them out later before the new
798 // immediate is tested.
799 shift_low = std::min(shift_low, 4);
800 }
801 int64_t imm_low = imm >> shift_low;
802
803 // Pre-shift the immediate to the most-significant bits of the register. We
804 // insert set bits in the least-significant bits, as this creates a
805 // different immediate that may be encodable using movn or orr-immediate.
806 // If this new immediate is encodable, the set bits will be eliminated by
807 // the post shift on the following instruction.
808 int shift_high = CountLeadingZeros(imm, reg_size);
809 int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
810
811 if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
812 // The new immediate has been moved into the destination's low bits:
813 // return a new leftward-shifting operand.
814 return Operand(dst, LSL, shift_low);
815 } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
816 // The new immediate has been moved into the destination's high bits:
817 // return a new rightward-shifting operand.
818 return Operand(dst, LSR, shift_high);
819 } else {
820 // Use the generic move operation to set up the immediate.
821 Mov(dst, imm);
822 }
823 }
824 return Operand(dst);
825}
826
827void MacroAssembler::AddSubMacro(const Register& rd, const Register& rn,
828 const Operand& operand, FlagsUpdate S,
829 AddSubOp op) {
830 if (operand.IsZero() && rd == rn && rd.Is64Bits() && rn.Is64Bits() &&
831 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
832 // The instruction would be a nop. Avoid generating useless code.
833 return;
834 }
835
836 if (operand.NeedsRelocation(this)) {
837 UseScratchRegisterScope temps(this);
838 Register temp = temps.AcquireSameSizeAs(rn);
840 operand.ImmediateRMode()));
841 Ldr(temp, operand.immediate());
842 AddSubMacro(rd, rn, temp, S, op);
843 } else if ((operand.IsImmediate() &&
844 !IsImmAddSub(operand.ImmediateValue())) ||
845 (rn.IsZero() && !operand.IsShiftedRegister()) ||
846 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
847 UseScratchRegisterScope temps(this);
848 Register temp = temps.AcquireSameSizeAs(rn);
849 if (operand.IsImmediate()) {
850 PreShiftImmMode mode = kAnyShift;
851
852 // If the destination or source register is the stack pointer, we can
853 // only pre-shift the immediate right by values supported in the add/sub
854 // extend encoding.
855 if (rd == sp) {
856 // If the destination is SP and flags will be set, we can't pre-shift
857 // the immediate at all.
858 mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
859 } else if (rn == sp) {
860 mode = kLimitShiftForSP;
861 }
862
863 Operand imm_operand =
864 MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
865 AddSub(rd, rn, imm_operand, S, op);
866 } else {
867 Mov(temp, operand);
868 AddSub(rd, rn, temp, S, op);
869 }
870 } else {
871 AddSub(rd, rn, operand, S, op);
872 }
873}
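
// Editor's note (worked example, assuming a free scratch register; not part of
// the original file): 0x1234000 is not a valid add/sub immediate, but it has 14
// trailing zero bits, so the pre-shift path above turns
//   Add(x0, x1, Operand(0x1234000));
// into roughly:
//   movz scratch, #0x48d
//   add  x0, x1, scratch, lsl #14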
874
875void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
876 const Register& rn,
877 const Operand& operand, FlagsUpdate S,
878 AddSubWithCarryOp op) {
879 DCHECK(rd.SizeInBits() == rn.SizeInBits());
880 UseScratchRegisterScope temps(this);
881
882 if (operand.NeedsRelocation(this)) {
883 Register temp = temps.AcquireX();
884 Ldr(temp, operand.immediate());
885 AddSubWithCarryMacro(rd, rn, temp, S, op);
886
887 } else if (operand.IsImmediate() ||
888 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
889 // Add/sub with carry (immediate or ROR shifted register).
890 Register temp = temps.AcquireSameSizeAs(rn);
891 Mov(temp, operand);
892 AddSubWithCarry(rd, rn, temp, S, op);
893
894 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
895 // Add/sub with carry (shifted register).
896 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
897 DCHECK(operand.shift() != ROR);
898 DCHECK(is_uintn(operand.shift_amount(), rd.SizeInBits() == kXRegSizeInBits
899 ? kXRegSizeInBitsLog2
900 : kWRegSizeInBitsLog2));
901 Register temp = temps.AcquireSameSizeAs(rn);
902 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
903 AddSubWithCarry(rd, rn, temp, S, op);
904
905 } else if (operand.IsExtendedRegister()) {
906 // Add/sub with carry (extended register).
907 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
908 // Add/sub extended supports a shift <= 4. We want to support exactly the
909 // same modes.
910 DCHECK_LE(operand.shift_amount(), 4);
911 DCHECK(operand.reg().Is64Bits() ||
912 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
913 Register temp = temps.AcquireSameSizeAs(rn);
914 EmitExtendShift(temp, operand.reg(), operand.extend(),
915 operand.shift_amount());
916 AddSubWithCarry(rd, rn, temp, S, op);
917
918 } else {
919 // The addressing mode is directly supported by the instruction.
920 AddSubWithCarry(rd, rn, operand, S, op);
921 }
922}
923
924void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
925 const MemOperand& addr, LoadStoreOp op) {
926 // Handle the most common addressing modes used by Liftoff directly, for better
927 // compilation performance: X register + immediate, X register + W register.
928 Instr memop = op | Rt(rt) | RnSP(addr.base());
929 if (addr.IsImmediateOffset()) {
930 int64_t offset = addr.offset();
931 unsigned size_log2 = CalcLSDataSizeLog2(op);
932 if (IsImmLSScaled(offset, size_log2)) {
933 LoadStoreScaledImmOffset(memop, static_cast<int>(offset), size_log2);
934 return;
935 } else if (IsImmLSUnscaled(offset)) {
936 LoadStoreUnscaledImmOffset(memop, static_cast<int>(offset));
937 return;
938 }
939 } else if (addr.IsRegisterOffset() && (addr.extend() == UXTW) &&
940 (addr.shift_amount() == 0)) {
941 LoadStoreWRegOffset(memop, addr.regoffset());
942 return;
943 }
944
945 // Remaining complex cases handled in sub-function.
946 LoadStoreMacroComplex(rt, addr, op);
947}
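
// Editor's note (illustrative expansions, assuming a free scratch register; not
// part of the original file):
//   Ldr(x0, MemOperand(x1, 8));        // scaled 12-bit offset: ldr x0, [x1, #8]
//   Ldr(x0, MemOperand(x1, -8));       // unscaled 9-bit offset: ldur x0, [x1, #-8]
//   Ldr(x0, MemOperand(x1, 0x40000));  // out of range: mov scratch, #0x40000;
//                                      // ldr x0, [x1, scratch]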
948
949void MacroAssembler::LoadStoreMacroComplex(const CPURegister& rt,
950 const MemOperand& addr,
951 LoadStoreOp op) {
952 int64_t offset = addr.offset();
953 bool is_imm_unscaled = IsImmLSUnscaled(offset);
954 if (addr.IsRegisterOffset() ||
955 (is_imm_unscaled && (addr.IsPostIndex() || addr.IsPreIndex()))) {
956 // Load/store encodable in one instruction.
957 LoadStore(rt, addr, op);
958 } else if (addr.IsImmediateOffset()) {
959 // Load/stores with immediate offset addressing should have been handled by
960 // the caller.
961 DCHECK(!IsImmLSScaled(offset, CalcLSDataSizeLog2(op)) && !is_imm_unscaled);
962 UseScratchRegisterScope temps(this);
963 Register temp = temps.AcquireSameSizeAs(addr.base());
964 Mov(temp, offset);
965 LoadStore(rt, MemOperand(addr.base(), temp), op);
966 } else if (addr.IsPostIndex()) {
967 // Post-index beyond unscaled addressing range.
968 DCHECK(!is_imm_unscaled);
969 LoadStore(rt, MemOperand(addr.base()), op);
970 add(addr.base(), addr.base(), offset);
971 } else {
972 // Pre-index beyond unscaled addressing range.
973 DCHECK(!is_imm_unscaled && addr.IsPreIndex());
974 add(addr.base(), addr.base(), offset);
975 LoadStore(rt, MemOperand(addr.base()), op);
976 }
977}
978
979void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
980 const CPURegister& rt2,
981 const MemOperand& addr,
982 LoadStorePairOp op) {
983 if (addr.IsRegisterOffset()) {
984 UseScratchRegisterScope temps(this);
985 Register base = addr.base();
986 Register temp = temps.AcquireSameSizeAs(base);
987 Add(temp, base, addr.regoffset());
988 LoadStorePair(rt, rt2, MemOperand(temp), op);
989 return;
990 }
991
992 int64_t offset = addr.offset();
993 unsigned size = CalcLSPairDataSize(op);
994
995 // Check if the offset fits in the immediate field of the appropriate
996 // instruction. If not, emit two instructions to perform the operation.
997 if (IsImmLSPair(offset, size)) {
998 // Encodable in one load/store pair instruction.
999 LoadStorePair(rt, rt2, addr, op);
1000 } else {
1001 Register base = addr.base();
1002 if (addr.IsImmediateOffset()) {
1003 UseScratchRegisterScope temps(this);
1004 Register temp = temps.AcquireSameSizeAs(base);
1005 Add(temp, base, offset);
1006 LoadStorePair(rt, rt2, MemOperand(temp), op);
1007 } else if (addr.IsPostIndex()) {
1008 LoadStorePair(rt, rt2, MemOperand(base), op);
1009 Add(base, base, offset);
1010 } else {
1011 DCHECK(addr.IsPreIndex());
1012 Add(base, base, offset);
1013 LoadStorePair(rt, rt2, MemOperand(base), op);
1014 }
1015 }
1016}
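
// Editor's note (worked example, assuming a free scratch register; not part of
// the original file): an offset of 1024 is outside the scaled 7-bit pair range
// (-512..504 for X-register pairs), so
//   Ldp(x0, x1, MemOperand(x2, 1024));
// becomes roughly:
//   add scratch, x2, #1024
//   ldp x0, x1, [scratch]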
1017
1018void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
1019 DCHECK(allow_macro_instructions());
1020 DCHECK(!rd.IsZero());
1021
1022 if (hint == kAdrNear) {
1023 adr(rd, label);
1024 return;
1025 }
1026
1028 if (label->is_bound()) {
1029 int label_offset = label->pos() - pc_offset();
1030 if (Instruction::IsValidPCRelOffset(label_offset)) {
1031 adr(rd, label);
1032 } else {
1033 DCHECK_LE(label_offset, 0);
1034 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
1035 adr(rd, min_adr_offset);
1036 Add(rd, rd, label_offset - min_adr_offset);
1037 }
1038 } else {
1039 UseScratchRegisterScope temps(this);
1040 Register scratch = temps.AcquireX();
1041
1042 InstructionAccurateScope scope(this,
1043 PatchingAssembler::kAdrFarPatchableNInstrs);
1044 adr(rd, label);
1045 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
1046 nop(ADR_FAR_NOP);
1047 }
1048 movz(scratch, 0);
1049 }
1050}
1051
1052void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
1053 DCHECK((reg == NoReg || type >= kBranchTypeFirstUsingReg) &&
1054 (bit == -1 || type >= kBranchTypeFirstUsingBit));
1055 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
1056 B(static_cast<Condition>(type), label);
1057 } else {
1058 switch (type) {
1059 case always:
1060 B(label);
1061 break;
1062 case never:
1063 break;
1064 case reg_zero:
1065 Cbz(reg, label);
1066 break;
1067 case reg_not_zero:
1068 Cbnz(reg, label);
1069 break;
1070 case reg_bit_clear:
1071 Tbz(reg, bit, label);
1072 break;
1073 case reg_bit_set:
1074 Tbnz(reg, bit, label);
1075 break;
1076 default:
1077 UNREACHABLE();
1078 }
1079 }
1080}
1081
1082void MacroAssembler::B(Label* label, Condition cond) {
1083 DCHECK(allow_macro_instructions());
1084 DCHECK((cond != al) && (cond != nv));
1085
1086 bool need_extra_instructions =
1088
1089 if (V8_UNLIKELY(need_extra_instructions)) {
1090 Label done;
1091 b(&done, NegateCondition(cond));
1092 B(label);
1093 bind(&done);
1094 } else {
1095 b(label, cond);
1096 }
1097}
1098
1099void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
1100 DCHECK(allow_macro_instructions());
1101
1102 bool need_extra_instructions =
1104
1105 if (V8_UNLIKELY(need_extra_instructions)) {
1106 Label done;
1107 tbz(rt, bit_pos, &done);
1108 B(label);
1109 bind(&done);
1110 } else {
1111 tbnz(rt, bit_pos, label);
1112 }
1113}
1114
1115void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
1116 DCHECK(allow_macro_instructions());
1117
1118 bool need_extra_instructions =
1120
1121 if (V8_UNLIKELY(need_extra_instructions)) {
1122 Label done;
1123 tbnz(rt, bit_pos, &done);
1124 B(label);
1125 bind(&done);
1126 } else {
1127 tbz(rt, bit_pos, label);
1128 }
1129}
1130
1131void MacroAssembler::Cbnz(const Register& rt, Label* label) {
1132 DCHECK(allow_macro_instructions());
1133
1134 bool need_extra_instructions =
1136
1137 if (V8_UNLIKELY(need_extra_instructions)) {
1138 Label done;
1139 cbz(rt, &done);
1140 B(label);
1141 bind(&done);
1142 } else {
1143 cbnz(rt, label);
1144 }
1145}
1146
1147void MacroAssembler::Cbz(const Register& rt, Label* label) {
1148 DCHECK(allow_macro_instructions());
1149
1150 bool need_extra_instructions =
1152
1153 if (V8_UNLIKELY(need_extra_instructions)) {
1154 Label done;
1155 cbnz(rt, &done);
1156 B(label);
1157 bind(&done);
1158 } else {
1159 cbz(rt, label);
1160 }
1161}
1162
1163// Pseudo-instructions.
1164
1165void MacroAssembler::Abs(const Register& rd, const Register& rm,
1166 Label* is_not_representable, Label* is_representable) {
1167 DCHECK(allow_macro_instructions());
1168 DCHECK(AreSameSizeAndType(rd, rm));
1169
1170 Cmp(rm, 1);
1171 Cneg(rd, rm, lt);
1172
1173 // If the comparison sets the v flag, the input was the smallest value
1174 // representable by rm, and the mathematical result of abs(rm) is not
1175 // representable using two's complement.
1176 if ((is_not_representable != nullptr) && (is_representable != nullptr)) {
1177 B(is_not_representable, vs);
1178 B(is_representable);
1179 } else if (is_not_representable != nullptr) {
1180 B(is_not_representable, vs);
1181 } else if (is_representable != nullptr) {
1182 B(is_representable, vc);
1183 }
1184}
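
// Editor's note (worked example, not part of the original file): for w1 == -5,
// Cmp(w1, 1) leaves lt set and Cneg writes 5 with V clear. For w1 == INT32_MIN,
// the compare overflows, V is set, rd still wraps to INT32_MIN, and control
// reaches is_not_representable.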
1185
1186void MacroAssembler::Switch(Register scratch, Register value,
1187 int case_value_base, Label** labels,
1188 int num_labels) {
1189 Register table = scratch;
1190 Label fallthrough, jump_table;
1191 if (case_value_base != 0) {
1192 Sub(value, value, case_value_base);
1193 }
1194 Cmp(value, Immediate(num_labels));
1195 B(&fallthrough, hs);
1196 Adr(table, &jump_table);
1197 Ldr(table, MemOperand(table, value, LSL, kSystemPointerSizeLog2));
1198 Br(table);
1199 // Emit the jump table inline, under the assumption that it's not too big.
1200 // Make sure there are no veneer pool entries in the middle of the table.
1201 const int jump_table_size = num_labels * kSystemPointerSize;
1202 CheckVeneerPool(false, false, jump_table_size);
1203 BlockPoolsScope no_pool_inbetween(this, jump_table_size);
1205 bind(&jump_table);
1206 for (int i = 0; i < num_labels; ++i) {
1207 dcptr(labels[i]);
1208 }
1209 bind(&fallthrough);
1210}
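
// Editor's note (sketch of the emitted sequence, illustrative only):
//   sub  value, value, #case_value_base   // only if case_value_base != 0
//   cmp  value, #num_labels
//   b.hs fallthrough
//   adr  table, jump_table
//   ldr  table, [table, value, lsl #3]    // kSystemPointerSizeLog2 == 3
//   br   table
//   jump_table:
//     num_labels 8-byte absolute label addresses (emitted by dcptr)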
1211
1212// Abstracted stack operations.
1213
1214void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
1215 const CPURegister& src2, const CPURegister& src3,
1216 const CPURegister& src4, const CPURegister& src5,
1217 const CPURegister& src6, const CPURegister& src7) {
1218 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
1219
1220 int count = 5 + src5.is_valid() + src6.is_valid() + src7.is_valid();
1221 int size = src0.SizeInBytes();
1222 DCHECK_EQ(0, (size * count) % 16);
1223
1224 PushHelper(4, size, src0, src1, src2, src3);
1225 PushHelper(count - 4, size, src4, src5, src6, src7);
1226}
1227
1228void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
1229 const CPURegister& dst2, const CPURegister& dst3,
1230 const CPURegister& dst4, const CPURegister& dst5,
1231 const CPURegister& dst6, const CPURegister& dst7) {
1232 // It is not valid to pop into the same register more than once in one
1233 // instruction, not even into the zero register.
1234 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
1235 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
1236 DCHECK(dst0.is_valid());
1237
1238 int count = 5 + dst5.is_valid() + dst6.is_valid() + dst7.is_valid();
1239 int size = dst0.SizeInBytes();
1240 DCHECK_EQ(0, (size * count) % 16);
1241
1242 PopHelper(4, size, dst0, dst1, dst2, dst3);
1243 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
1244}
1245
1246void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1247 UseScratchRegisterScope temps(this);
1248 Register temp = temps.AcquireSameSizeAs(count);
1249
1250 Label loop, leftover2, leftover1, done;
1251
1252 Subs(temp, count, 4);
1253 B(mi, &leftover2);
1254
1255 // Push groups of four first.
1256 Bind(&loop);
1257 Subs(temp, temp, 4);
1258 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1259 B(pl, &loop);
1260
1261 // Push groups of two.
1262 Bind(&leftover2);
1263 Tbz(count, 1, &leftover1);
1264 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1265
1266 // Push the last one (if required).
1267 Bind(&leftover1);
1268 Tbz(count, 0, &done);
1269 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1270
1271 Bind(&done);
1272}
1273
1274void MacroAssembler::PushHelper(int count, int size, const CPURegister& src0,
1275 const CPURegister& src1,
1276 const CPURegister& src2,
1277 const CPURegister& src3) {
1278 // Ensure that we don't unintentionally modify scratch or debug registers.
1279 InstructionAccurateScope scope(this);
1280
1281 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1282 DCHECK(size == src0.SizeInBytes());
1283
1284 // When pushing multiple registers, the store order is chosen such that
1285 // Push(a, b) is equivalent to Push(a) followed by Push(b).
1286 switch (count) {
1287 case 1:
1288 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1289 str(src0, MemOperand(sp, -1 * size, PreIndex));
1290 break;
1291 case 2:
1292 DCHECK(src2.IsNone() && src3.IsNone());
1293 stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
1294 break;
1295 case 3:
1296 DCHECK(src3.IsNone());
1297 stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
1298 str(src0, MemOperand(sp, 2 * size));
1299 break;
1300 case 4:
1301 // Skip over 4 * size, then fill in the gap. This allows four W registers
1302 // to be pushed using sp, whilst maintaining 16-byte alignment for sp
1303 // at all times.
1304 stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
1305 stp(src1, src0, MemOperand(sp, 2 * size));
1306 break;
1307 default:
1308 UNREACHABLE();
1309 }
1310}
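
// Editor's note (illustrative layout for count == 3, restating the code above):
//   [sp + 2*size]  src0   (highest address, pushed "first")
//   [sp + 1*size]  src1
//   [sp + 0*size]  src2
// which matches the documented equivalence that Push(a, b) behaves like
// Push(a) followed by Push(b).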
1311
1312void MacroAssembler::PopHelper(int count, int size, const CPURegister& dst0,
1313 const CPURegister& dst1, const CPURegister& dst2,
1314 const CPURegister& dst3) {
1316 // Ensure that we don't unintentionally modify scratch or debug registers.
1316 InstructionAccurateScope scope(this);
1317
1318 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1319 DCHECK(size == dst0.SizeInBytes());
1320
1321 // When popping multiple registers, the load order is chosen such that
1322 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1323 switch (count) {
1324 case 1:
1325 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1326 ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
1327 break;
1328 case 2:
1329 DCHECK(dst2.IsNone() && dst3.IsNone());
1330 ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
1331 break;
1332 case 3:
1333 DCHECK(dst3.IsNone());
1334 ldr(dst2, MemOperand(sp, 2 * size));
1335 ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
1336 break;
1337 case 4:
1338 // Load the higher addresses first, then load the lower addresses and
1339 // skip the whole block in the second instruction. This allows four W
1340 // registers to be popped using sp, whilst maintaining 16-byte alignment
1341 // for sp at all times.
1342 ldp(dst2, dst3, MemOperand(sp, 2 * size));
1343 ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
1344 break;
1345 default:
1346 UNREACHABLE();
1347 }
1348}
1349
1350void MacroAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
1351 int offset) {
1352 DCHECK(AreSameSizeAndType(src1, src2));
1353 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1354 Stp(src1, src2, MemOperand(sp, offset));
1355}
1356
1357void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
1358 int offset) {
1359 DCHECK(AreSameSizeAndType(dst1, dst2));
1360 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1361 Ldp(dst1, dst2, MemOperand(sp, offset));
1362}
1363
1364void MacroAssembler::PushCalleeSavedRegisters() {
1365 ASM_CODE_COMMENT(this);
1366 // Ensure that the macro-assembler doesn't use any scratch registers.
1367 InstructionAccurateScope scope(this);
1368
1369 MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
1370
1371 stp(d14, d15, tos);
1372 stp(d12, d13, tos);
1373 stp(d10, d11, tos);
1374 stp(d8, d9, tos);
1375
1376 stp(x27, x28, tos);
1377 stp(x25, x26, tos);
1378 stp(x23, x24, tos);
1379 stp(x21, x22, tos);
1380 stp(x19, x20, tos);
1381
1382 static_assert(
1384 18 * kSystemPointerSize);
1385
1386#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
1387 // Use the stack pointer's value immediately before pushing the LR as the
1388 // context for signing it. This is what the StackFrameIterator expects.
1389 pacibsp();
1390#endif
1391
1392 stp(x29, x30, tos); // fp, lr
1393
1394 static_assert(
1396}
1397
1398void MacroAssembler::PopCalleeSavedRegisters() {
1399 ASM_CODE_COMMENT(this);
1400 // Ensure that the macro-assembler doesn't use any scratch registers.
1401 InstructionAccurateScope scope(this);
1402
1403 MemOperand tos(sp, 2 * kXRegSize, PostIndex);
1404
1405 ldp(x29, x30, tos); // fp, lr
1406
1407#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
1408 // The context (stack pointer value) for authenticating the LR here must
1409 // match the one used for signing it (see `PushCalleeSavedRegisters`).
1410 autibsp();
1411#endif
1412
1413 ldp(x19, x20, tos);
1414 ldp(x21, x22, tos);
1415 ldp(x23, x24, tos);
1416 ldp(x25, x26, tos);
1417 ldp(x27, x28, tos);
1418
1419 ldp(d8, d9, tos);
1420 ldp(d10, d11, tos);
1421 ldp(d12, d13, tos);
1422 ldp(d14, d15, tos);
1423}
1424
1425namespace {
1426
1427#ifndef V8_ENABLE_LEAPTIERING
1428// Only used when leaptiering is disabled.
1429void TailCallOptimizedCodeSlot(MacroAssembler* masm,
1430 Register optimized_code_entry,
1431 Register scratch) {
1432 // ----------- S t a t e -------------
1433 // -- x0 : actual argument count
1434 // -- x3 : new target (preserved for callee if needed, and caller)
1435 // -- x1 : target function (preserved for callee if needed, and caller)
1436 // -----------------------------------
1437 ASM_CODE_COMMENT(masm);
1438 DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
1439
1440 Label heal_optimized_code_slot;
1441
1442 // If the optimized code is cleared, go to runtime to update the optimization
1443 // marker field.
1444 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
1445 &heal_optimized_code_slot);
1446
1447 // The entry references a CodeWrapper object. Unwrap it now.
1448 __ LoadCodePointerField(
1449 optimized_code_entry,
1450 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset));
1451
1452 // Check if the optimized code is marked for deopt. If it is, call the
1453 // runtime to clear it.
1454 __ AssertCode(optimized_code_entry);
1455 __ JumpIfCodeIsMarkedForDeoptimization(optimized_code_entry, scratch,
1456 &heal_optimized_code_slot);
1457
1458 // Optimized code is good, get it into the closure and link the closure into
1459 // the optimized functions list, then tail call the optimized code.
1460 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, x1);
1461 static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
1462 __ Move(x2, optimized_code_entry);
1463 __ JumpCodeObject(x2, kJSEntrypointTag);
1464
1465 // Optimized code slot contains deoptimized code or code is cleared and
1466 // optimized code marker isn't updated. Evict the code, update the marker
1467 // and re-enter the closure's code.
1468 __ bind(&heal_optimized_code_slot);
1469 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
1470}
1471#endif // V8_ENABLE_LEAPTIERING
1472
1473} // namespace
1474
1475#ifdef V8_ENABLE_DEBUG_CODE
1476void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
1477 if (v8_flags.debug_code) {
1478 IsObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE);
1479 Assert(eq, AbortReason::kExpectedFeedbackCell);
1480 }
1481}
1482void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
1483 if (v8_flags.debug_code) {
1484 IsObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
1485 Assert(eq, AbortReason::kExpectedFeedbackVector);
1486 }
1487}
1488#endif // V8_ENABLE_DEBUG_CODE
1489
1490void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
1491 Register optimized_code, Register closure) {
1492 ASM_CODE_COMMENT(this);
1493 DCHECK(!AreAliased(optimized_code, closure));
1494
1495#ifdef V8_ENABLE_LEAPTIERING
1496 UNREACHABLE();
1497#else
1498 // Store code entry in the closure.
1499 AssertCode(optimized_code);
1500 StoreCodePointerField(optimized_code,
1501 FieldMemOperand(closure, JSFunction::kCodeOffset));
1502 RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
1505#endif // V8_ENABLE_LEAPTIERING
1506}
1507
1508void MacroAssembler::GenerateTailCallToReturnedCode(
1509 Runtime::FunctionId function_id) {
1510 ASM_CODE_COMMENT(this);
1511 // ----------- S t a t e -------------
1512 // -- x0 : actual argument count (preserved for callee)
1513 // -- x1 : target function (preserved for callee)
1514 // -- x3 : new target (preserved for callee)
1515 // -- x4 : dispatch handle (preserved for callee)
1516 // -----------------------------------
1517 {
1518 FrameScope scope(this, StackFrame::INTERNAL);
1519 // Push a copy of the target function, the new target, the actual
1520 // argument count, and the dispatch handle.
1523 : padreg;
1525 // No need to SmiTag the dispatch handle as it always looks like a Smi.
1526 static_assert(kJSDispatchHandleShift > 0);
1529 // Push another copy as a parameter to the runtime call.
1531
1532 CallRuntime(function_id, 1);
1533 Mov(x2, x0);
1534
1535 // Restore target function, new target, actual argument count, and dispatch
1536 // handle.
1540 }
1541
1542 static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
1544}
1545
1546#ifndef V8_ENABLE_LEAPTIERING
1547
1548// Read off the flags in the feedback vector and check if there
1549// is optimized code or a tiering state that needs to be processed.
1551 Register flags, Register feedback_vector, CodeKind current_code_kind) {
1552 ASM_CODE_COMMENT(this);
1553 DCHECK(!AreAliased(flags, feedback_vector));
1554 DCHECK(CodeKindCanTierUp(current_code_kind));
1555 uint32_t flag_mask =
1557 Ldrh(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1558 Tst(flags, flag_mask);
1559 return ne;
1560}
1561
1563 Register flags, Register feedback_vector, CodeKind current_code_kind,
1564 Label* flags_need_processing) {
1565 ASM_CODE_COMMENT(this);
1567 current_code_kind),
1568 flags_need_processing);
1569}
1570
1572 Register flags, Register feedback_vector) {
1573 ASM_CODE_COMMENT(this);
1574 DCHECK(!AreAliased(flags, feedback_vector));
1575 Label maybe_has_optimized_code, maybe_needs_logging;
1576 // Check if optimized code is available.
1579 &maybe_needs_logging);
1580 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
1581
1582 bind(&maybe_needs_logging);
1583 TestAndBranchIfAllClear(flags, FeedbackVector::LogNextExecutionBit::kMask,
1584 &maybe_has_optimized_code);
1585 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
1586
1587 bind(&maybe_has_optimized_code);
1588 // This tiering logic is only needed if leaptiering is disabled. Otherwise
1589 // we'll automatically tier up through the dispatch table.
1590 Register optimized_code_entry = x7;
1591 LoadTaggedField(optimized_code_entry,
1592 FieldMemOperand(feedback_vector,
1593 FeedbackVector::kMaybeOptimizedCodeOffset));
1594 TailCallOptimizedCodeSlot(this, optimized_code_entry, x4);
1595}
1596
1597#endif // !V8_ENABLE_LEAPTIERING
1598
1599Condition MacroAssembler::CheckSmi(Register object) {
1600 static_assert(kSmiTag == 0);
1601 Tst(object, kSmiTagMask);
1602 return eq;
1603}
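
// Editor's note (usage, mirroring the pattern used elsewhere in this file): the
// returned condition feeds a conditional branch directly, e.g.
//   B(&is_smi, CheckSmi(x0));   // taken when the Smi tag bits are zero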
1604
1605#ifdef V8_ENABLE_DEBUG_CODE
1606void MacroAssembler::AssertSpAligned() {
1607 if (!v8_flags.debug_code) return;
1608 ASM_CODE_COMMENT(this);
1609 HardAbortScope hard_abort(this); // Avoid calls to Abort.
1610 // Arm64 requires the stack pointer to be 16-byte aligned prior to address
1611 // calculation.
1612 UseScratchRegisterScope scope(this);
1613 Register temp = scope.AcquireX();
1614 Mov(temp, sp);
1615 Tst(temp, 15);
1616 Check(eq, AbortReason::kUnexpectedStackPointer);
1617}
1618
1619void MacroAssembler::AssertFPCRState(Register fpcr) {
1620 // TODO(olivf, 382005099): This check is currently behind `slow_debug_code` as
1621 // a temporary hack to avoid enabling it on DCHECK-enabled canaries. The
1622 // reason is that this check is violated by callbacks from WebAudio.
1623 if (!v8_flags.slow_debug_code) return;
1624 ASM_CODE_COMMENT(this);
1625 Label unexpected_mode, done;
1626 UseScratchRegisterScope temps(this);
1627 if (fpcr.IsNone()) {
1628 fpcr = temps.AcquireX();
1629 Mrs(fpcr, FPCR);
1630 }
1631
1632 // Settings left to their default values:
1633 // - Assert that flush-to-zero is not set.
1634 // TODO(leszeks): Reenable check based on isolate flag.
1635 // Tbnz(fpcr, FZ_offset, &unexpected_mode);
1636 // - Assert that the rounding mode is nearest-with-ties-to-even.
1637 static_assert(FPTieEven == 0);
1638 Tst(fpcr, RMode_mask);
1639 B(eq, &done);
1640
1641 Bind(&unexpected_mode);
1642 Abort(AbortReason::kUnexpectedFPCRMode);
1643
1644 Bind(&done);
1645}
1646
1647void MacroAssembler::AssertSmi(Register object, AbortReason reason) {
1648 if (!v8_flags.debug_code) return;
1649 ASM_CODE_COMMENT(this);
1650 static_assert(kSmiTag == 0);
1651 Tst(object, kSmiTagMask);
1652 Check(eq, reason);
1653}
1654
1655void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
1656 if (!v8_flags.debug_code) return;
1657 ASM_CODE_COMMENT(this);
1658 static_assert(kSmiTag == 0);
1659 Tst(object, kSmiTagMask);
1660 Check(ne, reason);
1661}
1662
1663void MacroAssembler::AssertZeroExtended(Register int32_register) {
1664 if (!v8_flags.slow_debug_code) return;
1665 ASM_CODE_COMMENT(this);
1666 Tst(int32_register.X(), kMaxUInt32);
1667 Check(ls, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
1668}
1669
1670void MacroAssembler::AssertMap(Register object) {
1671 if (!v8_flags.debug_code) return;
1672 ASM_CODE_COMMENT(this);
1673 AssertNotSmi(object, AbortReason::kOperandIsNotAMap);
1674
1675 UseScratchRegisterScope temps(this);
1676 Register temp = temps.AcquireX();
1677
1678 IsObjectType(object, temp, temp, MAP_TYPE);
1679 Check(eq, AbortReason::kOperandIsNotAMap);
1680}
1681
1682void MacroAssembler::AssertCode(Register object) {
1683 if (!v8_flags.debug_code) return;
1684 ASM_CODE_COMMENT(this);
1685 AssertNotSmi(object, AbortReason::kOperandIsNotACode);
1686
1687 UseScratchRegisterScope temps(this);
1688 Register temp = temps.AcquireX();
1689
1690 IsObjectType(object, temp, temp, CODE_TYPE);
1691 Check(eq, AbortReason::kOperandIsNotACode);
1692}
1693
1694void MacroAssembler::AssertConstructor(Register object) {
1695 if (!v8_flags.debug_code) return;
1696 ASM_CODE_COMMENT(this);
1697 AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
1698
1699 UseScratchRegisterScope temps(this);
1700 Register temp = temps.AcquireX();
1701
1702 LoadMap(temp, object);
1703 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1704 Tst(temp, Operand(Map::Bits1::IsConstructorBit::kMask));
1705
1706 Check(ne, AbortReason::kOperandIsNotAConstructor);
1707}
1708
1709void MacroAssembler::AssertFunction(Register object) {
1710 if (!v8_flags.debug_code) return;
1711 ASM_CODE_COMMENT(this);
1712 AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
1713
1714 UseScratchRegisterScope temps(this);
1715 Register temp = temps.AcquireX();
1716 LoadMap(temp, object);
1717 CompareInstanceTypeRange(temp, temp, FIRST_JS_FUNCTION_TYPE,
1718 LAST_JS_FUNCTION_TYPE);
1719 Check(ls, AbortReason::kOperandIsNotAFunction);
1720}
1721
1722void MacroAssembler::AssertCallableFunction(Register object) {
1723 if (!v8_flags.debug_code) return;
1724 ASM_CODE_COMMENT(this);
1725 AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
1726
1727 UseScratchRegisterScope temps(this);
1728 Register temp = temps.AcquireX();
1729 LoadMap(temp, object);
1732 Check(ls, AbortReason::kOperandIsNotACallableFunction);
1733}
1734
1735void MacroAssembler::AssertBoundFunction(Register object) {
1736 if (!v8_flags.debug_code) return;
1737 ASM_CODE_COMMENT(this);
1738 AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
1739
1740 UseScratchRegisterScope temps(this);
1741 Register temp = temps.AcquireX();
1742
1743 IsObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1744 Check(eq, AbortReason::kOperandIsNotABoundFunction);
1745}
1746
1747void MacroAssembler::AssertSmiOrHeapObjectInMainCompressionCage(
1748 Register object) {
1749 if (!PointerCompressionIsEnabled()) return;
1750 if (!v8_flags.debug_code) return;
1751 ASM_CODE_COMMENT(this);
1752 // We may not have any scratch registers so we preserve our input register.
1753 Push(object, xzr);
1754 Label ok;
1755 B(&ok, CheckSmi(object));
1756 Mov(object, Operand(object, LSR, 32));
1757 // Either the value is now equal to the right-shifted pointer compression
1758 // cage base or it's zero if we got a compressed pointer register as input.
1759 Cmp(object, 0);
1760 B(kEqual, &ok);
1761 Cmp(object, Operand(kPtrComprCageBaseRegister, LSR, 32));
1762 Check(kEqual, AbortReason::kObjectNotTagged);
1763 bind(&ok);
1764 Pop(xzr, object);
1765}
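// Push(object, xzr) saves the input paired with a dummy register so that sp
// stays 16-byte aligned, since no scratch registers may be available here.
// Comparing the LSR-32 values checks only the upper halves: a full tagged
// pointer must share its upper 32 bits with the pointer compression cage base,
// while an already-compressed value has an all-zero upper half.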
1766
1767void MacroAssembler::AssertGeneratorObject(Register object) {
1768 if (!v8_flags.debug_code) return;
1769 ASM_CODE_COMMENT(this);
1770 AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
1771
1772 // Load map
1773 UseScratchRegisterScope temps(this);
1774 Register temp = temps.AcquireX();
1775 LoadMap(temp, object);
1776
1777 // Load instance type and check if JSGeneratorObject
1778 CompareInstanceTypeRange(temp, temp, FIRST_JS_GENERATOR_OBJECT_TYPE,
1779 LAST_JS_GENERATOR_OBJECT_TYPE);
1780 // Restore generator object to register and perform assertion
1781 Check(ls, AbortReason::kOperandIsNotAGeneratorObject);
1782}
1783
1784void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
1785 if (!v8_flags.debug_code) return;
1786 ASM_CODE_COMMENT(this);
1787 UseScratchRegisterScope temps(this);
1788 Register scratch = temps.AcquireX();
1789 Label done_checking;
1790 AssertNotSmi(object);
1791 JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking);
1792 LoadMap(scratch, object);
1793 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1794 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1795 Bind(&done_checking);
1796}
1797
1798void MacroAssembler::AssertPositiveOrZero(Register value) {
1799 if (!v8_flags.debug_code) return;
1800 ASM_CODE_COMMENT(this);
1801 Label done;
1802 int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1803 Tbz(value, sign_bit, &done);
1804 Abort(AbortReason::kUnexpectedNegativeValue);
1805 Bind(&done);
1806}
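// Tbz branches to `done` when the sign bit of the value is zero, so the Abort
// is reached only for negative inputs. The bit index depends on the operand
// width: bit 63 for X registers, bit 31 for W registers.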
1807
1808void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
1809 Register tmp, AbortReason abort_reason) {
1810 if (!v8_flags.debug_code) return;
1811
1812 ASM_CODE_COMMENT(this);
1813 DCHECK(!AreAliased(object, map_tmp, tmp));
1814 Label ok;
1815
1816 JumpIfSmi(object, &ok);
1817
1818 LoadMap(map_tmp, object);
1819 CompareInstanceType(map_tmp, tmp, LAST_NAME_TYPE);
1820 B(kUnsignedLessThanEqual, &ok);
1821
1822 CompareInstanceType(map_tmp, tmp, FIRST_JS_RECEIVER_TYPE);
1823 B(kUnsignedGreaterThanEqual, &ok);
1824
1825 CompareRoot(map_tmp, RootIndex::kHeapNumberMap);
1826 B(kEqual, &ok);
1827
1828 CompareRoot(map_tmp, RootIndex::kBigIntMap);
1829 B(kEqual, &ok);
1830
1831 CompareRoot(object, RootIndex::kUndefinedValue);
1832 B(kEqual, &ok);
1833
1834 CompareRoot(object, RootIndex::kTrueValue);
1835 B(kEqual, &ok);
1836
1837 CompareRoot(object, RootIndex::kFalseValue);
1838 B(kEqual, &ok);
1839
1840 CompareRoot(object, RootIndex::kNullValue);
1841 B(kEqual, &ok);
1842
1843 Abort(abort_reason);
1844
1845 bind(&ok);
1846}
1847
1848void MacroAssembler::Assert(Condition cond, AbortReason reason) {
1849 if (v8_flags.debug_code) {
1850 Check(cond, reason);
1851 }
1852}
1853
1854void MacroAssembler::AssertUnreachable(AbortReason reason) {
1855 if (v8_flags.debug_code) Abort(reason);
1856}
1857#endif // V8_ENABLE_DEBUG_CODE
1858
1859void MacroAssembler::CopySlots(int dst, Register src, Register slot_count) {
1860 DCHECK(!src.IsZero());
1861 UseScratchRegisterScope scope(this);
1862 Register dst_reg = scope.AcquireX();
1863 SlotAddress(dst_reg, dst);
1864 SlotAddress(src, src);
1865 CopyDoubleWords(dst_reg, src, slot_count);
1866}
1867
1868void MacroAssembler::CopySlots(Register dst, Register src,
1869 Register slot_count) {
1870 DCHECK(!dst.IsZero() && !src.IsZero());
1871 SlotAddress(dst, dst);
1872 SlotAddress(src, src);
1873 CopyDoubleWords(dst, src, slot_count);
1874}
1875
1876void MacroAssembler::CopyDoubleWords(Register dst, Register src, Register count,
1877 CopyDoubleWordsMode mode) {
1878 ASM_CODE_COMMENT(this);
1879 DCHECK(!AreAliased(dst, src, count));
1880
1881 if (v8_flags.debug_code) {
1882 Register pointer1 = dst;
1883 Register pointer2 = src;
1884 if (mode == kSrcLessThanDst) {
1885 pointer1 = src;
1886 pointer2 = dst;
1887 }
1888 // Copy requires pointer1 < pointer2 || (pointer1 - pointer2) >= count.
1889 Label pointer1_below_pointer2;
1890 Subs(pointer1, pointer1, pointer2);
1891 B(lt, &pointer1_below_pointer2);
1892 Cmp(pointer1, count);
1893 Check(ge, AbortReason::kOffsetOutOfRange);
1894 Bind(&pointer1_below_pointer2);
1895 Add(pointer1, pointer1, pointer2);
1896 }
1897 static_assert(kSystemPointerSize == kDRegSize,
1898 "pointers must be the same size as doubles");
1899
1900 if (mode == kDstLessThanSrcAndReverse) {
1901 Add(src, src, Operand(count, LSL, kSystemPointerSizeLog2));
1902 Sub(src, src, kSystemPointerSize);
1903 }
1904
1905 int src_direction = (mode == kDstLessThanSrc) ? 1 : -1;
1906 int dst_direction = (mode == kSrcLessThanDst) ? -1 : 1;
1907
1908 UseScratchRegisterScope scope(this);
1909 VRegister temp0 = scope.AcquireD();
1910 VRegister temp1 = scope.AcquireD();
1911
1912 Label pairs, loop, done;
1913
1914 Tbz(count, 0, &pairs);
1915 Ldr(temp0, MemOperand(src, src_direction * kSystemPointerSize, PostIndex));
1916 Sub(count, count, 1);
1917 Str(temp0, MemOperand(dst, dst_direction * kSystemPointerSize, PostIndex));
1918
1919 Bind(&pairs);
1920 if (mode == kSrcLessThanDst) {
1921 // Adjust pointers for post-index ldp/stp with negative offset:
1922 Sub(dst, dst, kSystemPointerSize);
1923 Sub(src, src, kSystemPointerSize);
1924 } else if (mode == kDstLessThanSrcAndReverse) {
1925 Sub(src, src, kSystemPointerSize);
1926 }
1927 Bind(&loop);
1928 Cbz(count, &done);
1929 Ldp(temp0, temp1,
1930 MemOperand(src, 2 * src_direction * kSystemPointerSize, PostIndex));
1931 Sub(count, count, 2);
1932 if (mode == kDstLessThanSrcAndReverse) {
1933 Stp(temp1, temp0,
1934 MemOperand(dst, 2 * dst_direction * kSystemPointerSize, PostIndex));
1935 } else {
1936 Stp(temp0, temp1,
1937 MemOperand(dst, 2 * dst_direction * kSystemPointerSize, PostIndex));
1938 }
1939 B(&loop);
1940
1941 // TODO(all): large copies may benefit from using temporary Q registers
1942 // to copy four double words per iteration.
1943
1944 Bind(&done);
1945}
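// CopyDoubleWords copies `count` pointer-sized (64-bit) stack slots from `src`
// to `dst`: one slot first if `count` is odd, then ldp/stp pairs. Note that
// dst, src and count are all clobbered; the address registers are post-indexed
// past the copied region and count is decremented to zero. The mode selects the
// traversal direction so that overlapping ranges are handled safely, e.g.
// kSrcLessThanDst walks backwards to avoid overwriting not-yet-copied slots.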
1946
1947void MacroAssembler::SlotAddress(Register dst, int slot_offset) {
1948 Add(dst, sp, slot_offset << kSystemPointerSizeLog2);
1949}
1950
1951void MacroAssembler::SlotAddress(Register dst, Register slot_offset) {
1952 Add(dst, sp, Operand(slot_offset, LSL, kSystemPointerSizeLog2));
1953}
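// SlotAddress converts a stack-slot index into an absolute address. With
// kSystemPointerSizeLog2 == 3 on arm64, SlotAddress(x10, 2) computes
// x10 = sp + 16, the address of the third slot above the stack pointer.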
1954
1955void MacroAssembler::CanonicalizeNaN(const VRegister& dst,
1956 const VRegister& src) {
1957 AssertFPCRState();
1958
1959 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
1960 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
1961 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1962 Fsub(dst, src, fp_zero);
1963}
1964
1965void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) {
1966 ASM_CODE_COMMENT(this);
1967 if (CanBeImmediate(index)) {
1968 Mov(destination,
1969 Immediate(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO));
1970 return;
1971 }
1972 LoadRoot(destination, index);
1973}
1974
1975void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
1976 ASM_CODE_COMMENT(this);
1977 if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index) &&
1978 IsImmAddSub(ReadOnlyRootPtr(index))) {
1979 Mov(destination, ReadOnlyRootPtr(index));
1980 return;
1981 }
1982 // Many roots have addresses that are too large to fit into addition immediate
1983 // operands. Evidence suggests that the extra instruction for decompression
1984 // costs us more than the load.
1985 Ldr(destination,
1986 MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
1987}
1988
1989void MacroAssembler::PushRoot(RootIndex index) {
1990 ASM_CODE_COMMENT(this);
1991 UseScratchRegisterScope temps(this);
1992 Register tmp = temps.AcquireX();
1993 LoadRoot(tmp, index);
1994 Push(tmp);
1995}
1996
1997void MacroAssembler::Move(Register dst, Tagged<Smi> src) { Mov(dst, src); }
1998void MacroAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); }
1999void MacroAssembler::Move(Register dst, Register src) {
2000 if (dst == src) return;
2001 Mov(dst, src);
2002}
2003
2004void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1,
2005 Register src1) {
2006 DCHECK_NE(dst0, dst1);
2007 if (dst0 != src1) {
2008 Mov(dst0, src0);
2009 Mov(dst1, src1);
2010 } else if (dst1 != src0) {
2011 // Swap the order of the moves to resolve the overlap.
2012 Mov(dst1, src1);
2013 Mov(dst0, src0);
2014 } else {
2015 // Worst-case scenario: this is a swap.
2016 Swap(dst0, src0);
2017 }
2018}
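// MovePair handles the three possible overlap cases of a parallel move: if
// dst0 != src1 the straightforward order works; if dst0 == src1 but
// dst1 != src0, emitting the second move first avoids clobbering src1; and if
// both overlap (dst0 == src1 and dst1 == src0) the pair is a swap, which needs
// the scratch-register Swap() below.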
2019
2020void MacroAssembler::Swap(Register lhs, Register rhs) {
2021 DCHECK(lhs.IsSameSizeAndType(rhs));
2022 DCHECK_NE(lhs, rhs);
2023 UseScratchRegisterScope temps(this);
2024 Register temp = temps.AcquireX();
2025 Mov(temp, rhs);
2026 Mov(rhs, lhs);
2027 Mov(lhs, temp);
2028}
2029
2030void MacroAssembler::Swap(VRegister lhs, VRegister rhs) {
2031 DCHECK(lhs.IsSameSizeAndType(rhs));
2032 DCHECK_NE(lhs, rhs);
2033 UseScratchRegisterScope temps(this);
2034 VRegister temp = VRegister::no_reg();
2035 if (lhs.IsS()) {
2036 temp = temps.AcquireS();
2037 } else if (lhs.IsD()) {
2038 temp = temps.AcquireD();
2039 } else {
2040 DCHECK(lhs.IsQ());
2041 temp = temps.AcquireQ();
2042 }
2043 Mov(temp, rhs);
2044 Mov(rhs, lhs);
2045 Mov(lhs, temp);
2046}
2047
2048void MacroAssembler::CallRuntime(const Runtime::Function* f,
2049 int num_arguments) {
2050 ASM_CODE_COMMENT(this);
2051 // All arguments must be on the stack before this function is called.
2052 // x0 holds the return value after the call.
2053
2054 // Check that the number of arguments matches what the function expects.
2055 // If f->nargs is -1, the function can accept a variable number of arguments.
2056 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2057
2058 // Place the necessary arguments.
2059 Mov(x0, num_arguments);
2060 Mov(x1, ExternalReference::Create(f));
2061
2062 bool switch_to_central = options().is_wasm;
2063 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central));
2064}
2065
2066void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2067 bool builtin_exit_frame) {
2068 ASM_CODE_COMMENT(this);
2069 Mov(x1, builtin);
2070 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
2071}
2072
2073void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2074 ASM_CODE_COMMENT(this);
2075 const Runtime::Function* function = Runtime::FunctionForId(fid);
2076 DCHECK_EQ(1, function->result_size);
2077 if (function->nargs >= 0) {
2078 // TODO(1236192): Most runtime routines don't need the number of
2079 // arguments passed in because it is constant. At some point we
2080 // should remove this need and make the runtime routine entry code
2081 // smarter.
2082 Mov(x0, function->nargs);
2083 }
2084 JumpToExternalReference(ExternalReference::Create(fid));
2085}
2086
2087int MacroAssembler::ActivationFrameAlignment() {
2088#if V8_HOST_ARCH_ARM64
2089 // Running on the real platform. Use the alignment as mandated by the local
2090 // environment.
2091 // Note: This will break if we ever start generating snapshots on one ARM
2092 // platform for another ARM platform with a different alignment.
2093 return base::OS::ActivationFrameAlignment();
2094#else // V8_HOST_ARCH_ARM64
2095 // If we are using the simulator then we should always align to the expected
2096 // alignment. As the simulator is used to generate snapshots we do not know
2097 // if the target platform will need alignment, so this is controlled from a
2098 // flag.
2099 return v8_flags.sim_stack_alignment;
2100#endif // V8_HOST_ARCH_ARM64
2101}
2102
2103int MacroAssembler::CallCFunction(ExternalReference function,
2104 int num_of_reg_args,
2105 SetIsolateDataSlots set_isolate_data_slots,
2106 Label* return_location) {
2107 return CallCFunction(function, num_of_reg_args, 0, set_isolate_data_slots,
2108 return_location);
2109}
2110
2111int MacroAssembler::CallCFunction(ExternalReference function,
2112 int num_of_reg_args, int num_of_double_args,
2113 SetIsolateDataSlots set_isolate_data_slots,
2114 Label* return_location) {
2115 // Note: The "CallCFunction" code comment will be generated by the other
2116 // CallCFunction method called below.
2117 UseScratchRegisterScope temps(this);
2118 Register temp = temps.AcquireX();
2119 Mov(temp, function);
2120 return CallCFunction(temp, num_of_reg_args, num_of_double_args,
2121 set_isolate_data_slots, return_location);
2122}
2123
2124int MacroAssembler::CallCFunction(Register function, int num_of_reg_args,
2125 int num_of_double_args,
2126 SetIsolateDataSlots set_isolate_data_slots,
2127 Label* return_location) {
2128 ASM_CODE_COMMENT(this);
2129 DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
2130 DCHECK(has_frame());
2131
2132 Label get_pc;
2133 UseScratchRegisterScope temps(this);
2134 // We're doing a C call, which means non-parameter caller-saved registers
2135 // (x8-x17) will be clobbered and so are available to use as scratches.
2136 // In the worst-case scenario, we'll need 2 scratch registers. We pick 3
2137 // registers minus the `function` register, in case `function` aliases with
2138 // any of the registers.
2139 temps.Include(CPURegList(64, {x8, x9, x10, function}));
2140 temps.Exclude(function);
2141
2142 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2143 // Save the frame pointer and PC so that the stack layout remains iterable,
2144 // even without an ExitFrame which normally exists between JS and C frames.
2145 UseScratchRegisterScope temps(this);
2146 Register pc_scratch = temps.AcquireX();
2147
2148 Adr(pc_scratch, &get_pc);
2149
2151 // Note that the field for PC is just before the FP. This ensures that in
2152 // simulator builds the `Stp` below stores the PC (the lower address) first
2153 // and only then the FP. This is necessary because during profiling we
2154 // assume that once the FP field is set, the PC is also set already.
2155 static_assert(IsolateData::GetOffset(IsolateFieldId::kFastCCallCallerFP) ==
2156 IsolateData::GetOffset(IsolateFieldId::kFastCCallCallerPC) +
2157 8);
2158 Stp(pc_scratch, fp,
2159 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
2160 }
2161
2162 // Call directly. The function called cannot cause a GC, or allow preemption,
2163 // so the return address in the link register stays correct.
2164 Call(function);
2165 int call_pc_offset = pc_offset();
2166 bind(&get_pc);
2167 if (return_location) bind(return_location);
2168
2169 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2170 // We don't unset the PC; the FP is the source of truth.
2171 Str(xzr, ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2172 }
2173
2174 if (num_of_reg_args > kRegisterPassedArguments) {
2175 // Drop the stack slots used for arguments that did not fit in registers.
2176 int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
2177 Drop(claim_slots);
2178 }
2179
2180 if (num_of_double_args > kFPRegisterPassedArguments) {
2181 // Drop the stack slots used for double arguments that did not fit in
2182 // registers.
2182 int claim_slots =
2183 RoundUp(num_of_double_args - kFPRegisterPassedArguments, 2);
2184 Drop(claim_slots);
2185 }
2186
2187 return call_pc_offset;
2188}
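// Arguments beyond the eight integer and eight floating-point argument
// registers of the AAPCS64 calling convention are passed on the stack; the
// caller claims those slots (rounded up to an even count to keep sp 16-byte
// aligned) before the call, and the two Drop() calls above release them again.
// For example, 10 integer arguments use 2 stack slots, so Drop(2).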
2189
2190void MacroAssembler::LoadFromConstantsTable(Register destination,
2191 int constant_index) {
2192 ASM_CODE_COMMENT(this);
2193 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
2194 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
2195 LoadTaggedField(destination,
2196 FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
2197 constant_index)));
2198}
2199
2200void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
2201 Ldr(destination, MemOperand(kRootRegister, offset));
2202}
2203
2204void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
2205 Str(value, MemOperand(kRootRegister, offset));
2206}
2207
2208void MacroAssembler::LoadRootRegisterOffset(Register destination,
2209 intptr_t offset) {
2210 if (offset == 0) {
2211 Mov(destination, kRootRegister);
2212 } else {
2213 Add(destination, kRootRegister, offset);
2214 }
2215}
2216
2218 ExternalReference reference, Register scratch) {
2219 if (root_array_available()) {
2220 if (reference.IsIsolateFieldId()) {
2221 return MemOperand(kRootRegister, reference.offset_from_root_register());
2222 }
2223 if (options().enable_root_relative_access) {
2224 intptr_t offset =
2226 if (is_int32(offset)) {
2227 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
2228 }
2229 }
2230 if (options().isolate_independent_code) {
2231 if (IsAddressableThroughRootRegister(isolate(), reference)) {
2232 // Some external references can be efficiently loaded as an offset from
2233 // kRootRegister.
2234 intptr_t offset =
2236 CHECK(is_int32(offset));
2237 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
2238 } else {
2239 // Otherwise, do a memory load from the external reference table.
2240 Ldr(scratch,
2243 isolate(), reference)));
2244 return MemOperand(scratch, 0);
2245 }
2246 }
2247 }
2248 Mov(scratch, reference);
2249 return MemOperand(scratch, 0);
2250}
2251
2252void MacroAssembler::Jump(Register target, Condition cond) {
2253 if (cond == nv) return;
2254 Label done;
2255 if (cond != al) B(NegateCondition(cond), &done);
2256 Br(target);
2257 Bind(&done);
2258}
2259
2261 Condition cond) {
2262 if (cond == nv) return;
2263 Label done;
2264 if (cond != al) B(NegateCondition(cond), &done);
2265 if (CanUseNearCallOrJump(rmode)) {
2267 near_jump(static_cast<int>(offset), rmode);
2268 } else {
2269 UseScratchRegisterScope temps(this);
2270 Register temp = temps.AcquireX();
2271 uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstrSize;
2272 Mov(temp, Immediate(imm, rmode));
2273 Br(temp);
2274 }
2275 Bind(&done);
2276}
2277
2278// The calculated offset is either:
2279// * the 'target' input unmodified if this is a Wasm call, or
2280// * the offset of the target from the current PC, in instructions, for any
2281// other type of call.
2282// static
2283int64_t MacroAssembler::CalculateTargetOffset(Address target,
2284 RelocInfo::Mode rmode,
2285 uint8_t* pc) {
2286 int64_t offset = static_cast<int64_t>(target);
2287 if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) {
2288 // The target of WebAssembly calls is still an index instead of an actual
2289 // address at this point, and needs to be encoded as-is.
2290 return offset;
2291 }
2292 offset -= reinterpret_cast<int64_t>(pc);
2294 offset = offset / static_cast<int>(kInstrSize);
2295 return offset;
2296}
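// For non-Wasm targets the byte distance from the current pc is divided by
// kInstrSize (4 bytes per A64 instruction) to get an instruction-count offset,
// which is what the B/BL encodings and near_call/near_jump expect. For example,
// a target 0x100 bytes ahead of pc yields an offset of 0x40 instructions.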
2297
2299 Condition cond) {
2300 int64_t offset = CalculateTargetOffset(target, rmode, pc_);
2301 JumpHelper(offset, rmode, cond);
2302}
2303
2305 Condition cond) {
2307 DCHECK_IMPLIES(options().isolate_independent_code,
2309
2311 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
2312 TailCallBuiltin(builtin, cond);
2313 return;
2314 }
2316 if (CanUseNearCallOrJump(rmode)) {
2318 DCHECK(is_int32(index));
2319 JumpHelper(static_cast<int64_t>(index), rmode, cond);
2320 } else {
2321 Jump(code.address(), rmode, cond);
2322 }
2323}
2324
2325void MacroAssembler::Jump(const ExternalReference& reference) {
2326 UseScratchRegisterScope temps(this);
2327 Register scratch = temps.AcquireX();
2328 Mov(scratch, reference);
2329 Jump(scratch);
2330}
2331
2332void MacroAssembler::Call(Register target) {
2333 BlockPoolsScope scope(this);
2334 Blr(target);
2335}
2336
2337void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
2338 BlockPoolsScope scope(this);
2339 if (CanUseNearCallOrJump(rmode)) {
2340 int64_t offset = CalculateTargetOffset(target, rmode, pc_);
2342 near_call(static_cast<int>(offset), rmode);
2343 } else {
2344 IndirectCall(target, rmode);
2345 }
2346}
2347
2349 DCHECK_IMPLIES(options().isolate_independent_code,
2351 BlockPoolsScope scope(this);
2352
2354 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
2355 CallBuiltin(builtin);
2356 return;
2357 }
2358
2360
2361 if (CanUseNearCallOrJump(rmode)) {
2363 DCHECK(is_int32(index));
2364 near_call(static_cast<int32_t>(index), rmode);
2365 } else {
2366 IndirectCall(code.address(), rmode);
2367 }
2368}
2369
2370void MacroAssembler::Call(ExternalReference target) {
2371 UseScratchRegisterScope temps(this);
2372 Register temp = temps.AcquireX();
2373 Mov(temp, target);
2374 Call(temp);
2375}
2376
2377void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index,
2378 Register target) {
2379 ASM_CODE_COMMENT(this);
2380 // The builtin_index register contains the builtin index as a Smi.
2381 if (SmiValuesAre32Bits()) {
2382 Asr(target, builtin_index, kSmiShift - kSystemPointerSizeLog2);
2383 Add(target, target, IsolateData::builtin_entry_table_offset());
2384 Ldr(target, MemOperand(kRootRegister, target));
2385 } else {
2386 DCHECK(SmiValuesAre31Bits());
2387 if (COMPRESS_POINTERS_BOOL) {
2388 Add(target, kRootRegister,
2389 Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift));
2390 } else {
2391 Add(target, kRootRegister,
2392 Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift));
2393 }
2394 Ldr(target, MemOperand(target, IsolateData::builtin_entry_table_offset()));
2395 }
2396}
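// The builtin index arrives as a Smi. With 32-bit Smis the payload lives in
// the upper word, so shifting right by kSmiShift - kSystemPointerSizeLog2
// (32 - 3) turns the tagged value directly into a byte offset into the builtin
// entry table. With 31-bit Smis the index is instead scaled up from the low
// word as part of the address computation.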
2397
2399 Register destination) {
2401}
2402
2404 ASM_CODE_COMMENT(this);
2408}
2409
2410void MacroAssembler::CallBuiltinByIndex(Register builtin_index,
2411 Register target) {
2412 ASM_CODE_COMMENT(this);
2413 LoadEntryFromBuiltinIndex(builtin_index, target);
2414 Call(target);
2415}
2416
2419 switch (options().builtin_call_jump_mode) {
2421 UseScratchRegisterScope temps(this);
2422 Register scratch = temps.AcquireX();
2423 Ldr(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
2424 Call(scratch);
2425 break;
2426 }
2428 near_call(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
2429 break;
2431 UseScratchRegisterScope temps(this);
2432 Register scratch = temps.AcquireX();
2433 LoadEntryFromBuiltin(builtin, scratch);
2434 Call(scratch);
2435 break;
2436 }
2438 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
2439 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
2441 DCHECK(is_int32(index));
2442 near_call(static_cast<int32_t>(index), RelocInfo::CODE_TARGET);
2443 } else {
2444 UseScratchRegisterScope temps(this);
2445 Register scratch = temps.AcquireX();
2446 LoadEntryFromBuiltin(builtin, scratch);
2447 Call(scratch);
2448 }
2449 break;
2450 }
2451 }
2452}
2453
2454// TODO(ishell): remove cond parameter from here to simplify things.
2457 CommentForOffHeapTrampoline("tail call", builtin));
2458
2459 // The control flow integrity (CFI) feature allows us to "sign" code entry
2460 // points as a target for calls, jumps or both. Arm64 has special
2461 // instructions for this purpose, so-called "landing pads" (see
2462 // MacroAssembler::CallTarget(), MacroAssembler::JumpTarget() and
2463 // MacroAssembler::JumpOrCallTarget()). Currently, we generate "Call"
2464 // landing pads for CPP builtins. In order to allow tail calling to those
2465 // builtins we have to use a workaround.
2466 // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump"
2467 // (i.e. `bti j`) landing pads for the tail-called code.
2468 Register temp = x17;
2469
2470 switch (options().builtin_call_jump_mode) {
2472 Ldr(temp, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
2473 Jump(temp, cond);
2474 break;
2475 }
2477 if (cond != nv) {
2478 Label done;
2479 if (cond != al) B(NegateCondition(cond), &done);
2480 near_jump(static_cast<int>(builtin), RelocInfo::NEAR_BUILTIN_ENTRY);
2481 Bind(&done);
2482 }
2483 break;
2484 }
2486 LoadEntryFromBuiltin(builtin, temp);
2487 Jump(temp, cond);
2488 break;
2489 }
2491 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
2492 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
2494 DCHECK(is_int32(index));
2495 JumpHelper(static_cast<int64_t>(index), RelocInfo::CODE_TARGET, cond);
2496 } else {
2497 LoadEntryFromBuiltin(builtin, temp);
2498 Jump(temp, cond);
2499 }
2500 break;
2501 }
2502 }
2503}
2504
2506 Register code_object,
2507 CodeEntrypointTag tag) {
2508 ASM_CODE_COMMENT(this);
2509#ifdef V8_ENABLE_SANDBOX
2510 LoadCodeEntrypointViaCodePointer(
2512 FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), tag);
2513#else
2514 Ldr(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset));
2515#endif
2516}
2517
2518void MacroAssembler::CallCodeObject(Register code_object,
2519 CodeEntrypointTag tag) {
2520 ASM_CODE_COMMENT(this);
2521 LoadCodeInstructionStart(code_object, code_object, tag);
2522 Call(code_object);
2523}
2524
2525void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag,
2526 JumpMode jump_mode) {
2527 // TODO(saelo): can we avoid using this for JavaScript functions
2528 // (kJSEntrypointTag) and instead use a variant that ensures that the caller
2529 // and callee agree on the signature (i.e. parameter count)?
2530 ASM_CODE_COMMENT(this);
2531 DCHECK_EQ(JumpMode::kJump, jump_mode);
2532 LoadCodeInstructionStart(code_object, code_object, tag);
2533 // We jump through x17 here because for Branch Identification (BTI) we use
2534 // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for tail-called
2535 // code. See TailCallBuiltin for more information.
2536 if (code_object != x17) {
2537 Mov(x17, code_object);
2538 }
2539 Jump(x17);
2540}
2541
2542void MacroAssembler::CallJSFunction(Register function_object,
2543 uint16_t argument_count) {
2545#ifdef V8_ENABLE_LEAPTIERING
2548 Register scratch = x21;
2549
2550 Ldr(dispatch_handle.W(),
2551 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
2552 LoadEntrypointAndParameterCountFromJSDispatchTable(code, parameter_count,
2553 dispatch_handle, scratch);
2554 // Force a safe crash if the parameter count doesn't match.
2555 Cmp(parameter_count, Immediate(argument_count));
2556 SbxCheck(le, AbortReason::kJSSignatureMismatch);
2557 Call(code);
2558#else
2560 LoadTaggedField(code,
2561 FieldMemOperand(function_object, JSFunction::kCodeOffset));
2563#endif
2564}
2565
2566#if V8_ENABLE_LEAPTIERING
2567void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
2568 uint16_t argument_count) {
2570 Register scratch = x21;
2572 Immediate(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
2573 // WARNING: This entrypoint load is only safe because we are storing a
2574 // RelocInfo for the dispatch handle in the movl above (thus keeping the
2575 // dispatch entry alive) _and_ because the entrypoints are not compactable
2576 // (thus meaning that the calculation in the entrypoint load is not
2577 // invalidated by a compaction).
2578 // TODO(leszeks): Make this less of a footgun.
2579 static_assert(!JSDispatchTable::kSupportsCompaction);
2580 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
2581 CHECK_EQ(argument_count,
2582 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
2583 dispatch_handle));
2584 Call(code);
2585}
2586#endif
2587
2588void MacroAssembler::JumpJSFunction(Register function_object,
2589 JumpMode jump_mode) {
2591#ifdef V8_ENABLE_LEAPTIERING
2592 // This implementation is not currently used because callers usually need
2593 // to load both entry point and parameter count and then do something with
2594 // the latter before the actual call.
2595 UNREACHABLE();
2596#else
2598 LoadTaggedField(code,
2599 FieldMemOperand(function_object, JSFunction::kCodeOffset));
2600 JumpCodeObject(code, kJSEntrypointTag, jump_mode);
2601#endif
2602}
2603
2604#ifdef V8_ENABLE_WEBASSEMBLY
2605
2606void MacroAssembler::ResolveWasmCodePointer(Register target,
2607 uint64_t signature_hash) {
2608 ASM_CODE_COMMENT(this);
2609 ExternalReference global_jump_table =
2610 ExternalReference::wasm_code_pointer_table();
2611 UseScratchRegisterScope temps(this);
2612 Register scratch = temps.AcquireX();
2613 Mov(scratch, global_jump_table);
2614#ifdef V8_ENABLE_SANDBOX
2615 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 16);
2616 Add(target, scratch, Operand(target, LSL, 4));
2617 Ldr(scratch,
2618 MemOperand(target, wasm::WasmCodePointerTable::kOffsetOfSignatureHash));
2619 bool has_second_tmp = temps.CanAcquire();
2620 Register signature_hash_register = has_second_tmp ? temps.AcquireX() : target;
2621 if (!has_second_tmp) {
2622 Push(signature_hash_register, padreg);
2623 }
2624 Mov(signature_hash_register, signature_hash);
2625 Cmp(scratch, signature_hash_register);
2626 SbxCheck(Condition::kEqual, AbortReason::kWasmSignatureMismatch);
2627 if (!has_second_tmp) {
2628 Pop(padreg, signature_hash_register);
2629 }
2630#else
2631 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 8);
2632 Add(target, scratch, Operand(target, LSL, 3));
2633#endif
2634
2635 Ldr(target, MemOperand(target));
2636}
2637
2638void MacroAssembler::CallWasmCodePointer(Register target,
2639 uint64_t signature_hash,
2640 CallJumpMode call_jump_mode) {
2641 ResolveWasmCodePointer(target, signature_hash);
2642 if (call_jump_mode == CallJumpMode::kTailCall) {
2643 Jump(target);
2644 } else {
2645 Call(target);
2646 }
2647}
2648
2649void MacroAssembler::CallWasmCodePointerNoSignatureCheck(Register target) {
2650 ExternalReference global_jump_table =
2651 ExternalReference::wasm_code_pointer_table();
2652 UseScratchRegisterScope temps(this);
2653 Register scratch = temps.AcquireX();
2654 Mov(scratch, global_jump_table);
2655 constexpr unsigned int kEntrySizeLog2 =
2656 std::bit_width(sizeof(wasm::WasmCodePointerTableEntry)) - 1;
2657 Add(target, scratch, Operand(target, LSL, kEntrySizeLog2));
2658 Ldr(target, MemOperand(target));
2659
2660 Call(target);
2661}
2662
2663void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
2664 static_assert(sizeof(WasmCodePointer) == 4);
2665 Ldr(dst.W(), src);
2666}
2667
2668#endif
2669
2670void MacroAssembler::StoreReturnAddressAndCall(Register target) {
2671 ASM_CODE_COMMENT(this);
2672 // This generates the final instruction sequence for calls to C functions
2673 // once an exit frame has been constructed.
2674 //
2675 // Note that this assumes the caller code (i.e. the InstructionStream object
2676 // currently being generated) is immovable or that the callee function cannot
2677 // trigger GC, since the callee function will return to it.
2678
2679 UseScratchRegisterScope temps(this);
2680 temps.Exclude(x16, x17);
2681 DCHECK(!AreAliased(x16, x17, target));
2682
2683 Label return_location;
2684 Adr(x17, &return_location);
2685#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
2686 Add(x16, sp, kSystemPointerSize);
2687 Pacib1716();
2688#endif
2689 Str(x17, MemOperand(sp));
2690
2691 if (v8_flags.debug_code) {
2692 ASM_CODE_COMMENT_STRING(this, "Verify fp[kSPOffset]-8");
2693 // Verify that the slot below fp[kSPOffset]-8 points to the signed return
2694 // location.
2696 Ldr(x16, MemOperand(x16, -static_cast<int64_t>(kXRegSize)));
2697 Cmp(x16, x17);
2698 Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
2699 }
2700
2701 Blr(target);
2702 Bind(&return_location);
2703}
2704
2705void MacroAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
2706 ASM_CODE_COMMENT(this);
2707 UseScratchRegisterScope temps(this);
2708 Register temp = temps.AcquireX();
2709 Mov(temp, Immediate(target, rmode));
2710 Blr(temp);
2711}
2712
2713bool MacroAssembler::IsNearCallOffset(int64_t offset) {
2714 return is_int26(offset);
2715}
2716
2717// Check if the code object is marked for deoptimization. If it is, then it
2718// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
2719// to:
2720// 1. read from memory the word that contains that bit, which can be found in
2721// the flags in the referenced {Code} object;
2722// 2. test kMarkedForDeoptimizationBit in those flags; and
2723// 3. if it is not zero then it jumps to the builtin.
2724//
2725// Note: With leaptiering we simply assert the code is not deoptimized.
2727 UseScratchRegisterScope temps(this);
2728 Register scratch = temps.AcquireX();
2729 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
2730 int offset =
2731 InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
2734 Ldr(scratch.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
2735 }
2736#ifdef V8_ENABLE_LEAPTIERING
2737 if (v8_flags.debug_code) {
2738 Label not_deoptimized;
2739 Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
2740 Abort(AbortReason::kInvalidDeoptimizedCode);
2741 Bind(&not_deoptimized);
2742 }
2743#else
2744 Label not_deoptimized;
2745 Tbz(scratch.W(), Code::kMarkedForDeoptimizationBit, &not_deoptimized);
2746 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode);
2747 Bind(&not_deoptimized);
2748#endif
2749}
2750
2751void MacroAssembler::CallForDeoptimization(
2752 Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret,
2753 Label* jump_deoptimization_entry_label) {
2754 ASM_CODE_COMMENT(this);
2755 BlockPoolsScope scope(this);
2756 bl(jump_deoptimization_entry_label);
2760}
2761
2762void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
2763 ASM_CODE_COMMENT(this);
2767 : IsolateData::jslimit_offset();
2768
2770}
2771
2772void MacroAssembler::StackOverflowCheck(Register num_args,
2773 Label* stack_overflow) {
2774 ASM_CODE_COMMENT(this);
2775 UseScratchRegisterScope temps(this);
2776 Register scratch = temps.AcquireX();
2777
2778 // Check the stack for overflow.
2779 // We are not trying to catch interruptions (e.g. debug break and
2780 // preemption) here, so the "real stack limit" is checked.
2781
2782 LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
2783 // Make scratch the space we have left. The stack might already be overflowed
2784 // here which will cause scratch to become negative.
2785 Sub(scratch, sp, scratch);
2786 // Check if the arguments will overflow the stack.
2787 Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
2788 B(le, stack_overflow);
2789}
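// After the subtraction, scratch holds the number of bytes of stack still
// available (negative if sp is already below the real stack limit). Comparing
// it against num_args scaled by the slot size and branching on `le` also
// catches the already-overflowed case, because a negative amount of free space
// is less than any non-negative argument size.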
2790
2791void MacroAssembler::InvokePrologue(Register formal_parameter_count,
2792 Register actual_argument_count,
2793 InvokeType type) {
2794 ASM_CODE_COMMENT(this);
2795 // x0: actual arguments count.
2796 // x1: function (passed through to callee).
2797 // x2: expected arguments count.
2798 // x3: new target
2799 Label regular_invoke;
2800 DCHECK_EQ(actual_argument_count, x0);
2801 DCHECK_EQ(formal_parameter_count, x2);
2802
2803 // If overapplication or if the actual argument count is equal to the
2804 // formal parameter count, no need to push extra undefined values.
2805 Register extra_argument_count = x2;
2806 Subs(extra_argument_count, formal_parameter_count, actual_argument_count);
2807 B(le, &regular_invoke);
2808
2809 // The stack pointer in arm64 needs to be 16-byte aligned. We might need to
2810 // (1) add an extra padding or (2) remove (reuse) the extra padding already
2811 // in the stack. Let {slots_to_copy} be the number of slots (arguments) to
2812 // move up in the stack and let {slots_to_claim} be the number of extra stack
2813 // slots to claim.
2814 Label even_extra_count, skip_move;
2815 Register slots_to_copy = x5;
2816 Register slots_to_claim = x6;
2817
2818 Mov(slots_to_copy, actual_argument_count);
2819 Mov(slots_to_claim, extra_argument_count);
2820 Tbz(extra_argument_count, 0, &even_extra_count);
2821
2822 // Calculate {slots_to_claim} when {extra_argument_count} is odd.
2823 // If {actual_argument_count} is even, we need one extra padding slot
2824 // {slots_to_claim = extra_argument_count + 1}.
2825 // If {actual_argument_count} is odd, we know that the
2826 // original arguments will have a padding slot that we can reuse
2827 // {slots_to_claim = extra_argument_count - 1}.
2828 {
2829 Register scratch = x11;
2830 Add(slots_to_claim, extra_argument_count, 1);
2831 And(scratch, actual_argument_count, 1);
2832 Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
2833 }
2834
2835 Bind(&even_extra_count);
2836 Cbz(slots_to_claim, &skip_move);
2837
2838 Label stack_overflow;
2839 StackOverflowCheck(slots_to_claim, &stack_overflow);
2840 Claim(slots_to_claim);
2841
2842 // Move the arguments already in the stack including the receiver.
2843 {
2844 Register src = x7;
2845 Register dst = x8;
2846 SlotAddress(src, slots_to_claim);
2847 SlotAddress(dst, 0);
2848 CopyDoubleWords(dst, src, slots_to_copy);
2849 }
2850
2851 Bind(&skip_move);
2852 Register pointer_next_value = x6;
2853
2854 // Copy extra arguments as undefined values.
2855 {
2856 Label loop;
2857 Register undefined_value = x7;
2858 Register count = x8;
2859 LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2860 SlotAddress(pointer_next_value, actual_argument_count);
2861 Mov(count, extra_argument_count);
2862 Bind(&loop);
2863 Str(undefined_value,
2864 MemOperand(pointer_next_value, kSystemPointerSize, PostIndex));
2865 Subs(count, count, 1);
2866 Cbnz(count, &loop);
2867 }
2868
2869 // Set padding if needed.
2870 {
2871 Label skip;
2872 Register total_args_slots = x5;
2873 Add(total_args_slots, actual_argument_count, extra_argument_count);
2874 Tbz(total_args_slots, 0, &skip);
2875 Str(padreg, MemOperand(pointer_next_value));
2876 Bind(&skip);
2877 }
2878 B(&regular_invoke);
2879
2880 bind(&stack_overflow);
2881 {
2882 FrameScope frame(
2883 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
2884 CallRuntime(Runtime::kThrowStackOverflow);
2885 Unreachable();
2886 }
2887
2888 Bind(&regular_invoke);
2889}
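// Worked example of the padding arithmetic above: with a formal parameter
// count of 5 and an actual argument count of 2, extra_argument_count is 3
// (odd) and the actual count is even, so slots_to_claim becomes 3 + 1 = 4. The
// existing argument slots (slots_to_copy of them) are moved down, 3 undefined
// values are appended, and one final padding slot keeps the total number of
// argument slots even for sp alignment.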
2890
2891void MacroAssembler::CallDebugOnFunctionCall(
2892 Register fun, Register new_target,
2893 Register expected_parameter_count_or_dispatch_handle,
2894 Register actual_parameter_count) {
2895 ASM_CODE_COMMENT(this);
2896 DCHECK(!AreAliased(x5, fun, new_target,
2897 expected_parameter_count_or_dispatch_handle,
2898 actual_parameter_count));
2899 // Load receiver to pass it later to DebugOnFunctionCall hook.
2900 Peek(x5, ReceiverOperand());
2901 FrameScope frame(
2902 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
2903
2904 if (!new_target.is_valid()) new_target = padreg;
2905
2906 // Save values on stack.
2907 SmiTag(expected_parameter_count_or_dispatch_handle);
2908 SmiTag(actual_parameter_count);
2909 Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count,
2910 new_target, fun);
2911 Push(fun, x5);
2912 CallRuntime(Runtime::kDebugOnFunctionCall);
2913
2914 // Restore values from stack.
2915 Pop(fun, new_target, actual_parameter_count,
2916 expected_parameter_count_or_dispatch_handle);
2917 SmiUntag(actual_parameter_count);
2918 SmiUntag(expected_parameter_count_or_dispatch_handle);
2919}
2920
2921#ifdef V8_ENABLE_LEAPTIERING
2923 Register function, Register actual_parameter_count, InvokeType type,
2924 ArgumentAdaptionMode argument_adaption_mode) {
2925 ASM_CODE_COMMENT(this);
2926 // You can't call a function without a valid frame.
2927 DCHECK(type == InvokeType::kJump || has_frame());
2928
2929 // Contract with called JS functions requires that function is passed in x1.
2930 // (See FullCodeGenerator::Generate().)
2931 DCHECK_EQ(function, x1);
2932
2933 // Set up the context.
2934 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2935
2936 InvokeFunctionCode(function, no_reg, actual_parameter_count, type,
2937 argument_adaption_mode);
2938}
2939
2941 Register function, Register new_target, Register actual_parameter_count,
2942 InvokeType type) {
2943 ASM_CODE_COMMENT(this);
2944 // You can't call a function without a valid frame.
2945 DCHECK(type == InvokeType::kJump || has_frame());
2946
2947 // Contract with called JS functions requires that function is passed in x1.
2948 // (See FullCodeGenerator::Generate().)
2949 DCHECK_EQ(function, x1);
2950
2951 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2952
2953 InvokeFunctionCode(function, new_target, actual_parameter_count, type);
2954}
2955
2957 Register function, Register new_target, Register actual_parameter_count,
2958 InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
2959 ASM_CODE_COMMENT(this);
2960 // You can't call a function without a valid frame.
2962 DCHECK_EQ(function, x1);
2963 DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
2964
2966 Ldr(dispatch_handle.W(),
2967 FieldMemOperand(function, JSFunction::kDispatchHandleOffset));
2968
2969 // On function call, call into the debugger if necessary.
2970 Label debug_hook, continue_after_hook;
2971 {
2972 Mov(x5, ExternalReference::debug_hook_on_function_call_address(isolate()));
2973 Ldrsb(x5, MemOperand(x5));
2974 Cbnz(x5, &debug_hook);
2975 }
2976 bind(&continue_after_hook);
2977
2978 // Clear the new.target register if not given.
2979 if (!new_target.is_valid()) {
2980 LoadRoot(x3, RootIndex::kUndefinedValue);
2981 }
2982
2983 Register scratch = x20;
2984 if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) {
2985 Register expected_parameter_count = x2;
2986 LoadParameterCountFromJSDispatchTable(expected_parameter_count,
2987 dispatch_handle, scratch);
2988 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
2989 }
2990
2991 // We call indirectly through the code field in the function to
2992 // allow recompilation to take effect without changing any of the
2993 // call sites.
2994 LoadEntrypointFromJSDispatchTable(kJavaScriptCallCodeStartRegister,
2995 dispatch_handle, scratch);
2996 switch (type) {
2997 case InvokeType::kCall:
2999 break;
3000 case InvokeType::kJump:
3001 // We jump through x17 here because for Branch Identification (BTI) we use
3002 // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for
3003 // tail-called code. See TailCallBuiltin for more information.
3005 Jump(x17);
3006 break;
3007 }
3008 Label done;
3009 B(&done);
3010
3011 // Deferred debug hook.
3012 bind(&debug_hook);
3013 CallDebugOnFunctionCall(function, new_target, dispatch_handle,
3014 actual_parameter_count);
3015 B(&continue_after_hook);
3016
3017 bind(&done);
3018}
3019#else
3020void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
3021 Register expected_parameter_count,
3022 Register actual_parameter_count,
3023 InvokeType type) {
3024 ASM_CODE_COMMENT(this);
3025 // You can't call a function without a valid frame.
3027 DCHECK_EQ(function, x1);
3028 DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
3029
3030 // On function call, call into the debugger if necessary.
3031 Label debug_hook, continue_after_hook;
3032 {
3033 Mov(x5, ExternalReference::debug_hook_on_function_call_address(isolate()));
3034 Ldrsb(x5, MemOperand(x5));
3035 Cbnz(x5, &debug_hook);
3036 }
3037 bind(&continue_after_hook);
3038
3039 // Clear the new.target register if not given.
3040 if (!new_target.is_valid()) {
3041 LoadRoot(x3, RootIndex::kUndefinedValue);
3042 }
3043
3044 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
3045
3046 // The called function expects the call kind in x5.
3047 // We call indirectly through the code field in the function to
3048 // allow recompilation to take effect without changing any of the
3049 // call sites.
3050 constexpr int unused_argument_count = 0;
3051 switch (type) {
3052 case InvokeType::kCall:
3053 CallJSFunction(function, unused_argument_count);
3054 break;
3055 case InvokeType::kJump:
3056 JumpJSFunction(function);
3057 break;
3058 }
3059 Label done;
3060 B(&done);
3061
3062 // Deferred debug hook.
3063 bind(&debug_hook);
3064 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
3065 actual_parameter_count);
3066 B(&continue_after_hook);
3067
3068 bind(&done);
3069}
3070
3072 Register function, Register new_target, Register actual_parameter_count,
3073 InvokeType type) {
3074 ASM_CODE_COMMENT(this);
3075 // You can't call a function without a valid frame.
3076 DCHECK(type == InvokeType::kJump || has_frame());
3077
3078 // Contract with called JS functions requires that function is passed in x1.
3079 // (See FullCodeGenerator::Generate().)
3080 DCHECK_EQ(function, x1);
3081
3082 Register expected_parameter_count = x2;
3083
3084 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
3085 // The number of arguments is stored as an int32_t, and -1 is a marker
3086 // (kDontAdaptArgumentsSentinel), so we need sign
3087 // extension to correctly handle it.
3089 expected_parameter_count,
3090 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3091 Ldrh(expected_parameter_count,
3092 FieldMemOperand(expected_parameter_count,
3093 SharedFunctionInfo::kFormalParameterCountOffset));
3094
3095 InvokeFunctionCode(function, new_target, expected_parameter_count,
3096 actual_parameter_count, type);
3097}
3098
3099void MacroAssembler::InvokeFunction(Register function,
3100 Register expected_parameter_count,
3101 Register actual_parameter_count,
3102 InvokeType type) {
3103 ASM_CODE_COMMENT(this);
3104 // You can't call a function without a valid frame.
3105 DCHECK(type == InvokeType::kJump || has_frame());
3106
3107 // Contract with called JS functions requires that function is passed in x1.
3108 // (See FullCodeGenerator::Generate().)
3109 DCHECK_EQ(function, x1);
3110
3111 // Set up the context.
3112 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
3113
3114 InvokeFunctionCode(function, no_reg, expected_parameter_count,
3115 actual_parameter_count, type);
3116}
3117#endif // V8_ENABLE_LEAPTIERING
3118
3119void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
3120 Register code, Register scratch, Label* if_marked_for_deoptimization) {
3121 Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset));
3123 if_marked_for_deoptimization);
3124}
3125
3126void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch,
3127 Label* if_turbofanned) {
3128 Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset));
3129 Tbnz(scratch.W(), Code::kIsTurbofannedBit, if_turbofanned);
3130}
3131
3132Operand MacroAssembler::ClearedValue() const {
3133 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
3134}
3135
3136Operand MacroAssembler::ReceiverOperand() { return Operand(0); }
3137
3138void MacroAssembler::TryConvertDoubleToInt64(Register result,
3139 DoubleRegister double_input,
3140 Label* done) {
3141 ASM_CODE_COMMENT(this);
3142 // Try to convert with an FPU convert instruction. It's trivial to compute
3143 // the modulo operation on an integer register so we convert to a 64-bit
3144 // integer.
3145 //
3146 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
3147 // when the double is out of range. NaNs are converted to 0 (as ECMA-262
3148 // requires); infinities saturate and are caught by the range check below.
3149 Fcvtzs(result.X(), double_input);
3150
3151 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
3152 // representable using a double, so if the result is one of those then we know
3153 // that saturation occurred, and we need to manually handle the conversion.
3154 //
3155 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
3156 // 1 will cause signed overflow.
3157 Cmp(result.X(), 1);
3158 Ccmp(result.X(), -1, VFlag, vc);
3159
3160 B(vc, done);
3161}
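// The saturation check works as follows: Cmp(result, 1) sets the overflow flag
// only when result == INT64_MIN (result - 1 overflows). If that comparison did
// not overflow, Ccmp compares result with -1, which overflows only when
// result == INT64_MAX; otherwise Ccmp forces the V flag on. The final
// B(vc, done) therefore takes the fast path exactly when the conversion did
// not saturate.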
3162
3163void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
3164 Register result,
3165 DoubleRegister double_input,
3166 StubCallMode stub_mode,
3167 LinkRegisterStatus lr_status) {
3168 ASM_CODE_COMMENT(this);
3169 if (CpuFeatures::IsSupported(JSCVT)) {
3170 Fjcvtzs(result.W(), double_input);
3171 return;
3172 }
3173
3174 Label done;
3175
3176 // Try to convert the double to an int64. If successful, the bottom 32 bits
3177 // contain our truncated int32 result.
3178 TryConvertDoubleToInt64(result, double_input, &done);
3179
3180 // If we fell through then inline version didn't succeed - call stub instead.
3181 if (lr_status == kLRHasNotBeenSaved) {
3182 Push<MacroAssembler::kSignLR>(lr, double_input);
3183 } else {
3184 Push<MacroAssembler::kDontStoreLR>(xzr, double_input);
3185 }
3186
3187 // DoubleToI preserves any registers it needs to clobber.
3188#if V8_ENABLE_WEBASSEMBLY
3189 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
3190 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
3191#else
3192 // For balance.
3193 if (false) {
3194#endif // V8_ENABLE_WEBASSEMBLY
3195 } else {
3196 CallBuiltin(Builtin::kDoubleToI);
3197 }
3198 Ldr(result, MemOperand(sp, 0));
3199
3200 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
3201
3202 if (lr_status == kLRHasNotBeenSaved) {
3203 // Pop into xzr here to drop the double input on the stack:
3205 } else {
3206 Drop(2);
3207 }
3208
3209 Bind(&done);
3210 // Keep our invariant that the upper 32 bits are zero.
3211 Uxtw(result.W(), result.W());
3212}
3213
3215 ASM_CODE_COMMENT(this);
3217 mov(fp, sp);
3218 static_assert(kExtraSlotClaimedByPrologue == 1);
3220}
3221
3223 UseScratchRegisterScope temps(this);
3224
3225 if (StackFrame::IsJavaScript(type)) {
3226 // Just push a minimal "machine frame", saving the frame pointer and return
3227 // address, without any markers.
3229 Mov(fp, sp);
3230 // sp[1] : lr
3231 // sp[0] : fp
3232 } else {
3233 Register type_reg = temps.AcquireX();
3234 Mov(type_reg, StackFrame::TypeToMarker(type));
3235 Register fourth_reg = padreg;
3236 if (type == StackFrame::CONSTRUCT || type == StackFrame::FAST_CONSTRUCT) {
3237 fourth_reg = cp;
3238 }
3239#if V8_ENABLE_WEBASSEMBLY
3240 if (type == StackFrame::WASM || type == StackFrame::WASM_LIFTOFF_SETUP ||
3241 type == StackFrame::WASM_EXIT) {
3242 fourth_reg = kWasmImplicitArgRegister;
3243 }
3244#endif // V8_ENABLE_WEBASSEMBLY
3245 Push<MacroAssembler::kSignLR>(lr, fp, type_reg, fourth_reg);
3246 static constexpr int kSPToFPDelta = 2 * kSystemPointerSize;
3247 Add(fp, sp, kSPToFPDelta);
3248 // sp[3] : lr
3249 // sp[2] : fp
3250 // sp[1] : type
3251 // sp[0] : cp | wasm instance | for alignment
3252 }
3253}
3254
3256 ASM_CODE_COMMENT(this);
3257 // Drop the execution stack down to the frame pointer and restore
3258 // the caller frame pointer and return address.
3259 Mov(sp, fp);
3261}
3262
3263void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space,
3264 StackFrame::Type frame_type) {
3265 ASM_CODE_COMMENT(this);
3266 DCHECK(frame_type == StackFrame::EXIT ||
3267 frame_type == StackFrame::BUILTIN_EXIT ||
3268 frame_type == StackFrame::API_ACCESSOR_EXIT ||
3269 frame_type == StackFrame::API_CALLBACK_EXIT);
3270
3271 // Set up the new stack frame.
3273 Mov(fp, sp);
3274 Mov(scratch, StackFrame::TypeToMarker(frame_type));
3275 Push(scratch, xzr);
3276 // fp[8]: CallerPC (lr)
3277 // fp -> fp[0]: CallerFP (old fp)
3278 // fp[-8]: STUB marker
3279 // sp -> fp[-16]: Space reserved for SPOffset.
3280 static_assert((2 * kSystemPointerSize) ==
3282 static_assert((1 * kSystemPointerSize) ==
3284 static_assert((0 * kSystemPointerSize) ==
3286 static_assert((-2 * kSystemPointerSize) == ExitFrameConstants::kSPOffset);
3287
3288 // Save the frame pointer and context pointer in the top frame.
3289 Mov(scratch,
3290 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
3291 Str(fp, MemOperand(scratch));
3292 Mov(scratch,
3293 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
3294 Str(cp, MemOperand(scratch));
3295
3296 static_assert((-2 * kSystemPointerSize) ==
3298
3299 // Round the number of slots we need to claim to a multiple of two.
3300 int slots_to_claim = RoundUp(extra_space + 1, 2);
3301
3302 // Reserve space for the return address and for user requested memory.
3303 // We do this before aligning to make sure that we end up correctly
3304 // aligned with the minimum of wasted space.
3305 Claim(slots_to_claim, kXRegSize);
3306 // fp[8]: CallerPC (lr)
3307 // fp -> fp[0]: CallerFP (old fp)
3308 // fp[-8]: STUB marker
3309 // fp[-16]: Space reserved for SPOffset.
3310 // sp[8]: Extra space reserved for caller (if extra_space != 0).
3311 // sp -> sp[0]: Space reserved for the return address.
3312
3313 // ExitFrame::GetStateForFramePointer expects to find the return address at
3314 // the memory address immediately below the pointer stored in SPOffset.
3315 // It is not safe to derive much else from SPOffset, because the size of the
3316 // padding can vary.
3317 Add(scratch, sp, kXRegSize);
3318 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
3319}
3320
3321// Leave the current exit frame.
3322void MacroAssembler::LeaveExitFrame(const Register& scratch,
3323 const Register& scratch2) {
3324 ASM_CODE_COMMENT(this);
3325
3326 // Restore the context pointer from the top frame.
3327 Mov(scratch,
3328 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
3329 Ldr(cp, MemOperand(scratch));
3330
3331 if (v8_flags.debug_code) {
3332 // Also emit debug code to clear the cp in the top frame.
3333 Mov(scratch2, Operand(Context::kInvalidContext));
3334 Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
3335 isolate()));
3336 Str(scratch2, MemOperand(scratch));
3337 }
3338 // Clear the frame pointer from the top frame.
3339 Mov(scratch,
3340 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
3341 Str(xzr, MemOperand(scratch));
3342
3343 // Pop the exit frame.
3344 // fp[8]: CallerPC (lr)
3345 // fp -> fp[0]: CallerFP (old fp)
3346 // fp[...]: The rest of the frame.
3347 Mov(sp, fp);
3349}
3350
3351void MacroAssembler::LoadGlobalProxy(Register dst) {
3352 ASM_CODE_COMMENT(this);
3353 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
3354}
3355
3356void MacroAssembler::LoadWeakValue(Register out, Register in,
3357 Label* target_if_cleared) {
3358 ASM_CODE_COMMENT(this);
3360 target_if_cleared);
3361
3362 and_(out, in, Operand(~kWeakHeapObjectMask));
3363}
3364
3365void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
3366 Register scratch1,
3367 Register scratch2) {
3368 ASM_CODE_COMMENT(this);
3369 DCHECK_NE(value, 0);
3370 if (v8_flags.native_code_counters && counter->Enabled()) {
3371 // This operation has to be exactly 32-bit wide in case the external
3372 // reference table redirects the counter to a uint32_t dummy_stats_counter_
3373 // field.
3374 Mov(scratch2, ExternalReference::Create(counter));
3375 Ldr(scratch1.W(), MemOperand(scratch2));
3376 Add(scratch1.W(), scratch1.W(), value);
3377 Str(scratch1.W(), MemOperand(scratch2));
3378 }
3379}
3380
3381void MacroAssembler::JumpIfObjectType(Register object, Register map,
3382 Register type_reg, InstanceType type,
3383 Label* if_cond_pass, Condition cond) {
3384 ASM_CODE_COMMENT(this);
3385 CompareObjectType(object, map, type_reg, type);
3386 B(cond, if_cond_pass);
3387}
3388
3389void MacroAssembler::JumpIfJSAnyIsNotPrimitive(Register heap_object,
3390 Register scratch, Label* target,
3391 Label::Distance distance,
3392 Condition cc) {
3393 CHECK(cc == Condition::kUnsignedLessThan ||
3394 cc == Condition::kUnsignedGreaterThanEqual);
3396#ifdef DEBUG
3397 Label ok;
3398 LoadMap(scratch, heap_object);
3399 CompareInstanceTypeRange(scratch, scratch, FIRST_JS_RECEIVER_TYPE,
3400 LAST_JS_RECEIVER_TYPE);
3401 B(Condition::kUnsignedLessThanEqual, &ok);
3402 LoadMap(scratch, heap_object);
3403 CompareInstanceTypeRange(scratch, scratch, FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
3404 LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
3405 B(Condition::kUnsignedLessThanEqual, &ok);
3406 Abort(AbortReason::kInvalidReceiver);
3407 bind(&ok);
3408#endif // DEBUG
3409
3410 // All primitive object's maps are allocated at the start of the read only
3411 // heap. Thus JS_RECEIVER's must have maps with larger (compressed)
3412 // addresses.
3413 LoadCompressedMap(scratch, heap_object);
3415 } else {
3416 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
3417 CompareObjectType(heap_object, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3418 }
3419 B(cc, target);
3420}
3421
3422#if V8_STATIC_ROOTS_BOOL
3423void MacroAssembler::CompareInstanceTypeWithUniqueCompressedMap(
3424 Register map, Register scratch, InstanceType type) {
3425 std::optional<RootIndex> expected =
3427 CHECK(expected);
3428 Tagged_t expected_ptr = ReadOnlyRootPtr(*expected);
3429 DCHECK_NE(map, scratch);
3430 UseScratchRegisterScope temps(this);
3431 CHECK(IsImmAddSub(expected_ptr) || scratch != Register::no_reg() ||
3432 temps.CanAcquire());
3433 if (!IsImmAddSub(expected_ptr)) {
3434 if (scratch == Register::no_reg()) {
3435 scratch = temps.AcquireX();
3436 DCHECK_NE(map, scratch);
3437 }
3438 Operand imm_operand =
3439 MoveImmediateForShiftedOp(scratch, expected_ptr, kAnyShift);
3440 CmpTagged(map, imm_operand);
3441 } else {
3442 CmpTagged(map, Immediate(expected_ptr));
3443 }
3444}
3445
3446void MacroAssembler::IsObjectTypeFast(Register object,
3447 Register compressed_map_scratch,
3448 InstanceType type) {
3449 ASM_CODE_COMMENT(this);
3451 LoadCompressedMap(compressed_map_scratch, object);
3452 CompareInstanceTypeWithUniqueCompressedMap(compressed_map_scratch,
3453 Register::no_reg(), type);
3454}
3455#endif // V8_STATIC_ROOTS_BOOL
3456
3457// Sets equality condition flags.
3458void MacroAssembler::IsObjectType(Register object, Register scratch1,
3459 Register scratch2, InstanceType type) {
3460 ASM_CODE_COMMENT(this);
3461
3462#if V8_STATIC_ROOTS_BOOL
3464 LoadCompressedMap(scratch1, object);
3465 CompareInstanceTypeWithUniqueCompressedMap(
3466 scratch1, scratch1 != scratch2 ? scratch2 : Register::no_reg(), type);
3467 return;
3468 }
3469#endif // V8_STATIC_ROOTS_BOOL
3470
3471 CompareObjectType(object, scratch1, scratch2, type);
3472}
3473
3474// Sets equality condition flags.
3475void MacroAssembler::IsObjectTypeInRange(Register heap_object, Register scratch,
3476 InstanceType lower_limit,
3477 InstanceType higher_limit) {
3478 DCHECK_LT(lower_limit, higher_limit);
3479#if V8_STATIC_ROOTS_BOOL
3480 if (auto range = InstanceTypeChecker::UniqueMapRangeOfInstanceTypeRange(
3481 lower_limit, higher_limit)) {
3482 LoadCompressedMap(scratch.W(), heap_object);
3483 CompareRange(scratch.W(), scratch.W(), range->first, range->second);
3484 return;
3485 }
3486#endif // V8_STATIC_ROOTS_BOOL
3487 LoadMap(scratch, heap_object);
3488 CompareInstanceTypeRange(scratch, scratch, lower_limit, higher_limit);
3489}
3490
3491// Sets condition flags based on comparison, and returns type in type_reg.
3492void MacroAssembler::CompareObjectType(Register object, Register map,
3493 Register type_reg, InstanceType type) {
3494 ASM_CODE_COMMENT(this);
3495 LoadMap(map, object);
3496 CompareInstanceType(map, type_reg, type);
3497}
3498
3499void MacroAssembler::CompareRange(Register value, Register scratch,
3500 unsigned lower_limit, unsigned higher_limit) {
3501 ASM_CODE_COMMENT(this);
3502 DCHECK_LT(lower_limit, higher_limit);
3503 if (lower_limit != 0) {
3504 Sub(scratch.W(), value.W(), Operand(lower_limit));
3505 Cmp(scratch.W(), Operand(higher_limit - lower_limit));
3506 } else {
3507 Cmp(value.W(), Immediate(higher_limit));
3508 }
3509}
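// Note on the trick above: subtracting lower_limit and then doing a single
// unsigned comparison folds a two-sided range check into one Cmp.
// (value - lower_limit) <= (higher_limit - lower_limit) holds, as an unsigned
// comparison, exactly when lower_limit <= value <= higher_limit, because
// values below lower_limit wrap around to very large unsigned numbers.
// Illustrative use (hypothetical label), e.g. to test for an ASCII digit:
//   CompareRange(w0, w1, '0', '9');
//   B(ls, &is_digit);  // taken iff '0' <= w0 <= '9'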
3510
3511void MacroAssembler::JumpIfIsInRange(Register value, Register scratch,
3512 unsigned lower_limit,
3513 unsigned higher_limit,
3514 Label* on_in_range) {
3515 CompareRange(value, scratch, lower_limit, higher_limit);
3516 B(ls, on_in_range);
3517}
3518
3519void MacroAssembler::LoadCompressedMap(Register dst, Register object) {
3520 ASM_CODE_COMMENT(this);
3521 Ldr(dst.W(), FieldMemOperand(object, HeapObject::kMapOffset));
3522}
3523
3524void MacroAssembler::LoadMap(Register dst, Register object) {
3525 ASM_CODE_COMMENT(this);
3527}
3528
3529void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
3530 Register scratch, Label* fbv_undef) {
3531 Label done;
3532
3533 // Load the feedback vector from the closure.
3534 LoadTaggedField(dst,
3535 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3536 LoadTaggedField(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset));
3537
3538 // Check if feedback vector is valid.
3540 Ldrh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3541 Cmp(scratch, FEEDBACK_VECTOR_TYPE);
3542 B(eq, &done);
3543
3544 // Not valid, load undefined.
3545 LoadRoot(dst, RootIndex::kUndefinedValue);
3546 B(fbv_undef);
3547
3548 Bind(&done);
3549}
3550
3551// Sets condition flags based on comparison, and returns type in type_reg.
3552void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
3553 InstanceType type) {
3554 ASM_CODE_COMMENT(this);
3555 Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3556 Cmp(type_reg, type);
3557}
3558
3559// Sets condition flags based on comparison, and returns type in type_reg.
3560void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
3561 InstanceType lower_limit,
3562 InstanceType higher_limit) {
3563 ASM_CODE_COMMENT(this);
3564 DCHECK_LT(lower_limit, higher_limit);
3565 UseScratchRegisterScope temps(this);
3566 Register scratch = temps.AcquireX();
3567 Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3568 CompareRange(type_reg, scratch, lower_limit, higher_limit);
3569}
3570
3571void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3572 ASM_CODE_COMMENT(this);
3573 // Load the map's "bit field 2".
3574 Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3575 // Retrieve elements_kind from bit field 2.
3577}
3578
3579void MacroAssembler::CompareTaggedRoot(const Register& obj, RootIndex index) {
3580 ASM_CODE_COMMENT(this);
3582 UseScratchRegisterScope temps(this);
3584 CmpTagged(obj, Immediate(ReadOnlyRootPtr(index)));
3585 return;
3586 }
3587 // Some smi roots contain system pointer size values like stack limits.
3590 Register temp = temps.AcquireX();
3591 DCHECK(!AreAliased(obj, temp));
3592 LoadRoot(temp, index);
3593 CmpTagged(obj, temp);
3594}
3595
3596void MacroAssembler::CompareRoot(const Register& obj, RootIndex index,
3597 ComparisonMode mode) {
3598 ASM_CODE_COMMENT(this);
3599 if (mode == ComparisonMode::kFullPointer ||
3602 // Some smi roots contain system pointer size values like stack limits.
3603 UseScratchRegisterScope temps(this);
3604 Register temp = temps.AcquireX();
3605 DCHECK(!AreAliased(obj, temp));
3606 LoadRoot(temp, index);
3607 Cmp(obj, temp);
3608 return;
3609 }
3610 CompareTaggedRoot(obj, index);
3611}
3612
3613void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
3614 Label* if_equal) {
3615 CompareRoot(obj, index);
3616 B(eq, if_equal);
3617}
3618
3619void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index,
3620 Label* if_not_equal) {
3621 CompareRoot(obj, index);
3622 B(ne, if_not_equal);
3623}
3624
3625void MacroAssembler::JumpIfIsInRange(const Register& value,
3626 unsigned lower_limit,
3627 unsigned higher_limit,
3628 Label* on_in_range) {
3629 ASM_CODE_COMMENT(this);
3630 if (lower_limit != 0) {
3631 UseScratchRegisterScope temps(this);
3632 Register scratch = temps.AcquireW();
3633 Sub(scratch, value, Operand(lower_limit));
3634 CompareAndBranch(scratch, Operand(higher_limit - lower_limit), ls,
3635 on_in_range);
3636 } else {
3637 CompareAndBranch(value, Operand(higher_limit - lower_limit), ls,
3638 on_in_range);
3639 }
3640}
3641
3642void MacroAssembler::LoadTaggedField(const Register& destination,
3643 const MemOperand& field_operand) {
3645 DecompressTagged(destination, field_operand);
3646 } else {
3647 Ldr(destination, field_operand);
3648 }
3649}
3650
3652 const Register& destination, const MemOperand& field_operand) {
3654 Ldr(destination.W(), field_operand);
3655 } else {
3656 Ldr(destination, field_operand);
3657 }
3658}
3659
3661 const MemOperand& field_operand) {
3663 DecompressTaggedSigned(destination, field_operand);
3664 } else {
3665 Ldr(destination, field_operand);
3666 }
3667}
3668
3669void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
3670 SmiUntag(dst, src);
3671}
3672
3673void MacroAssembler::StoreTwoTaggedFields(const Register& value,
3674 const MemOperand& dst_field_operand) {
3676 Stp(value.W(), value.W(), dst_field_operand);
3677 } else {
3678 Stp(value, value, dst_field_operand);
3679 }
3680}
3681
3682void MacroAssembler::StoreTaggedField(const Register& value,
3683 const MemOperand& dst_field_operand) {
3685 Str(value.W(), dst_field_operand);
3686 } else {
3687 Str(value, dst_field_operand);
3688 }
3689}
3690
3691void MacroAssembler::AtomicStoreTaggedField(const Register& value,
3692 const Register& dst_base,
3693 const Register& dst_index,
3694 const Register& temp) {
3695 Add(temp, dst_base, dst_index);
3697 Stlr(value.W(), temp);
3698 } else {
3699 Stlr(value, temp);
3700 }
3701}
3702
3704 const MemOperand& field_operand) {
3705 ASM_CODE_COMMENT(this);
3706 Ldr(destination.W(), field_operand);
3707 if (v8_flags.slow_debug_code) {
3708 // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
3710 ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
3711 }
3712}
3713
3715 const MemOperand& field_operand) {
3716 ASM_CODE_COMMENT(this);
3717 Ldr(destination.W(), field_operand);
3719}
3720
3722 const Register& source) {
3723 ASM_CODE_COMMENT(this);
3724 Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
3725}
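// Decompression note: a compressed tagged value is the low 32 bits of the full
// pointer. The full pointer is reconstructed by adding the zero-extended
// (UXTW) 32-bit value to the pointer-compression cage base held in
// kPtrComprCageBaseRegister, which is why all tagged objects must live inside
// that cage.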
3726
3728 Tagged_t immediate) {
3729 ASM_CODE_COMMENT(this);
3730 if (IsImmAddSub(immediate)) {
3732 Immediate(immediate, RelocInfo::Mode::NO_INFO));
3733 } else {
3734 // The immediate is larger than 12 bits and therefore can't be encoded directly.
3735 // Use destination as a temporary so we don't have to acquire a scratch register.
3737 Operand imm_operand =
3740 }
3741}
3742
3744 const MemOperand& field_operand) {
3745#if V8_ENABLE_SANDBOX
3746 ASM_CODE_COMMENT(this);
3747 UseScratchRegisterScope temps(this);
3748 Register scratch = temps.AcquireX();
3749 Ldr(destination.W(), field_operand);
3750 Ldr(scratch,
3751 MemOperand(kRootRegister, IsolateData::trusted_cage_base_offset()));
3752 Orr(destination, destination, scratch);
3753#else
3754 UNREACHABLE();
3755#endif // V8_ENABLE_SANDBOX
3756}
3757
3759 const Register& base,
3760 const Register& index,
3761 const Register& temp) {
3762 ASM_CODE_COMMENT(this);
3763 Add(temp, base, index);
3764 Ldar(destination.W(), temp);
3765 if (v8_flags.slow_debug_code) {
3766 // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
3768 ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
3769 }
3770}
3771
3773 const Register& base,
3774 const Register& index,
3775 const Register& temp) {
3776 ASM_CODE_COMMENT(this);
3777 Add(temp, base, index);
3778 Ldar(destination.W(), temp);
3780}
3781
3782void MacroAssembler::CheckPageFlag(const Register& object, int mask,
3783 Condition cc, Label* condition_met) {
3784 ASM_CODE_COMMENT(this);
3785 UseScratchRegisterScope temps(this);
3786 Register scratch = temps.AcquireX();
3788 Ldr(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()));
3789 if (cc == ne) {
3790 TestAndBranchIfAnySet(scratch, mask, condition_met);
3791 } else {
3792 DCHECK_EQ(cc, eq);
3793 TestAndBranchIfAllClear(scratch, mask, condition_met);
3794 }
3795}
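// The flags word read above lives in the MemoryChunk header of the page
// containing the object. With cc == ne the branch is taken if any of the
// requested flag bits are set; with cc == eq it is taken only if all of them
// are clear.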
3796
3797void MacroAssembler::JumpIfMarking(Label* is_marking,
3798 Label::Distance condition_met_distance) {
3799 UseScratchRegisterScope temps(this);
3800 Register scratch = temps.AcquireX();
3801 Ldrb(scratch,
3802 MemOperand(kRootRegister, IsolateData::is_marking_flag_offset()));
3803 Cbnz(scratch, is_marking);
3804}
3805
3806void MacroAssembler::JumpIfNotMarking(Label* not_marking,
3807 Label::Distance condition_met_distance) {
3808 UseScratchRegisterScope temps(this);
3809 Register scratch = temps.AcquireX();
3810 Ldrb(scratch,
3811 MemOperand(kRootRegister, IsolateData::is_marking_flag_offset()));
3812 Cbz(scratch, not_marking);
3813}
3814
3816 Register object, int offset, Register value, LinkRegisterStatus lr_status,
3817 SaveFPRegsMode save_fp, SmiCheck smi_check, ReadOnlyCheck ro_check,
3818 SlotDescriptor slot) {
3819 ASM_CODE_COMMENT(this);
3820 DCHECK(!AreAliased(object, value));
3821 // First, check if a write barrier is even needed. The tests below
3822 // catch stores of Smis and read-only objects.
3823 Label done;
3824
3825#if V8_STATIC_ROOTS_BOOL
3826 if (ro_check == ReadOnlyCheck::kInline) {
3827 // Quick check for Read-only and small Smi values.
3828 static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
3830 }
3831#endif // V8_STATIC_ROOTS_BOOL
3832
3833 // Skip the barrier if writing a smi.
3834 if (smi_check == SmiCheck::kInline) {
3835 JumpIfSmi(value, &done);
3836 }
3837
3838 // Although the object register is tagged, the offset is relative to the start
3839 // of the object, so offset must be a multiple of kTaggedSize.
3841
3842 if (v8_flags.slow_debug_code) {
3843 ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
3844 Label ok;
3845 UseScratchRegisterScope temps(this);
3846 Register scratch = temps.AcquireX();
3847 DCHECK(!AreAliased(object, value, scratch));
3848 Add(scratch, object, offset - kHeapObjectTag);
3849 Tst(scratch, kTaggedSize - 1);
3850 B(eq, &ok);
3851 Abort(AbortReason::kUnalignedCellInWriteBarrier);
3852 Bind(&ok);
3853 }
3854
3855 RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
3856 save_fp, SmiCheck::kOmit, ReadOnlyCheck::kOmit, slot);
3857
3858 Bind(&done);
3859}
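// RecordWriteField is the field-offset convenience wrapper around RecordWrite:
// it applies the cheap inline filters (read-only values and Smi values) here,
// verifies in debug builds that the slot is kTaggedSize-aligned, and then
// calls RecordWrite with those checks marked as already omitted.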
3860
3861void MacroAssembler::DecodeSandboxedPointer(Register value) {
3862 ASM_CODE_COMMENT(this);
3863#ifdef V8_ENABLE_SANDBOX
3865 Operand(value, LSR, kSandboxedPointerShift));
3866#else
3867 UNREACHABLE();
3868#endif
3869}
3870
3872 MemOperand field_operand) {
3873#ifdef V8_ENABLE_SANDBOX
3874 ASM_CODE_COMMENT(this);
3875 Ldr(destination, field_operand);
3877#else
3878 UNREACHABLE();
3879#endif
3880}
3881
3883 MemOperand dst_field_operand) {
3884#ifdef V8_ENABLE_SANDBOX
3885 ASM_CODE_COMMENT(this);
3886 UseScratchRegisterScope temps(this);
3887 Register scratch = temps.AcquireX();
3888 Sub(scratch, value, kPtrComprCageBaseRegister);
3889 Mov(scratch, Operand(scratch, LSL, kSandboxedPointerShift));
3890 Str(scratch, dst_field_operand);
3891#else
3892 UNREACHABLE();
3893#endif
3894}
3895
3897 MemOperand field_operand,
3898 ExternalPointerTagRange tag_range,
3899 Register isolate_root) {
3900 DCHECK(!AreAliased(destination, isolate_root));
3901 ASM_CODE_COMMENT(this);
3902#ifdef V8_ENABLE_SANDBOX
3903 DCHECK(!tag_range.IsEmpty());
3905 UseScratchRegisterScope temps(this);
3906 Register external_table = temps.AcquireX();
3907 if (isolate_root == no_reg) {
3909 isolate_root = kRootRegister;
3910 }
3911 Ldr(external_table,
3912 MemOperand(isolate_root,
3913 IsolateData::external_pointer_table_offset() +
3915 Ldr(destination.W(), field_operand);
3916 Mov(destination, Operand(destination, LSR, kExternalPointerIndexShift));
3917 Ldr(destination, MemOperand(external_table, destination, LSL,
3918 kExternalPointerTableEntrySizeLog2));
3919
3920 // We don't expect to see empty fields here. If this is ever needed, consider
3921 // using a dedicated empty value entry for those tags instead (i.e. an entry
3922 // with the right tag and nullptr payload).
3923 DCHECK(!ExternalPointerCanBeEmpty(tag_range));
3924
3925 // We need another scratch register for the 64-bit tag constant. Instead of
3926 // forcing the `And` to allocate a new temp register (which we may not have),
3927 // reuse the temp register that we used for the external pointer table base.
3928 Register scratch = external_table;
3929 if (tag_range.Size() == 1) {
3930 // The common and simple case: we expect exactly one tag.
3931 static_assert(kExternalPointerShiftedTagMask == 0x7f);
3933 Cmp(scratch, Immediate(tag_range.first));
3934 SbxCheck(eq, AbortReason::kExternalPointerTagMismatch);
3936 } else {
3937 // Not currently supported. Implement once needed.
3939 UNREACHABLE();
3940 }
3941#else
3942 Ldr(destination, field_operand);
3943#endif // V8_ENABLE_SANDBOX
3944}
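// Sandbox note: with V8_ENABLE_SANDBOX the field holds a 32-bit handle rather
// than a raw pointer. The handle is shifted right by
// kExternalPointerIndexShift to obtain an index into the external pointer
// table, the table entry is loaded, and the entry's tag bits are checked
// against the expected tag before the pointer is used.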
3945
3947 MemOperand field_operand,
3948 IndirectPointerTag tag) {
3949#ifdef V8_ENABLE_SANDBOX
3950 LoadIndirectPointerField(destination, field_operand, tag);
3951#else
3952 LoadTaggedField(destination, field_operand);
3953#endif
3954}
3955
3957 MemOperand dst_field_operand) {
3958#ifdef V8_ENABLE_SANDBOX
3959 StoreIndirectPointerField(value, dst_field_operand);
3960#else
3961 StoreTaggedField(value, dst_field_operand);
3962#endif
3963}
3964
3966 MemOperand field_operand,
3967 IndirectPointerTag tag) {
3968#ifdef V8_ENABLE_SANDBOX
3969 ASM_CODE_COMMENT(this);
3970 UseScratchRegisterScope temps(this);
3971
3972 Register handle = temps.AcquireX();
3973 Ldr(handle.W(), field_operand);
3974 ResolveIndirectPointerHandle(destination, handle, tag);
3975#else
3976 UNREACHABLE();
3977#endif // V8_ENABLE_SANDBOX
3978}
3979
3981 MemOperand dst_field_operand) {
3982#ifdef V8_ENABLE_SANDBOX
3983 ASM_CODE_COMMENT(this);
3984 UseScratchRegisterScope temps(this);
3985 Register scratch = temps.AcquireX();
3986 Ldr(scratch.W(),
3987 FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset));
3988 Str(scratch.W(), dst_field_operand);
3989#else
3990 UNREACHABLE();
3991#endif // V8_ENABLE_SANDBOX
3992}
3993
3994#ifdef V8_ENABLE_SANDBOX
3995void MacroAssembler::ResolveIndirectPointerHandle(Register destination,
3996 Register handle,
3997 IndirectPointerTag tag) {
3998 // The tag implies which pointer table to use.
3999 if (tag == kUnknownIndirectPointerTag) {
4000 // In this case we have to rely on the handle marking to determine which
4001 // pointer table to use.
4002 Label is_trusted_pointer_handle, done;
4003 constexpr int kCodePointerHandleMarkerBit = 0;
4004 static_assert((1 << kCodePointerHandleMarkerBit) ==
4006 Tbz(handle, kCodePointerHandleMarkerBit, &is_trusted_pointer_handle);
4007 ResolveCodePointerHandle(destination, handle);
4008 B(&done);
4009 Bind(&is_trusted_pointer_handle);
4010 ResolveTrustedPointerHandle(destination, handle,
4012 Bind(&done);
4013 } else if (tag == kCodeIndirectPointerTag) {
4014 ResolveCodePointerHandle(destination, handle);
4015 } else {
4016 ResolveTrustedPointerHandle(destination, handle, tag);
4017 }
4018}
4019
4020void MacroAssembler::ResolveTrustedPointerHandle(Register destination,
4021 Register handle,
4022 IndirectPointerTag tag) {
4023 DCHECK_NE(tag, kCodeIndirectPointerTag);
4025
4026 Register table = destination;
4028 Ldr(table,
4029 MemOperand{kRootRegister, IsolateData::trusted_pointer_table_offset()});
4033 // Untag the pointer and remove the marking bit in one operation.
4034 Register tag_reg = handle;
4035 Mov(tag_reg, Immediate(~(tag | kTrustedPointerTableMarkBit)));
4036 And(destination, destination, tag_reg);
4037}
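// The final Mov/And pair untags the loaded entry and clears the table's
// marking bit in a single masking step: the constant
// ~(tag | kTrustedPointerTableMarkBit) has zeros exactly at the tag bits and
// the mark bit, so ANDing with it removes both regardless of whether the mark
// bit happened to be set.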
4038
4039void MacroAssembler::ResolveCodePointerHandle(Register destination,
4040 Register handle) {
4042
4043 Register table = destination;
4044 LoadCodePointerTableBase(table);
4050 // The LSB is used as marking bit by the code pointer table, so here we have
4051 // to set it using a bitwise OR as it may or may not be set.
4053}
4054
4055void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,
4056 MemOperand field_operand,
4057 CodeEntrypointTag tag) {
4059 ASM_CODE_COMMENT(this);
4060 UseScratchRegisterScope temps(this);
4061 Register scratch = temps.AcquireX();
4062 LoadCodePointerTableBase(scratch);
4063 Ldr(destination.W(), field_operand);
4064 // TODO(saelo): can the offset computation be done more efficiently?
4068 if (tag != 0) {
4069 Mov(scratch, Immediate(tag));
4070 Eor(destination, destination, scratch);
4071 }
4072}
4073
4074void MacroAssembler::LoadCodePointerTableBase(Register destination) {
4075#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
4076 if (!options().isolate_independent_code && isolate()) {
4077 // Embed the code pointer table address into the code.
4079 ExternalReference::code_pointer_table_base_address(isolate()));
4080 } else {
4081 // Force indirect load via root register as a workaround for
4082 // isolate-independent code (for example, for Wasm).
4086 destination));
4087 }
4088#else
4089 // Embed the code pointer table address into the code.
4090 Mov(destination, ExternalReference::global_code_pointer_table_base_address());
4091#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
4092}
4093#endif // V8_ENABLE_SANDBOX
4094
4095#ifdef V8_ENABLE_LEAPTIERING
4096void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
4097 Register dispatch_handle,
4098 Register scratch) {
4099 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
4100 ASM_CODE_COMMENT(this);
4101
4102 Register index = destination;
4103 Mov(scratch, ExternalReference::js_dispatch_table_address());
4104 Mov(index, Operand(dispatch_handle, LSR, kJSDispatchHandleShift));
4105 Add(scratch, scratch, Operand(index, LSL, kJSDispatchTableEntrySizeLog2));
4106 Ldr(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
4107}
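// Dispatch-handle decoding, spelled out: the handle's low bits are shifted
// away (kJSDispatchHandleShift) to obtain the entry index, the index is scaled
// by the entry size (kJSDispatchTableEntrySizeLog2) to get the byte offset
// into the table, and the code entrypoint is then read at kEntrypointOffset
// within that entry, i.e.
//   entry       = table + ((handle >> kJSDispatchHandleShift)
//                          << kJSDispatchTableEntrySizeLog2)
//   destination = Load64(entry + kEntrypointOffset)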
4108
4109void MacroAssembler::LoadEntrypointFromJSDispatchTable(
4110 Register destination, JSDispatchHandle dispatch_handle, Register scratch) {
4111 DCHECK(!AreAliased(destination, scratch));
4112 ASM_CODE_COMMENT(this);
4113
4114 Mov(scratch, ExternalReference::js_dispatch_table_address());
4115 // WARNING: This offset calculation is only safe if we have already stored a
4116 // RelocInfo for the dispatch handle, e.g. in CallJSDispatchEntry (thus
4117 // keeping the dispatch entry alive), _and_ because the dispatch table is
4118 // not compactable (so the offset calculation is not invalidated by
4119 // a compaction).
4120 // TODO(leszeks): Make this less of a footgun.
4121 static_assert(!JSDispatchTable::kSupportsCompaction);
4122 int offset = JSDispatchTable::OffsetOfEntry(dispatch_handle) +
4123 JSDispatchEntry::kEntrypointOffset;
4124 Ldr(destination, MemOperand(scratch, offset));
4125}
4126
4127void MacroAssembler::LoadParameterCountFromJSDispatchTable(
4128 Register destination, Register dispatch_handle, Register scratch) {
4129 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
4130 ASM_CODE_COMMENT(this);
4131
4132 Register index = destination;
4133 Mov(scratch, ExternalReference::js_dispatch_table_address());
4134 Mov(index, Operand(dispatch_handle, LSR, kJSDispatchHandleShift));
4135 Add(scratch, scratch, Operand(index, LSL, kJSDispatchTableEntrySizeLog2));
4136 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
4137 Ldrh(destination, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
4138}
4139
4140void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
4141 Register entrypoint, Register parameter_count, Register dispatch_handle,
4142 Register scratch) {
4143 DCHECK(!AreAliased(entrypoint, parameter_count, dispatch_handle, scratch));
4144 ASM_CODE_COMMENT(this);
4145
4146 Register index = parameter_count;
4147 Mov(scratch, ExternalReference::js_dispatch_table_address());
4148 Mov(index, Operand(dispatch_handle, LSR, kJSDispatchHandleShift));
4149 Add(scratch, scratch, Operand(index, LSL, kJSDispatchTableEntrySizeLog2));
4150 Ldr(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
4151 static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
4152 Ldrh(parameter_count,
4153 MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
4154}
4155#endif
4156
4158 MemOperand field_operand) {
4160#ifdef V8_ENABLE_SANDBOX
4161 DecompressProtected(destination, field_operand);
4162#else
4163 LoadTaggedField(destination, field_operand);
4164#endif
4165}
4166
4168 if (registers.is_empty()) return;
4169 ASM_CODE_COMMENT(this);
4170 CPURegList regs(kXRegSizeInBits, registers);
4171 // If we were saving LR, we might need to sign it.
4172 DCHECK(!regs.IncludesAliasOf(lr));
4173 regs.Align();
4174 PushCPURegList(regs);
4175}
4176
4178 if (registers.is_empty()) return;
4179 ASM_CODE_COMMENT(this);
4180 CPURegList regs(kXRegSizeInBits, registers);
4181 // If we were saving LR, we might need to sign it.
4182 DCHECK(!regs.IncludesAliasOf(lr));
4183 regs.Align();
4184 PopCPURegList(regs);
4185}
4186
4187void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
4188 SaveFPRegsMode fp_mode) {
4189 ASM_CODE_COMMENT(this);
4192
4195 offset);
4196
4199}
4200
4201void MacroAssembler::CallIndirectPointerBarrier(Register object, Operand offset,
4202 SaveFPRegsMode fp_mode,
4203 IndirectPointerTag tag) {
4204 ASM_CODE_COMMENT(this);
4208
4212 offset);
4214 Operand(tag));
4215
4218}
4219
4221 Operand offset,
4222 SaveFPRegsMode fp_mode,
4223 StubCallMode mode) {
4224 ASM_CODE_COMMENT(this);
4227
4229 Register slot_address_parameter =
4231 MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
4232
4233 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
4234
4236}
4237
4238void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
4239 SaveFPRegsMode fp_mode,
4240 StubCallMode mode) {
4241 ASM_CODE_COMMENT(this);
4244#if V8_ENABLE_WEBASSEMBLY
4245 if (mode == StubCallMode::kCallWasmRuntimeStub) {
4246 auto wasm_target =
4247 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
4248 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
4249#else
4250 if (false) {
4251#endif
4252 } else {
4254 }
4255}
4256
4257void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
4258 Register object, Operand offset) {
4259 ASM_CODE_COMMENT(this);
4260 DCHECK_NE(dst_object, dst_slot);
4261 // If `offset` is a register, it cannot overlap with `object`.
4262 DCHECK_IMPLIES(!offset.IsImmediate(), offset.reg() != object);
4263
4264 // If the slot register does not overlap with the object register, we can
4265 // overwrite it.
4266 if (dst_slot != object) {
4267 Add(dst_slot, object, offset);
4268 Mov(dst_object, object);
4269 return;
4270 }
4271
4272 DCHECK_EQ(dst_slot, object);
4273
4274 // If the destination object register does not overlap with the offset
4275 // register, we can overwrite it.
4276 if (offset.IsImmediate() || (offset.reg() != dst_object)) {
4277 Mov(dst_object, dst_slot);
4278 Add(dst_slot, dst_slot, offset);
4279 return;
4280 }
4281
4282 DCHECK_EQ(dst_object, offset.reg());
4283
4284 // We only have `dst_slot` and `dst_object` left as distinct registers so we
4285 // have to swap them. We write this as an add+sub sequence to avoid using a
4286 // scratch register.
4287 Add(dst_slot, dst_slot, dst_object);
4288 Sub(dst_object, dst_slot, dst_object);
4289}
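// Worked example of the closing add/sub "swap": at that point dst_slot holds
// the object pointer o and dst_object holds the offset f. After
// Add(dst_slot, dst_slot, dst_object), dst_slot = o + f, which is the slot
// address we want. Sub(dst_object, dst_slot, dst_object) then computes
// (o + f) - f = o, recovering the object pointer, so both results are produced
// without touching a scratch register.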
4290
4291// If lr_status is kLRHasBeenSaved, lr will be clobbered.
4292//
4293// The register 'object' contains a heap object pointer. The heap object tag is
4294// shifted away.
4295void MacroAssembler::RecordWrite(Register object, Operand offset,
4296 Register value, LinkRegisterStatus lr_status,
4297 SaveFPRegsMode fp_mode, SmiCheck smi_check,
4298 ReadOnlyCheck ro_check, SlotDescriptor slot) {
4299 ASM_CODE_COMMENT(this);
4300 ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
4301 DCHECK(!AreAliased(object, value));
4302
4303 if (v8_flags.slow_debug_code) {
4304 ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
4305 UseScratchRegisterScope temps(this);
4306 Register temp = temps.AcquireX();
4307 DCHECK(!AreAliased(object, value, temp));
4308 Add(temp, object, offset);
4309 if (slot.contains_indirect_pointer()) {
4311 slot.indirect_pointer_tag());
4312 } else {
4313 DCHECK(slot.contains_direct_pointer());
4314 LoadTaggedField(temp, MemOperand(temp));
4315 }
4316 Cmp(temp, value);
4317 Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
4318 }
4319
4320 if (v8_flags.disable_write_barriers) {
4321 return;
4322 }
4323
4324 // First, check if a write barrier is even needed. The tests below
4325 // catch stores of smis and read-only objects, as well as stores into the
4326 // young generation.
4327 Label done;
4328
4329#if V8_STATIC_ROOTS_BOOL
4330 if (ro_check == ReadOnlyCheck::kInline) {
4331 // Quick check for Read-only and small Smi values.
4332 static_assert(StaticReadOnlyRoot::kLastAllocatedRoot < kRegularPageSize);
4334 }
4335#endif // V8_STATIC_ROOTS_BOOL
4336
4337 if (smi_check == SmiCheck::kInline) {
4338 DCHECK_EQ(0, kSmiTag);
4339 JumpIfSmi(value, &done);
4340 }
4341
4342 if (slot.contains_indirect_pointer()) {
4343 // The indirect pointer write barrier is only enabled during marking.
4344 JumpIfNotMarking(&done);
4345 } else {
4347 &done);
4348
4350 &done);
4351 }
4352
4353 // Record the actual write.
4354 if (lr_status == kLRHasNotBeenSaved) {
4356 }
4358 DCHECK(!AreAliased(object, slot_address, value));
4359 if (slot.contains_direct_pointer()) {
4360 // TODO(cbruni): Turn offset into int.
4361 DCHECK(offset.IsImmediate());
4362 Add(slot_address, object, offset);
4363 CallRecordWriteStub(object, slot_address, fp_mode,
4365 } else {
4366 DCHECK(slot.contains_indirect_pointer());
4367 CallIndirectPointerBarrier(object, offset, fp_mode,
4368 slot.indirect_pointer_tag());
4369 }
4370 if (lr_status == kLRHasNotBeenSaved) {
4372 }
4373 if (v8_flags.slow_debug_code) Mov(slot_address, Operand(kZapValue));
4374
4375 Bind(&done);
4376}
4377
4378void MacroAssembler::Check(Condition cond, AbortReason reason) {
4379 Label ok;
4380 B(cond, &ok);
4381 Abort(reason);
4382 // Will not return here.
4383 Bind(&ok);
4384}
4385
4387 Check(cc, reason);
4388}
4389
4390void MacroAssembler::Trap() { Brk(0); }
4391void MacroAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
4392
4394 ASM_CODE_COMMENT(this);
4395 if (v8_flags.code_comments) {
4396 RecordComment("Abort message:", SourceLocation{});
4397 RecordComment(GetAbortReason(reason), SourceLocation{});
4398 }
4399
4400 // Without debug code, save the code size and just trap.
4401 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
4402 Brk(0);
4403 return;
4404 }
4405
4406 // We need some scratch registers for the MacroAssembler, so make sure we have
4407 // some. This is safe here because Abort never returns.
4408 uint64_t old_tmp_list = TmpList()->bits();
4410
4411 if (should_abort_hard()) {
4412 // We don't care if we constructed a frame. Just pretend we did.
4413 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
4414 Mov(w0, static_cast<int>(reason));
4415 Call(ExternalReference::abort_with_reason());
4416 return;
4417 }
4418
4419 // Avoid infinite recursion; Push contains some assertions that use Abort.
4420 HardAbortScope hard_aborts(this);
4421
4422 Mov(x1, Smi::FromInt(static_cast<int>(reason)));
4423
4424 {
4425 // We don't actually want to generate a pile of code for this, so just
4426 // claim there is a stack frame, without generating one.
4427 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
4428 if (root_array_available()) {
4429 // Generate an indirect call via builtins entry table here in order to
4430 // ensure that the interpreter_entry_return_pc_offset is the same for
4431 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
4432 // when v8_flags.debug_code is enabled.
4433 UseScratchRegisterScope temps(this);
4434 Register scratch = temps.AcquireX();
4435 LoadEntryFromBuiltin(Builtin::kAbort, scratch);
4436 Call(scratch);
4437 } else {
4438 CallBuiltin(Builtin::kAbort);
4439 }
4440 }
4441
4442 TmpList()->set_bits(old_tmp_list);
4443}
4444
4445void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
4446 LoadMap(dst, cp);
4448 dst, FieldMemOperand(
4449 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
4451}
4452
4453void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
4454 CodeKind min_opt_level,
4455 Register feedback_vector,
4456 FeedbackSlot slot,
4457 Label* on_result,
4459 Label fallthrough, clear_slot;
4461 scratch_and_result,
4462 FieldMemOperand(feedback_vector,
4463 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
4464 LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
4465
4466 // Is it marked_for_deoptimization? If yes, clear the slot.
4467 {
4468 UseScratchRegisterScope temps(this);
4469
4470 // The entry references a CodeWrapper object. Unwrap it now.
4472 scratch_and_result,
4473 FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));
4474
4475 Register temp = temps.AcquireX();
4476 JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temp, &clear_slot);
4477 if (min_opt_level == CodeKind::TURBOFAN_JS) {
4478 JumpIfCodeIsTurbofanned(scratch_and_result, temp, on_result);
4479 B(&fallthrough);
4480 } else {
4481 B(on_result);
4482 }
4483 }
4484
4485 bind(&clear_slot);
4486 Mov(scratch_and_result, ClearedValue());
4488 scratch_and_result,
4489 FieldMemOperand(feedback_vector,
4490 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
4491
4492 bind(&fallthrough);
4493 Mov(scratch_and_result, 0);
4494}
4495
4496// This is the main Printf implementation. All other Printf variants call
4497// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4498void MacroAssembler::PrintfNoPreserve(const char* format,
4499 const CPURegister& arg0,
4500 const CPURegister& arg1,
4501 const CPURegister& arg2,
4502 const CPURegister& arg3) {
4503 ASM_CODE_COMMENT(this);
4504 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4505 // in most cases anyway, so this restriction shouldn't be too serious.
4506 DCHECK(!kCallerSaved.IncludesAliasOf(sp));
4507
4508 // The provided arguments, and their proper procedure-call standard registers.
4509 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
4510 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
4511
4512 int arg_count = kPrintfMaxArgCount;
4513
4514 // The PCS varargs registers for printf. Note that x0 is used for the printf
4515 // format string.
4516 static const CPURegList kPCSVarargs =
4517 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
4518 static const CPURegList kPCSVarargsFP =
4519 CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1);
4520
4521 // We can use caller-saved registers as scratch values, except for the
4522 // arguments and the PCS registers where they might need to go.
4523 CPURegList tmp_list = kCallerSaved;
4524 tmp_list.Remove(x0); // Used to pass the format string.
4525 tmp_list.Remove(kPCSVarargs);
4526 tmp_list.Remove(arg0, arg1, arg2, arg3);
4527
4528 CPURegList fp_tmp_list = kCallerSavedV;
4529 fp_tmp_list.Remove(kPCSVarargsFP);
4530 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4531
4532 // Override the MacroAssembler's scratch register list. The lists will be
4533 // reset automatically at the end of the UseScratchRegisterScope.
4534 UseScratchRegisterScope temps(this);
4535 TmpList()->set_bits(tmp_list.bits());
4536 FPTmpList()->set_bits(fp_tmp_list.bits());
4537
4538 // Copies of the printf vararg registers that we can pop from.
4539 CPURegList pcs_varargs = kPCSVarargs;
4540#ifndef V8_OS_WIN
4541 CPURegList pcs_varargs_fp = kPCSVarargsFP;
4542#endif
4543
4544 // Place the arguments. There are lots of clever tricks and optimizations we
4545 // could use here, but Printf is a debug tool so instead we just try to keep
4546 // it simple: Move each input that isn't already in the right place to a
4547 // scratch register, then move everything back.
4548 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
4549 // Work out the proper PCS register for this argument.
4550 if (args[i].IsRegister()) {
4551 pcs[i] = pcs_varargs.PopLowestIndex().X();
4552 // We might only need a W register here. We need to know the size of the
4553 // argument so we can properly encode it for the simulator call.
4554 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
4555 } else if (args[i].IsVRegister()) {
4556 // In C, floats are always cast to doubles for varargs calls.
4557#ifdef V8_OS_WIN
4558 // For variadic functions, SIMD and floating-point registers aren't used;
4559 // the general-purpose registers x0-x7 are used instead.
4560 // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
4561 pcs[i] = pcs_varargs.PopLowestIndex().X();
4562#else
4563 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
4564#endif
4565 } else {
4566 DCHECK(args[i].IsNone());
4567 arg_count = i;
4568 break;
4569 }
4570
4571 // If the argument is already in the right place, leave it where it is.
4572 if (args[i].Aliases(pcs[i])) continue;
4573
4574 // Otherwise, if the argument is in a PCS argument register, allocate an
4575 // appropriate scratch register and then move it out of the way.
4576 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
4577 kPCSVarargsFP.IncludesAliasOf(args[i])) {
4578 if (args[i].IsRegister()) {
4579 Register old_arg = args[i].Reg();
4580 Register new_arg = temps.AcquireSameSizeAs(old_arg);
4581 Mov(new_arg, old_arg);
4582 args[i] = new_arg;
4583 } else {
4584 VRegister old_arg = args[i].VReg();
4585 VRegister new_arg = temps.AcquireSameSizeAs(old_arg);
4586 Fmov(new_arg, old_arg);
4587 args[i] = new_arg;
4588 }
4589 }
4590 }
4591
4592 // Do a second pass to move values into their final positions and perform any
4593 // conversions that may be required.
4594 for (int i = 0; i < arg_count; i++) {
4595#ifdef V8_OS_WIN
4596 if (args[i].IsVRegister()) {
4597 if (pcs[i].SizeInBytes() != args[i].SizeInBytes()) {
4598 // If the argument is half- or single-precision, convert it to
4599 // double precision in a scratch register before moving it into
4600 // the corresponding X register.
4601 VRegister temp0 = temps.AcquireD();
4602 Fcvt(temp0.VReg(), args[i].VReg());
4603 Fmov(pcs[i].Reg(), temp0);
4604 } else {
4605 Fmov(pcs[i].Reg(), args[i].VReg());
4606 }
4607 } else {
4608 Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
4609 }
4610#else
4611 DCHECK(pcs[i].type() == args[i].type());
4612 if (pcs[i].IsRegister()) {
4613 Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
4614 } else {
4615 DCHECK(pcs[i].IsVRegister());
4616 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
4617 Fmov(pcs[i].VReg(), args[i].VReg());
4618 } else {
4619 Fcvt(pcs[i].VReg(), args[i].VReg());
4620 }
4621 }
4622#endif
4623 }
4624
4625 // Load the format string into x0, as per the procedure-call standard.
4626 //
4627 // To make the code as portable as possible, the format string is encoded
4628 // directly in the instruction stream. It might be cleaner to encode it in a
4629 // literal pool, but since Printf is usually used for debugging, it is
4630 // beneficial for it to be minimally dependent on other features.
4631 Label format_address;
4632 Adr(x0, &format_address);
4633
4634 // Emit the format string directly in the instruction stream.
4635 {
4636 BlockPoolsScope scope(this);
4637 Label after_data;
4638 B(&after_data);
4639 Bind(&format_address);
4640 EmitStringData(format);
4641 Unreachable();
4642 Bind(&after_data);
4643 }
4644
4645 CallPrintf(arg_count, pcs);
4646}
4647
4648void MacroAssembler::CallPrintf(int arg_count, const CPURegister* args) {
4649 ASM_CODE_COMMENT(this);
4650 // A call to printf needs special handling for the simulator, since the system
4651 // printf function will use a different instruction set and the procedure-call
4652 // standard will not be compatible.
4653 if (options().enable_simulator_code) {
4654 InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
4656 dc32(arg_count); // kPrintfArgCountOffset
4657
4658 // Determine the argument pattern.
4659 uint32_t arg_pattern_list = 0;
4660 for (int i = 0; i < arg_count; i++) {
4661 uint32_t arg_pattern;
4662 if (args[i].IsRegister()) {
4663 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
4664 } else {
4665 DCHECK(args[i].Is64Bits());
4666 arg_pattern = kPrintfArgD;
4667 }
4668 DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
4669 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
4670 }
4671 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
4672 return;
4673 }
4674
4675 Call(ExternalReference::printf_function());
4676}
4677
4678void MacroAssembler::Printf(const char* format, CPURegister arg0,
4679 CPURegister arg1, CPURegister arg2,
4680 CPURegister arg3) {
4681 ASM_CODE_COMMENT(this);
4682 // Printf is expected to preserve all registers, so make sure that none are
4683 // available as scratch registers until we've preserved them.
4684 uint64_t old_tmp_list = TmpList()->bits();
4685 uint64_t old_fp_tmp_list = FPTmpList()->bits();
4686 TmpList()->set_bits(0);
4687 FPTmpList()->set_bits(0);
4688
4689 CPURegList saved_registers = kCallerSaved;
4690 saved_registers.Align();
4691
4692 // Preserve all caller-saved registers as well as NZCV.
4693 // PushCPURegList asserts that the size of each list is a multiple of 16
4694 // bytes.
4695 PushCPURegList(saved_registers);
4697
4698 // We can use caller-saved registers as scratch values (except for argN).
4699 CPURegList tmp_list = saved_registers;
4700 CPURegList fp_tmp_list = kCallerSavedV;
4701 tmp_list.Remove(arg0, arg1, arg2, arg3);
4702 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4703 TmpList()->set_bits(tmp_list.bits());
4704 FPTmpList()->set_bits(fp_tmp_list.bits());
4705
4706 {
4707 UseScratchRegisterScope temps(this);
4708 // If any of the arguments are the current stack pointer, allocate a new
4709 // register for them, and adjust the value to compensate for pushing the
4710 // caller-saved registers.
4711 bool arg0_sp = arg0.is_valid() && sp.Aliases(arg0);
4712 bool arg1_sp = arg1.is_valid() && sp.Aliases(arg1);
4713 bool arg2_sp = arg2.is_valid() && sp.Aliases(arg2);
4714 bool arg3_sp = arg3.is_valid() && sp.Aliases(arg3);
4715 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
4716 // Allocate a register to hold the original stack pointer value, to pass
4717 // to PrintfNoPreserve as an argument.
4718 Register arg_sp = temps.AcquireX();
4719 Add(arg_sp, sp,
4720 saved_registers.TotalSizeInBytes() +
4721 kCallerSavedV.TotalSizeInBytes());
4722 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
4723 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
4724 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
4725 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
4726 }
4727
4728 // Preserve NZCV.
4729 {
4730 UseScratchRegisterScope temps(this);
4731 Register tmp = temps.AcquireX();
4732 Mrs(tmp, NZCV);
4733 Push(tmp, xzr);
4734 }
4735
4736 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
4737
4738 // Restore NZCV.
4739 {
4740 UseScratchRegisterScope temps(this);
4741 Register tmp = temps.AcquireX();
4742 Pop(xzr, tmp);
4743 Msr(NZCV, tmp);
4744 }
4745 }
4746
4748 PopCPURegList(saved_registers);
4749
4750 TmpList()->set_bits(old_tmp_list);
4751 FPTmpList()->set_bits(old_fp_tmp_list);
4752}
4753
4754void MacroAssembler::ComputeCodeStartAddress(const Register& rd) {
4755 // We can use adr to load a pc relative location.
4756 adr(rd, -pc_offset());
4757}
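// adr computes a pc-relative address, so adding the negated pc_offset() (the
// number of bytes emitted so far) to the current pc yields the address of the
// first instruction of the code object being generated.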
4758
4762 "Offsets must be consecutive for ldp!");
4763#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
4764 // Make sure we can use x16 and x17.
4765 UseScratchRegisterScope temps(this);
4766 temps.Exclude(x16, x17);
4767 // We can load the return address directly into x17.
4770 Autib1716();
4771 Mov(lr, x17);
4772#else
4774#endif
4775}
4776
4777#if V8_ENABLE_WEBASSEMBLY
4778void MacroAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
4779 UseScratchRegisterScope temps(this);
4780 temps.Exclude(x16, x17);
4781 Adr(x17, return_location);
4782#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
4783 Add(x16, fp, WasmExitFrameConstants::kCallingPCOffset + kSystemPointerSize);
4784 Pacib1716();
4785#endif
4786 Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
4787}
4788#endif // V8_ENABLE_WEBASSEMBLY
4789
4790void MacroAssembler::PopcntHelper(Register dst, Register src) {
4791 UseScratchRegisterScope temps(this);
4792 VRegister scratch = temps.AcquireV(kFormat8B);
4793 VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D();
4794 Fmov(tmp, src);
4795 Cnt(scratch, scratch);
4796 Addv(scratch.B(), scratch);
4797 Fmov(dst, tmp);
4798}
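// Base ARMv8 has no scalar popcount instruction, so the value is round-tripped
// through a NEON register: Cnt counts the set bits in each byte, Addv sums
// those per-byte counts across the vector, and the result is moved back to the
// general-purpose destination.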
4799
4800void MacroAssembler::I8x16BitMask(Register dst, VRegister src, VRegister temp) {
4801 ASM_CODE_COMMENT(this);
4802 UseScratchRegisterScope temps(this);
4803 VRegister tmp = temps.AcquireQ();
4804 VRegister mask = temps.AcquireQ();
4805
4806 if (CpuFeatures::IsSupported(PMULL1Q) && temp.is_valid()) {
4807 CpuFeatureScope scope(this, PMULL1Q);
4808
4809 Movi(mask.V2D(), 0x0102'0408'1020'4080);
4810 // Normalize the input - at most 1 bit per vector element should be set.
4811 Ushr(tmp.V16B(), src.V16B(), 7);
4812 // Collect the input bits into a byte of the output - once for each
4813 // half of the input.
4814 Pmull2(temp.V1Q(), mask.V2D(), tmp.V2D());
4815 Pmull(tmp.V1Q(), mask.V1D(), tmp.V1D());
4816 // Combine the bits from both input halves.
4817 Trn2(tmp.V8B(), tmp.V8B(), temp.V8B());
4818 Mov(dst.W(), tmp.V8H(), 3);
4819 } else {
4820 // Set the i-th bit in lane i of the mask. When ANDed with tmp, negative
4821 // (sign-extended, all-ones) lanes keep exactly that bit; the rest become 0.
4822 Sshr(tmp.V16B(), src.V16B(), 7);
4823 Movi(mask.V2D(), 0x8040'2010'0804'0201);
4824 And(tmp.V16B(), mask.V16B(), tmp.V16B());
4825 Ext(mask.V16B(), tmp.V16B(), tmp.V16B(), 8);
4826 Zip1(tmp.V16B(), tmp.V16B(), mask.V16B());
4827 Addv(tmp.H(), tmp.V8H());
4828 Mov(dst.W(), tmp.V8H(), 0);
4829 }
4830}
4831
4832void MacroAssembler::I16x8BitMask(Register dst, VRegister src) {
4833 ASM_CODE_COMMENT(this);
4834 UseScratchRegisterScope temps(this);
4835 VRegister tmp = temps.AcquireQ();
4836 VRegister mask = temps.AcquireQ();
4837
4838 if (CpuFeatures::IsSupported(PMULL1Q)) {
4839 CpuFeatureScope scope(this, PMULL1Q);
4840
4841 // Normalize the input - at most 1 bit per vector element should be set.
4842 Ushr(tmp.V8H(), src.V8H(), 15);
4843 Movi(mask.V1D(), 0x0102'0408'1020'4080);
4844 // Trim some of the redundant 0 bits, so that we can operate on
4845 // only 64 bits.
4846 Xtn(tmp.V8B(), tmp.V8H());
4847 // Collect the input bits into a byte of the output.
4848 Pmull(tmp.V1Q(), tmp.V1D(), mask.V1D());
4849 Mov(dst.W(), tmp.V16B(), 7);
4850 } else {
4851 Sshr(tmp.V8H(), src.V8H(), 15);
4852 // Set the i-th bit in lane i of the mask. When ANDed with tmp, negative
4853 // (sign-extended, all-ones) lanes keep exactly that bit; the rest become 0.
4854 Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);
4855 And(tmp.V16B(), mask.V16B(), tmp.V16B());
4856 Addv(tmp.H(), tmp.V8H());
4857 Mov(dst.W(), tmp.V8H(), 0);
4858 }
4859}
4860
4861void MacroAssembler::I32x4BitMask(Register dst, VRegister src) {
4862 ASM_CODE_COMMENT(this);
4863 UseScratchRegisterScope temps(this);
4864 Register tmp = temps.AcquireX();
4865 Mov(dst.X(), src.D(), 1);
4866 Fmov(tmp.X(), src.D());
4867 And(dst.X(), dst.X(), 0x80000000'80000000);
4868 And(tmp.X(), tmp.X(), 0x80000000'80000000);
4869 Orr(dst.X(), dst.X(), Operand(dst.X(), LSL, 31));
4870 Orr(tmp.X(), tmp.X(), Operand(tmp.X(), LSL, 31));
4871 Lsr(dst.X(), dst.X(), 60);
4872 Bfxil(dst.X(), tmp.X(), 62, 2);
4873}
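// How the bits line up (a sketch of the data flow above): dst starts with
// lanes 2 and 3 (the upper 64 bits of src), tmp with lanes 0 and 1. The And
// keeps only the per-lane sign bits (bits 31 and 63 of each half), the Orr
// with an LSL #31 copy gathers them into bits 62 and 63, the Lsr drops dst's
// pair down to bits 2 and 3, and Bfxil inserts the two corresponding bits from
// tmp into bits 0 and 1, producing the 4-bit lane mask.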
4874
4875void MacroAssembler::I64x2BitMask(Register dst, VRegister src) {
4876 ASM_CODE_COMMENT(this);
4877 UseScratchRegisterScope scope(this);
4878 Register tmp = scope.AcquireX();
4879 Mov(dst.X(), src.D(), 1);
4880 Fmov(tmp.X(), src.D());
4881 Lsr(dst.X(), dst.X(), 62);
4882 Bfxil(dst.X(), tmp.X(), 63, 1);
4883}
4884
4885void MacroAssembler::I64x2AllTrue(Register dst, VRegister src) {
4886 ASM_CODE_COMMENT(this);
4887 UseScratchRegisterScope scope(this);
4888 VRegister tmp = scope.AcquireV(kFormat2D);
4889 Cmeq(tmp.V2D(), src.V2D(), 0);
4890 Addp(tmp.D(), tmp);
4891 Fcmp(tmp.D(), tmp.D());
4892 Cset(dst, eq);
4893}
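// Why the float compare works: Cmeq writes all-ones into each lane of tmp
// whose source lane is zero, and Addp folds the two lanes into one 64-bit
// value. That value is 0 only if both source lanes were non-zero; the only
// other possible results (0xFFFF...FFFF or 0xFFFF...FFFE) are NaN bit
// patterns, for which Fcmp of the value against itself is unordered. The eq
// condition therefore holds exactly when all lanes are true.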
4894
4895// Calls an API function. Allocates HandleScope, extracts returned value
4896// from handle and propagates exceptions. Clobbers C argument registers
4897// and C caller-saved registers. Restores context. On return removes
4898// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
4899// (GCed, includes the call JS arguments space and the additional space
4900// allocated for the fast call).
4901void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
4902 Register function_address,
4903 ExternalReference thunk_ref, Register thunk_arg,
4904 int slots_to_drop_on_return,
4905 MemOperand* argc_operand,
4906 MemOperand return_value_operand) {
4907 ASM_CODE_COMMENT(masm);
4908 ASM_LOCATION("CallApiFunctionAndReturn");
4909
4910 using ER = ExternalReference;
4911
4912 Isolate* isolate = masm->isolate();
4914 ER::handle_scope_next_address(isolate), no_reg);
4916 ER::handle_scope_limit_address(isolate), no_reg);
4918 ER::handle_scope_level_address(isolate), no_reg);
4919
4920 Register return_value = x0;
4921 Register scratch = x4;
4922 Register scratch2 = x5;
4923
4924 // Allocate HandleScope in callee-saved registers.
4925 // We will need to restore the HandleScope after the call to the API function;
4926 // by allocating it in callee-saved registers it'll be preserved by C code.
4927 Register prev_next_address_reg = x19;
4928 Register prev_limit_reg = x20;
4929 Register prev_level_reg = w21;
4930
4931 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
4932 // this function must not corrupt them (return_value overlaps with
4933 // kCArgRegs[0] but that's ok because we start using it only after the C
4934 // call).
4935 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
4936 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
4937 // function_address and thunk_arg might overlap but this function must not
4938 // corrupt them until the call is made (i.e. overlap with return_value is
4939 // fine).
4940 DCHECK(!AreAliased(function_address, // incoming parameters
4941 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
4942 DCHECK(!AreAliased(thunk_arg, // incoming parameters
4943 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
4944
4945 // Explicitly include x16/x17 to let StoreReturnAddressAndCall() use them.
4946 UseScratchRegisterScope fix_temps(masm);
4947 fix_temps.Include(x16, x17);
4948
4949 {
4951 "Allocate HandleScope in callee-save registers.");
4952 __ Ldr(prev_next_address_reg, next_mem_op);
4953 __ Ldr(prev_limit_reg, limit_mem_op);
4954 __ Ldr(prev_level_reg, level_mem_op);
4955 __ Add(scratch.W(), prev_level_reg, 1);
4956 __ Str(scratch.W(), level_mem_op);
4957 }
4958
4959 Label profiler_or_side_effects_check_enabled, done_api_call;
4960 if (with_profiling) {
4961 __ RecordComment("Check if profiler or side effects check is enabled");
4962 __ Ldrb(scratch.W(),
4963 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
4964 __ Cbnz(scratch.W(), &profiler_or_side_effects_check_enabled);
4965#ifdef V8_RUNTIME_CALL_STATS
4966 __ RecordComment("Check if RCS is enabled");
4967 __ Mov(scratch, ER::address_of_runtime_stats_flag());
4968 __ Ldrsw(scratch.W(), MemOperand(scratch));
4969 __ Cbnz(scratch.W(), &profiler_or_side_effects_check_enabled);
4970#endif // V8_RUNTIME_CALL_STATS
4971 }
4972
4973 __ RecordComment("Call the api function directly.");
4974 __ StoreReturnAddressAndCall(function_address);
4975 __ Bind(&done_api_call);
4976
4977 Label propagate_exception;
4978 Label delete_allocated_handles;
4979 Label leave_exit_frame;
4980
4981 __ RecordComment("Load the value from ReturnValue");
4982 __ Ldr(return_value, return_value_operand);
4983
4984 {
4986 masm,
4987 "No more valid handles (the result handle was the last one)."
4988 "Restore previous handle scope.");
4989 __ Str(prev_next_address_reg, next_mem_op);
4990 if (v8_flags.debug_code) {
4991 __ Ldr(scratch.W(), level_mem_op);
4992 __ Sub(scratch.W(), scratch.W(), 1);
4993 __ Cmp(scratch.W(), prev_level_reg);
4994 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
4995 }
4996 __ Str(prev_level_reg, level_mem_op);
4997
4998 __ Ldr(scratch, limit_mem_op);
4999 __ Cmp(prev_limit_reg, scratch);
5000 __ B(ne, &delete_allocated_handles);
5001 }
5002
5003 __ RecordComment("Leave the API exit frame.");
5004 __ Bind(&leave_exit_frame);
5005
5006 Register argc_reg = prev_limit_reg;
5007 if (argc_operand != nullptr) {
5008 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
5009 __ Ldr(argc_reg, *argc_operand);
5010 }
5011
5012 __ LeaveExitFrame(scratch, scratch2);
5013
5014 {
5016 "Check if the function scheduled an exception.");
5017 __ Mov(scratch, ER::exception_address(isolate));
5018 __ Ldr(scratch, MemOperand(scratch));
5019 __ JumpIfNotRoot(scratch, RootIndex::kTheHoleValue, &propagate_exception);
5020 }
5021
5022 __ AssertJSAny(return_value, scratch, scratch2,
5023 AbortReason::kAPICallReturnedInvalidObject);
5024
5025 if (argc_operand == nullptr) {
5026 DCHECK_NE(slots_to_drop_on_return, 0);
5027 __ DropSlots(slots_to_drop_on_return);
5028 } else {
5029 // {argc_operand} was loaded into {argc_reg} above.
5030 __ DropArguments(argc_reg, slots_to_drop_on_return);
5031 }
5032 __ Ret();
5033
5034 if (with_profiling) {
5035 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
5036 __ Bind(&profiler_or_side_effects_check_enabled);
5037 // Additional parameter is the address of the actual callback function.
5038 if (thunk_arg.is_valid()) {
5039 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
5040 IsolateFieldId::kApiCallbackThunkArgument);
5041 __ Str(thunk_arg, thunk_arg_mem_op);
5042 }
5043 __ Mov(scratch, thunk_ref);
5045 __ B(&done_api_call);
5046 }
5047
5048 __ RecordComment("An exception was thrown. Propagate it.");
5049 __ Bind(&propagate_exception);
5050 __ TailCallRuntime(Runtime::kPropagateException);
5051
5052 {
5054 masm, "HandleScope limit has changed. Delete allocated extensions.");
5055 __ Bind(&delete_allocated_handles);
5056 __ Str(prev_limit_reg, limit_mem_op);
5057 // Save the return value in a callee-save register.
5058 Register saved_result = prev_limit_reg;
5059 __ Mov(saved_result, x0);
5060 __ Mov(kCArgRegs[0], ER::isolate_address());
5061 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
5062 __ Mov(kCArgRegs[0], saved_result);
5063 __ B(&leave_exit_frame);
5064 }
5065}
5066
5067} // namespace internal
5068} // namespace v8
5069
5070#undef __
5071
5072#endif // V8_TARGET_ARCH_ARM64
void PushMultipleTimes(CPURegister src, Register count)
static CPURegList DefaultTmpList()
void JumpIfNotRoot(Register with, RootIndex index, Label *if_not_equal)
void near_call(int offset, RelocInfo::Mode rmode)
void AssertPositiveOrZero(Register value) NOOP_UNLESS_DEBUG_CODE
void LoadStoreMacroComplex(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void CompareAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void Fmov(VRegister fd, VRegister fn)
void I64x2AllTrue(Register dst, QwNeonRegister src)
void CompareRoot(Register obj, RootIndex index)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void Msr(SystemRegister sysreg, const Register &rt)
void Move(Register dst, Tagged< Smi > smi)
void I32x4BitMask(Register dst, VRegister src)
void Lsr(const Register &rd, const Register &rn, unsigned shift)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void Tst(const Register &rn, const Operand &operand)
void AtomicDecompressTaggedSigned(const Register &destination, const Register &base, const Register &index, const Register &temp)
void Bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void StoreReturnAddressAndCall(Register target)
void CopyDoubleWords(Register dst, Register src, Register count, CopyDoubleWordsMode mode=kDstLessThanSrc)
void StackOverflowCheck(Register num_args, Register scratch, Label *stack_overflow)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
void LoadTrustedPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void LoadRootRelative(Register destination, int32_t offset) final
void JumpIfSmi(Register value, Label *smi_label)
void PopHelper(int count, int size, const CPURegister &dst0, const CPURegister &dst1, const CPURegister &dst2, const CPURegister &dst3)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
void CallCodeObject(Register code_object)
void LoadSandboxedPointerField(Register destination, MemOperand field_operand)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void Movi32bitHelper(const VRegister &vd, uint64_t imm)
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag=kDefaultCodeEntrypointTag)
void TestAndBranchIfAllClear(const Register &reg, const uint64_t bit_pattern, Label *label)
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
void AtomicDecompressTagged(const Register &destination, const Register &base, const Register &index, const Register &temp)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void CallPrintf(int arg_count=0, const CPURegister *args=nullptr)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Peek(const CPURegister &dst, const Operand &offset)
void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE
void TryConvertDoubleToInt64(Register result, DoubleRegister input, Label *done)
void near_jump(int offset, RelocInfo::Mode rmode)
void Uxtw(const Register &rd, const Register &rn)
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void JumpIfUnsignedLessThan(Register x, int32_t y, Label *dest)
void PopcntHelper(Register dst, Register src)
void IndirectCall(Address target, RelocInfo::Mode rmode)
void DecodeSandboxedPointer(Register value)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void Mrs(const Register &rt, SystemRegister sysreg)
void CompareTaggedRoot(Register with, RootIndex index)
void JumpIfJSAnyIsNotPrimitive(Register heap_object, Register scratch, Label *target, Label::Distance distance=Label::kFar, Condition condition=Condition::kUnsignedGreaterThanEqual)
void Debug(const char *message, uint32_t code, Instr params=BREAK)
bool CanUseNearCallOrJump(RelocInfo::Mode rmode)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void SmiTag(Register reg, SBit s=LeaveCC)
void SbxCheck(Condition cc, AbortReason reason)
void Eor(const Register &rd, const Register &rn, const Operand &operand)
void Cmeq(const VRegister &vd, const VRegister &vn, int imm)
void Fjcvtzs(const Register &rd, const VRegister &vn)
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void Ins(const VRegister &vd, int vd_index, const VRegister &vn, int vn_index)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void Ldr(const CPURegister &rt, const Operand &imm)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)
void LoadIndirectPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void CallIndirectPointerBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode, IndirectPointerTag tag)
int LeaveFrame(StackFrame::Type type)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void LoadGlobalProxy(Register dst)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static bool IsImmMovz(uint64_t imm, unsigned reg_size)
void JumpIfMarking(Label *is_marking, Label::Distance condition_met_distance=Label::kFar)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
void StoreCodePointerField(Register value, MemOperand dst_field_operand)
void IsObjectTypeInRange(Register heap_object, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void Jump(Register target, Condition cond=al)
void LoadRoot(Register destination, RootIndex index) final
void Fcmp(const VRegister &fn, const VRegister &fm)
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void DecompressProtected(const Register &destination, const MemOperand &field_operand)
static constexpr int kExtraSlotClaimedByPrologue
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
static bool IsImmMovn(uint64_t imm, unsigned reg_size)
void CanonicalizeNaN(const VRegister &dst, const VRegister &src)
void Csel(const Register &rd, const Register &rn, const Operand &operand, Condition cond)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void CompareRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit)
void JumpCodeObject(Register code_object, JumpMode jump_mode=JumpMode::kJump)
void JumpIfCodeIsTurbofanned(Register code, Register scratch, Label *if_turbofanned)
void LoadFromConstantsTable(Register destination, int constant_index) final
void LoadProtectedPointerField(Register destination, MemOperand field_operand)
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void PokePair(const CPURegister &src1, const CPURegister &src2, int offset)
void MaybeSaveRegisters(RegList registers)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void LoadStorePairMacro(const CPURegister &rt, const CPURegister &rt2, const MemOperand &addr, LoadStorePairOp op)
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
void LoadTaggedRoot(Register destination, RootIndex index)
void PushCPURegList(CPURegList registers)
void Subs(const Register &rd, const Register &rn, const Operand &operand)
void Movi(const VRegister &vd, uint64_t imm, Shift shift=LSL, int shift_amount=0)
void I16x8BitMask(Register dst, VRegister src)
void Cset(const Register &rd, Condition cond)
void Printf(const char *format, CPURegister arg0=NoCPUReg, CPURegister arg1=NoCPUReg, CPURegister arg2=NoCPUReg, CPURegister arg3=NoCPUReg)
void StoreIndirectPointerField(Register value, MemOperand dst_field_operand)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void MovePair(Register dst0, Register src0, Register dst1, Register src1)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void PrintfNoPreserve(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void LoadStoreMacro(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
void JumpIfNotMarking(Label *not_marking, Label::Distance condition_met_distance=Label::kFar)
void I64x2BitMask(Register dst, QwNeonRegister src)
void MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset)
void Claim(int64_t count, uint64_t unit_size=kXRegSize)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
void LoadCompressedMap(Register dst, Register object)
Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void Cneg(const Register &rd, const Register &rn, Condition cond)
void Fsub(const VRegister &fd, const VRegister &fn, const VRegister &fm)
void CallBuiltin(Builtin builtin, Condition cond=al)
void PopCPURegList(CPURegList registers)
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void PeekPair(const CPURegister &dst1, const CPURegister &dst2, int offset)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void Fcvtzs(const Register &rd, const VRegister &fn)
void LoadCodePointerField(Register destination, MemOperand field_operand)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void CmpTagged(const Register &r1, const Register &r2)
void Check(Condition cond, AbortReason reason)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void TestAndBranchIfAnySet(const Register &reg, const uint64_t bit_pattern, Label *label)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void StoreTrustedPointerField(Register value, MemOperand dst_field_operand)
static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, uint8_t *pc)
void CopySlots(int dst, Register src, Register slot_count)
void StoreSandboxedPointerField(Register value, MemOperand dst_field_operand)
void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, Label *if_marked_for_deoptimization)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void AssertZeroExtended(Register int32_register)
void Mvn(const Register &rd, uint64_t imm)
static bool IsNearCallOffset(int64_t offset)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void Adr(const Register &rd, Label *label, AdrHint=kAdrNear)
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void Sub(const Register &rd, const Register &rn, const Operand &operand)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void CompareInstanceTypeRange(Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
Operand MoveImmediateForShiftedOp(const Register &dst, int64_t imm, PreShiftImmMode mode)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void LoadElementsKindFromMap(Register result, Register map)
void ConditionalCompareMacro(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void LoadIsolateField(Register dst, IsolateFieldId id)
void Cbnz(const Register &rt, Label *label)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Cbz(const Register &rt, Label *label)
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
void Abs(const Register &rd, const Register &rm, Label *is_not_representable=nullptr, Label *is_representable=nullptr)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertSpAligned() NOOP_UNLESS_DEBUG_CODE
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void Movi16bitHelper(const VRegister &vd, uint64_t imm)
bool TryOneInstrMoveImmediate(const Register &dst, int64_t imm)
void StoreTwoTaggedFields(const Register &value, const MemOperand &dst_field_operand)
void SmiUntagField(Register dst, const MemOperand &src)
void PopAll(RegList registers)
void Movi64bitHelper(const VRegister &vd, uint64_t imm)
void DecompressTagged(const Register &destination, const MemOperand &field_operand)
Condition CheckSmi(Register src)
void StoreRootRelative(int32_t offset, Register value) final
void LoadTaggedSignedField(const Register &destination, const MemOperand &field_operand)
void LoadMap(Register destination, Register object)
void AtomicStoreTaggedField(const Register &value, const Register &dst_base, const Register &dst_index, const Register &temp)
void AssertFPCRState(Register fpcr=NoReg) NOOP_UNLESS_DEBUG_CODE
void TailCallRuntime(Runtime::FunctionId fid)
void PushHelper(int count, int size, const CPURegister &src0, const CPURegister &src1, const CPURegister &src2, const CPURegister &src3)
static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size)
void Swap(Register srcdst0, Register srcdst1)
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond=al)
void LoadNativeContextSlot(Register dst, int index)
static CPURegList DefaultFPTmpList()
void LoadTaggedFieldWithoutDecompressing(const Register &destination, const MemOperand &field_operand)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void Switch(Register scratch, Register value, int case_value_base, Label **labels, int num_labels)
void AssertSmiOrHeapObjectInMainCompressionCage(Register object) NOOP_UNLESS_DEBUG_CODE
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static constexpr intptr_t GetAlignmentMaskForAssembler()
static constexpr int kAdrFarPatchableNNops
static constexpr int kAdrFarPatchableNInstrs
static constexpr Register Create(int code, int size)
static constexpr Register no_reg()
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
Definition reloc-info.h:206
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsEmbeddedObjectMode(Mode mode)
Definition reloc-info.h:209
static constexpr bool IsReadOnly(RootIndex root_index)
Definition roots.h:623
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static SlotDescriptor ForCodePointerSlot()
Definition assembler.h:311
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr VRegister no_reg()
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
DirectHandle< Object > new_target
Definition execution.cc:75
Label label
int32_t offset
TNode< Object > target
std::vector< PatternMap > pairs
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int x
uint32_t const mask
#define ASM_LOCATION_IN_ASSEMBLER(message)
#define ASM_LOCATION(message)
ReadOnlyCheck
SmiCheck
ComparisonMode
ArgumentAdaptionMode
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
InstructionOperand destination
int n
Definition mul-fft.cc:296
int int32_t
Definition unicode.cc:40
constexpr unsigned CountTrailingZeros(T value)
Definition bits.h:144
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
V8_INLINE constexpr std::optional< RootIndex > UniqueMapOfInstanceType(InstanceType type)
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr int kCodePointerTableEntrySizeLog2
constexpr AddrMode PreIndex
uint32_t AddSubWithCarryOp
constexpr Opcode ORR
constexpr uint64_t kExternalPointerTagShift
constexpr int kTaggedSize
Definition globals.h:542
constexpr Opcode AND
constexpr int64_t kXSignBit
bool IsNone(Tagged< FieldType > obj)
Definition field-type.h:50
constexpr int kFPRegisterPassedArguments
constexpr int64_t kWRegMask
DwVfpRegister DoubleRegister
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
constexpr int64_t kWSignBit
constexpr uint64_t kExternalPointerPayloadMask
static const unsigned kPrintfArgPatternBits
uint32_t ConditionalCompareOp
constexpr ShiftOp LSR
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr ExternalPointerTagRange kAnyExternalPointerTagRange(kFirstExternalPointerTag, kLastExternalPointerTag)
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
constexpr int kCodePointerTableEntryCodeObjectOffset
constexpr int kTrustedPointerTableEntrySizeLog2
constexpr int kWRegSizeInBits
const unsigned kPrintfLength
const Address kWeakHeapObjectMask
Definition globals.h:967
constexpr ShiftOp LSL
constexpr int B
constexpr Register kJavaScriptCallArgCountRegister
Address Tagged_t
Definition globals.h:547
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
Definition globals.h:557
TagRange< ExternalPointerTag > ExternalPointerTagRange
static const int kRegisterPassedArguments
const unsigned kPrintfMaxArgCount
MemOperand FieldMemOperand(Register object, int offset)
const Instr kImmExceptionIsPrintf
constexpr int kSystemPointerSize
Definition globals.h:410
const RegList kCallerSaved
Definition reglist-arm.h:42
constexpr int kXRegSizeInBitsLog2
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kDebugZapValue
Definition globals.h:1015
constexpr int S
constexpr uint32_t kZapValue
Definition globals.h:1005
constexpr bool SmiValuesAre31Bits()
constexpr LogicalOp ANDS
Condition NegateCondition(Condition cond)
constexpr int kWRegSizeInBitsLog2
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
uint32_t LoadStorePairOp
constexpr uint32_t kTrustedPointerHandleShift
constexpr uint32_t kCodePointerHandleShift
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr bool SmiValuesAre32Bits()
constexpr LogicalOp BICS
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
constexpr AddrMode PostIndex
constexpr Register kPtrComprCageBaseRegister
const intptr_t kSmiTagMask
Definition v8-internal.h:88
constexpr ShiftOp ROR
constexpr int kXRegSizeInBits
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
constexpr uint8_t kInstrSize
const int kSmiTag
Definition v8-internal.h:86
constexpr Register cp
constexpr Opcode EOR
constexpr int kDRegSize
constexpr int kQRegSizeInBits
constexpr uint64_t kTrustedPointerTableMarkBit
constexpr Register kCArgRegs[]
constexpr LogicalOp NOT
static int CountLeadingZeros(uint64_t value, int width)
Definition utils-arm64.h:34
constexpr bool is_uintn(int64_t x, unsigned n)
Definition utils.h:574
uint32_t LoadStoreOp
V8_EXPORT_PRIVATE bool AreSameSizeAndType(const CPURegister &reg1, const CPURegister &reg2=NoCPUReg, const CPURegister &reg3=NoCPUReg, const CPURegister &reg4=NoCPUReg, const CPURegister &reg5=NoCPUReg, const CPURegister &reg6=NoCPUReg, const CPURegister &reg7=NoCPUReg, const CPURegister &reg8=NoCPUReg)
constexpr Register kJavaScriptCallDispatchHandleRegister
constexpr uint32_t kCodePointerHandleMarker
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
constexpr int kXRegSize
constexpr Register NoReg
constexpr uint32_t kMaxUInt32
Definition globals.h:387
constexpr Register kJavaScriptCallNewTargetRegister
constexpr Register kJSFunctionRegister
constexpr uint64_t kExternalPointerShiftedTagMask
static V8_INLINE constexpr bool ExternalPointerCanBeEmpty(ExternalPointerTagRange tag_range)
unsigned CalcLSPairDataSize(LoadStorePairOp op)
constexpr Register padreg
constexpr bool PointerCompressionIsEnabled()
constexpr int kDRegSizeInBits
Local< T > Handle
#define kCallerSavedV
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
constexpr T RoundUp(T x, intptr_t m)
Definition macros.h:387
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define V8_STATIC_ROOTS_BOOL
Definition v8config.h:1001
#define V8_UNLIKELY(condition)
Definition v8config.h:660
wasm::ValueType type