v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-arm.cc
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h> // For LONG_MIN, LONG_MAX.
6
7#if V8_TARGET_ARCH_ARM
8
9#include "src/base/bits.h"
22#include "src/debug/debug.h"
29#include "src/runtime/runtime.h"
31
32// Satisfy cpplint check, but don't include platform-specific header. It is
33// included recursively via macro-assembler.h.
34#if 0
36#endif
37
38#define __ ACCESS_MASM(masm)
39
40namespace v8 {
41namespace internal {
42
44 Register exclusion1,
45 Register exclusion2,
46 Register exclusion3) const {
47 int bytes = 0;
48 RegList exclusions = {exclusion1, exclusion2, exclusion3};
49 RegList list = (kCallerSaved | lr) - exclusions;
50
51 bytes += list.Count() * kPointerSize;
52
53 if (fp_mode == SaveFPRegsMode::kSave) {
55 }
56
57 return bytes;
58}
59
60int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
61 Register exclusion2, Register exclusion3) {
62 ASM_CODE_COMMENT(this);
63 int bytes = 0;
64 RegList exclusions = {exclusion1, exclusion2, exclusion3};
65 RegList list = (kCallerSaved | lr) - exclusions;
66 stm(db_w, sp, list);
67
68 bytes += list.Count() * kPointerSize;
69
70 if (fp_mode == SaveFPRegsMode::kSave) {
71 SaveFPRegs(sp, lr);
73 }
74
75 return bytes;
76}
77
78int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
79 Register exclusion2, Register exclusion3) {
80 ASM_CODE_COMMENT(this);
81 int bytes = 0;
82 if (fp_mode == SaveFPRegsMode::kSave) {
83 RestoreFPRegs(sp, lr);
85 }
86
87 RegList exclusions = {exclusion1, exclusion2, exclusion3};
88 RegList list = (kCallerSaved | lr) - exclusions;
89 ldm(ia_w, sp, list);
90
91 bytes += list.Count() * kPointerSize;
92
93 return bytes;
94}
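// Illustrative usage sketch (assumed, not part of the original file): the
// Push/PopCallerSaved pair brackets code that may clobber caller-saved
// registers; excluded registers are deliberately left unsaved, e.g. a register
// that will receive the call's result. Assuming a MacroAssembler* masm and
// that the remaining exclusion parameters default to no_reg in the header:
//
//   int bytes = masm->PushCallerSaved(SaveFPRegsMode::kSave, r0);
//   // ... emit a call whose result lands in r0 ...
//   bytes += masm->PopCallerSaved(SaveFPRegsMode::kSave, r0);
//
// The byte counts returned mirror the size computation at the top of this
// file, so callers can account for the stack pointer delta.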
95
97 int constant_index) {
98 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
99
100 const uint32_t offset = OFFSET_OF_DATA_START(FixedArray) +
101 constant_index * kPointerSize - kHeapObjectTag;
102
103 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
105}
106
109}
110
111void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
113}
114
116 intptr_t offset) {
117 if (offset == 0) {
119 } else {
121 }
122}
123
125 ExternalReference reference, Register scratch) {
126 if (root_array_available()) {
127 if (reference.IsIsolateFieldId()) {
128 return MemOperand(kRootRegister, reference.offset_from_root_register());
129 }
130 if (options().enable_root_relative_access) {
131 intptr_t offset =
133 if (is_int32(offset)) {
134 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
135 }
136 }
137 if (options().isolate_independent_code) {
138 if (IsAddressableThroughRootRegister(isolate(), reference)) {
139 // Some external references can be efficiently loaded as an offset from
140 // kRootRegister.
141 intptr_t offset =
143 CHECK(is_int32(offset));
144 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
145 } else {
146 // Otherwise, do a memory load from the external reference table.
147 ldr(scratch,
150 isolate(), reference)));
151 return MemOperand(scratch, 0);
152 }
153 }
154 }
155 Move(scratch, reference);
156 return MemOperand(scratch, 0);
157}
158
159void MacroAssembler::GetLabelAddress(Register dest, Label* target) {
160 // This should be just a
161 // add(dest, pc, branch_offset(target));
162 // but the current implementation of Assembler::bind_to()/target_at_put() adds
163 // (InstructionStream::kHeaderSize - kHeapObjectTag) to the position of a label
164 // in a "linked" state, making it usable only for mov_label_offset().
165 // TODO(ishell): fix branch_offset() and re-implement
166 // RegExpMacroAssemblerARM::PushBacktrack() without mov_label_offset().
167 mov_label_offset(dest, target);
168 // mov_label_offset computes offset of the |target| relative to the "current
169 // InstructionStream object pointer" which is essentially pc_offset() of the
170 // label added with (InstructionStream::kHeaderSize - kHeapObjectTag).
171 // Compute "current InstructionStream object pointer" and add it to the
172 // offset in the |dest| register.
173 int current_instr_code_object_relative_offset =
176 add(dest, pc, dest);
177 sub(dest, dest, Operand(current_instr_code_object_relative_offset));
178}
179
180void MacroAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
181
182void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
183 Condition cond) {
184 mov(pc, Operand(target, rmode), LeaveCC, cond);
185}
186
188 Condition cond) {
190 Jump(static_cast<intptr_t>(target), rmode, cond);
191}
192
194 Condition cond) {
196 DCHECK_IMPLIES(options().isolate_independent_code,
198
200 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
201 TailCallBuiltin(builtin, cond);
202 return;
203 }
204
205 // 'code' is always generated ARM code, never THUMB code
206 Jump(static_cast<intptr_t>(code.address()), rmode, cond);
207}
208
209void MacroAssembler::Jump(const ExternalReference& reference) {
210 UseScratchRegisterScope temps(this);
211 Register scratch = temps.Acquire();
212 Move(scratch, reference);
213 Jump(scratch);
214}
215
216void MacroAssembler::Call(Register target, Condition cond) {
217 // Block constant pool for the call instruction sequence.
218 BlockConstPoolScope block_const_pool(this);
219 blx(target, cond);
220}
221
224 bool check_constant_pool) {
225 // Check if we have to emit the constant pool before we block it.
226 if (check_constant_pool) MaybeCheckConstPool();
227 // Block constant pool for the call instruction sequence.
228 BlockConstPoolScope block_const_pool(this);
229
230 bool old_predictable_code_size = predictable_code_size();
231 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
233 }
234
235 // Use ip directly instead of using UseScratchRegisterScope, as we do not
236 // preserve scratch registers across calls.
237
238 // Call sequence on V7 or later may be :
239 // movw ip, #... @ call address low 16
240 // movt ip, #... @ call address high 16
241 // blx ip
242 // @ return address
243 // Or for pre-V7 or values that may be back-patched
244 // to avoid ICache flushes:
245 // ldr ip, [pc, #...] @ call address
246 // blx ip
247 // @ return address
248
249 mov(ip, Operand(target, rmode));
250 blx(ip, cond);
251
252 if (mode == NEVER_INLINE_TARGET_ADDRESS) {
253 set_predictable_code_size(old_predictable_code_size);
254 }
255}
256
259 bool check_constant_pool) {
261 DCHECK_IMPLIES(options().isolate_independent_code,
263
265 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
266 CallBuiltin(builtin);
267 return;
268 }
269
270 // 'code' is always generated ARM code, never THUMB code
271 Call(code.address(), rmode, cond, mode);
272}
273
274void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index,
275 Register target) {
276 ASM_CODE_COMMENT(this);
277 static_assert(kSystemPointerSize == 4);
278 static_assert(kSmiShiftSize == 0);
279 static_assert(kSmiTagSize == 1);
280 static_assert(kSmiTag == 0);
281
282 // The builtin_index register contains the builtin index as a Smi.
283 mov(target,
284 Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
285 add(target, target, Operand(IsolateData::builtin_entry_table_offset()));
286 ldr(target, MemOperand(kRootRegister, target));
287}
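// Worked example of the index arithmetic above (illustrative): with
// kSystemPointerSizeLog2 == 2 and kSmiTagSize == 1, a builtin index of 3 is
// held as the Smi 6 (3 << 1). Shifting left by (2 - 1) gives 12, i.e.
// 3 * kSystemPointerSize, which after adding builtin_entry_table_offset() is
// exactly the byte offset of entry 3 in the isolate's builtin entry table.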
288
289void MacroAssembler::CallBuiltinByIndex(Register builtin_index,
290 Register target) {
291 LoadEntryFromBuiltinIndex(builtin_index, target);
292 Call(target);
293}
294
296 Register destination) {
297 ASM_CODE_COMMENT(this);
299}
300
302 ASM_CODE_COMMENT(this);
306}
307
310 // Use ip directly instead of using UseScratchRegisterScope, as we do not
311 // preserve scratch registers across calls.
312 switch (options().builtin_call_jump_mode) {
314 mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
315 Call(ip, cond);
316 break;
317 }
319 UNREACHABLE();
321 ldr(ip, EntryFromBuiltinAsOperand(builtin));
322 Call(ip, cond);
323 break;
325 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
326 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
327 int32_t code_target_index = AddCodeTarget(code);
328 bl(code_target_index * kInstrSize, cond,
330 } else {
331 ldr(ip, EntryFromBuiltinAsOperand(builtin));
332 Call(ip, cond);
333 }
334 break;
335 }
336 }
337}
338
341 CommentForOffHeapTrampoline("tail call", builtin));
342 // Use ip directly instead of using UseScratchRegisterScope, as we do not
343 // preserve scratch registers across calls.
344 switch (options().builtin_call_jump_mode) {
346 mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
347 Jump(ip, cond);
348 break;
349 }
351 UNREACHABLE();
353 ldr(ip, EntryFromBuiltinAsOperand(builtin));
354 Jump(ip, cond);
355 break;
357 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
358 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
359 int32_t code_target_index = AddCodeTarget(code);
360 b(code_target_index * kInstrSize, cond,
362 } else {
363 ldr(ip, EntryFromBuiltinAsOperand(builtin));
364 Jump(ip, cond);
365 }
366 break;
367 }
368 }
369}
370
371#ifdef V8_ENABLE_LEAPTIERING
372
373void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
374 Register dispatch_handle,
375 Register scratch) {
376 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
377 ASM_CODE_COMMENT(this);
378
379 Move(scratch, ExternalReference::js_dispatch_table_address());
380 static_assert(kJSDispatchHandleShift == 0);
381 add(scratch, scratch,
382 Operand(dispatch_handle, LSL, kJSDispatchTableEntrySizeLog2));
383 ldr(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
384}
385
386#endif // V8_ENABLE_LEAPTIERING
387
389 Register code_object,
390 CodeEntrypointTag tag) {
391 ASM_CODE_COMMENT(this);
392 ldr(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset));
393}
394
395void MacroAssembler::CallCodeObject(Register code_object) {
396 ASM_CODE_COMMENT(this);
397 LoadCodeInstructionStart(code_object, code_object);
398 Call(code_object);
399}
400
401void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
402 ASM_CODE_COMMENT(this);
403 DCHECK_EQ(JumpMode::kJump, jump_mode);
404 LoadCodeInstructionStart(code_object, code_object);
405 Jump(code_object);
406}
407
408void MacroAssembler::CallJSFunction(Register function_object,
409 uint16_t argument_count) {
411#if V8_ENABLE_LEAPTIERING
412 Register dispatch_handle = r8;
413 Register scratch = r9;
414 ldr(dispatch_handle,
415 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
416 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
417 Call(code);
418#else
419 ldr(code, FieldMemOperand(function_object, JSFunction::kCodeOffset));
420 CallCodeObject(code);
421#endif // V8_ENABLE_LEAPTIERING
422}
423
424#if V8_ENABLE_LEAPTIERING
425void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
426 uint16_t argument_count) {
428 Register dispatch_handle_reg = r8;
429 Register scratch = r9;
430 mov(dispatch_handle_reg,
431 Operand(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
432 // WARNING: This entrypoint load is only safe because we are storing a
433 // RelocInfo for the dispatch handle in the mov above (thus keeping the
434 // dispatch entry alive) _and_ because the entrypoints are not compactable
435 // (thus meaning that the calculation in the entrypoint load is not
436 // invalidated by a compaction).
437 // TODO(leszeks): Make this less of a footgun.
438 static_assert(!JSDispatchTable::kSupportsCompaction);
439 LoadEntrypointFromJSDispatchTable(code, dispatch_handle_reg, scratch);
440 CHECK_EQ(argument_count,
441 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
442 dispatch_handle));
443 Call(code);
444}
445#endif
446
447void MacroAssembler::JumpJSFunction(Register function_object,
448 JumpMode jump_mode) {
450#if V8_ENABLE_LEAPTIERING
451 Register dispatch_handle = r8;
452 Register scratch = r9;
453 ldr(dispatch_handle,
454 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
455 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
456 Jump(code);
457#else
458 ldr(code, FieldMemOperand(function_object, JSFunction::kCodeOffset));
459 JumpCodeObject(code, jump_mode);
460#endif // V8_ENABLE_LEAPTIERING
461}
462
463#ifdef V8_ENABLE_WEBASSEMBLY
464
465void MacroAssembler::ResolveWasmCodePointer(Register target) {
466 ASM_CODE_COMMENT(this);
467 static_assert(!V8_ENABLE_SANDBOX_BOOL);
468 ExternalReference global_jump_table =
469 ExternalReference::wasm_code_pointer_table();
470 UseScratchRegisterScope temps(this);
471 Register scratch = temps.Acquire();
472 Move(scratch, global_jump_table);
473 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == 4);
474 ldr(target, MemOperand(scratch, target, LSL, 2));
475}
476
477void MacroAssembler::CallWasmCodePointer(Register target,
478 CallJumpMode call_jump_mode) {
479 ResolveWasmCodePointer(target);
480 if (call_jump_mode == CallJumpMode::kTailCall) {
481 Jump(target);
482 } else {
483 Call(target);
484 }
485}
486
487#endif
488
489void MacroAssembler::StoreReturnAddressAndCall(Register target) {
490 ASM_CODE_COMMENT(this);
491 // This generates the final instruction sequence for calls to C functions
492 // once an exit frame has been constructed.
493 //
494 // Note that this assumes the caller code (i.e. the InstructionStream object
495 // currently being generated) is immovable or that the callee function cannot
496 // trigger GC, since the callee function will return to it.
497
498 // Compute the return address in lr to return to after the jump below. The pc
499 // is already at '+ 8' from the current instruction, but the return is after three
500 // instructions, so add another 4 to pc to get the return address.
501 Assembler::BlockConstPoolScope block_const_pool(this);
502 add(lr, pc, Operand(4));
503 str(lr, MemOperand(sp));
504 Call(target);
505}
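// Worked example of the return-address arithmetic above (illustrative): if the
// add is at address A, the three-instruction sequence occupies A (add),
// A + 4 (str) and A + 8 (the blx emitted by Call), so execution should resume
// at A + 12. Reading pc inside the add yields A + 8; adding 4 therefore stores
// A + 12 in lr, which is the instruction right after the call.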
506
507void MacroAssembler::Ret(Condition cond) { bx(lr, cond); }
508
509void MacroAssembler::Drop(int count, Condition cond) {
510 if (count > 0) {
511 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
512 }
513}
514
515void MacroAssembler::Drop(Register count, Condition cond) {
516 add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
517}
518
519// Enforce alignment of sp.
521 int frame_alignment = ActivationFrameAlignment();
522 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
523
524 uint32_t frame_alignment_mask = ~(static_cast<uint32_t>(frame_alignment) - 1);
525 and_(sp, sp, Operand(frame_alignment_mask));
526}
527
529 Register scratch) {
530 ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
531 tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
532}
533
534Operand MacroAssembler::ClearedValue() const {
535 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
536}
537
538void MacroAssembler::Call(Label* target) { bl(target); }
539
540void MacroAssembler::Push(Handle<HeapObject> handle) {
541 UseScratchRegisterScope temps(this);
542 Register scratch = temps.Acquire();
543 mov(scratch, Operand(handle));
544 push(scratch);
545}
546
548 UseScratchRegisterScope temps(this);
549 Register scratch = temps.Acquire();
550 mov(scratch, Operand(smi));
551 push(scratch);
552}
553
554void MacroAssembler::Push(Tagged<TaggedIndex> index) {
555 // TaggedIndex is the same as Smi for 32 bit archs.
556 Push(Smi::FromIntptr(index.value()));
557}
558
559void MacroAssembler::PushArray(Register array, Register size, Register scratch,
560 PushArrayOrder order) {
561 ASM_CODE_COMMENT(this);
562 UseScratchRegisterScope temps(this);
563 Register counter = scratch;
564 Register tmp = temps.Acquire();
565 DCHECK(!AreAliased(array, size, counter, tmp));
566 Label loop, entry;
567 if (order == PushArrayOrder::kReverse) {
568 mov(counter, Operand(0));
569 b(&entry);
570 bind(&loop);
571 ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
572 push(tmp);
573 add(counter, counter, Operand(1));
574 bind(&entry);
575 cmp(counter, size);
576 b(lt, &loop);
577 } else {
578 mov(counter, size);
579 b(&entry);
580 bind(&loop);
581 ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
582 push(tmp);
583 bind(&entry);
584 sub(counter, counter, Operand(1), SetCC);
585 b(ge, &loop);
586 }
587}
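// Worked example of the two push orders above (illustrative): for an array
// holding {a, b, c} (a at index 0) and size == 3,
//   kReverse pushes a, then b, then c, leaving c at the lowest address
//   (top of stack);
//   the default order pushes c, then b, then a, leaving a on top of the stack.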
588
589void MacroAssembler::Move(Register dst, Tagged<Smi> smi) {
590 mov(dst, Operand(smi));
591}
592
593void MacroAssembler::Move(Register dst, Handle<HeapObject> value) {
594 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
595 // non-isolate-independent code. In many cases it might be cheaper than
596 // embedding the relocatable value.
597 if (root_array_available_ && options().isolate_independent_code) {
598 IndirectLoadConstant(dst, value);
599 return;
600 }
601 mov(dst, Operand(value));
602}
603
604void MacroAssembler::Move(Register dst, ExternalReference reference) {
605 if (root_array_available()) {
606 if (reference.IsIsolateFieldId()) {
607 add(dst, kRootRegister, Operand(reference.offset_from_root_register()));
608 return;
609 }
610 if (options().isolate_independent_code) {
611 IndirectLoadExternalReference(dst, reference);
612 return;
613 }
614 }
615
616 // External references should not get created with IDs if
617 // `!root_array_available()`.
618 CHECK(!reference.IsIsolateFieldId());
619 mov(dst, Operand(reference));
620}
621
624}
625
626void MacroAssembler::Move(Register dst, Register src, Condition cond) {
627 if (dst != src) {
628 mov(dst, src, LeaveCC, cond);
629 }
630}
631
632void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
633 Condition cond) {
634 if (dst != src) {
635 vmov(dst, src, cond);
636 }
637}
638
639void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
640 Condition cond) {
641 if (dst != src) {
642 vmov(dst, src, cond);
643 }
644}
645
646void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
647 if (dst != src) {
648 vmov(dst, src);
649 }
650}
651
652void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1,
653 Register src1) {
654 DCHECK_NE(dst0, dst1);
655 if (dst0 != src1) {
656 Move(dst0, src0);
657 Move(dst1, src1);
658 } else if (dst1 != src0) {
659 // Swap the order of the moves to resolve the overlap.
660 Move(dst1, src1);
661 Move(dst0, src0);
662 } else {
663 // Worst case scenario, this is a swap.
664 Swap(dst0, src0);
665 }
666}
667
668void MacroAssembler::Swap(Register srcdst0, Register srcdst1) {
669 DCHECK(srcdst0 != srcdst1);
670 UseScratchRegisterScope temps(this);
671 Register scratch = temps.Acquire();
672 mov(scratch, srcdst0);
673 mov(srcdst0, srcdst1);
674 mov(srcdst1, scratch);
675}
676
677void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
678 DCHECK(srcdst0 != srcdst1);
681
682 if (CpuFeatures::IsSupported(NEON)) {
683 vswp(srcdst0, srcdst1);
684 } else {
685 UseScratchRegisterScope temps(this);
686 DwVfpRegister scratch = temps.AcquireD();
687 vmov(scratch, srcdst0);
688 vmov(srcdst0, srcdst1);
689 vmov(srcdst1, scratch);
690 }
691}
692
693void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
694 DCHECK(srcdst0 != srcdst1);
695 vswp(srcdst0, srcdst1);
696}
697
698void MacroAssembler::Mls(Register dst, Register src1, Register src2,
699 Register srcA, Condition cond) {
700 if (CpuFeatures::IsSupported(ARMv7)) {
701 CpuFeatureScope scope(this, ARMv7);
702 mls(dst, src1, src2, srcA, cond);
703 } else {
704 UseScratchRegisterScope temps(this);
705 Register scratch = temps.Acquire();
706 DCHECK(srcA != scratch);
707 mul(scratch, src1, src2, LeaveCC, cond);
708 sub(dst, srcA, scratch, LeaveCC, cond);
709 }
710}
711
712void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
713 Condition cond) {
714 if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
715 src2.immediate() == 0) {
716 mov(dst, Operand::Zero(), LeaveCC, cond);
717 } else if (!(src2.InstructionsRequired(this) == 1) &&
718 !src2.MustOutputRelocInfo(this) &&
720 base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
721 CpuFeatureScope scope(this, ARMv7);
722 ubfx(dst, src1, 0,
723 base::bits::WhichPowerOfTwo(static_cast<uint32_t>(src2.immediate()) +
724 1),
725 cond);
726 } else {
727 and_(dst, src1, src2, LeaveCC, cond);
728 }
729}
730
731void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
732 Condition cond) {
733 DCHECK_LT(lsb, 32);
735 int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u);
736 and_(dst, src1, Operand(mask), LeaveCC, cond);
737 if (lsb != 0) {
738 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
739 }
740 } else {
741 CpuFeatureScope scope(this, ARMv7);
742 ubfx(dst, src1, lsb, width, cond);
743 }
744}
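// Worked example of the pre-ARMv7 fallback above (illustrative): extracting
// width == 8 bits starting at lsb == 4 uses
//   mask = (1 << 12) - 1 - ((1 << 4) - 1) = 0xFF0,
// so the and_ keeps bits 4..11 and the LSR by 4 moves them down to bits 0..7,
// matching what a single ubfx dst, src1, #4, #8 would produce.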
745
746void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
747 Condition cond) {
748 DCHECK_LT(lsb, 32);
750 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
751 and_(dst, src1, Operand(mask), LeaveCC, cond);
752 int shift_up = 32 - lsb - width;
753 int shift_down = lsb + shift_up;
754 if (shift_up != 0) {
755 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
756 }
757 if (shift_down != 0) {
758 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
759 }
760 } else {
761 CpuFeatureScope scope(this, ARMv7);
762 sbfx(dst, src1, lsb, width, cond);
763 }
764}
765
766void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
767 Condition cond) {
768 DCHECK_LT(lsb, 32);
770 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
771 bic(dst, src, Operand(mask));
772 } else {
773 CpuFeatureScope scope(this, ARMv7);
774 Move(dst, src, cond);
775 bfc(dst, lsb, width, cond);
776 }
777}
778
780 Condition cond) {
783}
784
785void MacroAssembler::RecordWriteField(Register object, int offset,
786 Register value,
787 LinkRegisterStatus lr_status,
788 SaveFPRegsMode save_fp,
789 SmiCheck smi_check) {
790 ASM_CODE_COMMENT(this);
791 // First, check if a write barrier is even needed. The tests below
792 // catch stores of Smis.
793 Label done;
794
795 // Skip barrier if writing a smi.
796 if (smi_check == SmiCheck::kInline) {
797 JumpIfSmi(value, &done);
798 }
799
800 // Although the object register is tagged, the offset is relative to the start
801 // of the object, so the offset must be a multiple of kPointerSize.
803
804 if (v8_flags.slow_debug_code) {
805 ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
806 Label ok;
807 UseScratchRegisterScope temps(this);
808 Register scratch = temps.Acquire();
809 DCHECK(!AreAliased(object, value, scratch));
810 add(scratch, object, Operand(offset - kHeapObjectTag));
811 tst(scratch, Operand(kPointerSize - 1));
812 b(eq, &ok);
813 stop();
814 bind(&ok);
815 }
816
817 RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
818 save_fp, SmiCheck::kOmit);
819
820 bind(&done);
821}
822
823void MacroAssembler::Zero(const MemOperand& dest) {
824 ASM_CODE_COMMENT(this);
825 UseScratchRegisterScope temps(this);
826 Register scratch = temps.Acquire();
827
828 mov(scratch, Operand::Zero());
829 str(scratch, dest);
830}
831void MacroAssembler::Zero(const MemOperand& dest1, const MemOperand& dest2) {
832 ASM_CODE_COMMENT(this);
833 UseScratchRegisterScope temps(this);
834 Register scratch = temps.Acquire();
835
836 mov(scratch, Operand::Zero());
837 str(scratch, dest1);
838 str(scratch, dest2);
839}
840
842 if (registers.is_empty()) return;
843 ASM_CODE_COMMENT(this);
844 stm(db_w, sp, registers);
845}
846
848 if (registers.is_empty()) return;
849 ASM_CODE_COMMENT(this);
850 ldm(ia_w, sp, registers);
851}
852
853void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
854 SaveFPRegsMode fp_mode) {
855 ASM_CODE_COMMENT(this);
858
860 Register slot_address_parameter =
862 MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
863
866}
867
869 Operand offset,
870 SaveFPRegsMode fp_mode,
871 StubCallMode mode) {
872 ASM_CODE_COMMENT(this);
875
877 Register slot_address_parameter =
879 MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
880
881 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
882
884}
885
886void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
887 SaveFPRegsMode fp_mode,
888 StubCallMode mode) {
889 ASM_CODE_COMMENT(this);
892#if V8_ENABLE_WEBASSEMBLY
893 if (mode == StubCallMode::kCallWasmRuntimeStub) {
894 auto wasm_target =
895 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
896 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
897#else
898 if (false) {
899#endif
900 } else {
902 }
903}
904
905void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
906 Register object, Operand offset) {
907 DCHECK_NE(dst_object, dst_slot);
908 DCHECK(offset.IsRegister() || offset.IsImmediate());
909 // If `offset` is a register, it cannot overlap with `object`.
910 DCHECK_IMPLIES(offset.IsRegister(), offset.rm() != object);
911
912 // If the slot register does not overlap with the object register, we can
913 // overwrite it.
914 if (dst_slot != object) {
915 add(dst_slot, object, offset);
916 Move(dst_object, object);
917 return;
918 }
919
920 DCHECK_EQ(dst_slot, object);
921
922 // If the destination object register does not overlap with the offset
923 // register, we can overwrite it.
924 if (!offset.IsRegister() || (offset.rm() != dst_object)) {
925 Move(dst_object, dst_slot);
926 add(dst_slot, dst_slot, offset);
927 return;
928 }
929
930 DCHECK_EQ(dst_object, offset.rm());
931
932 // We only have `dst_slot` and `dst_object` left as distinct registers so we
933 // have to swap them. We write this as a add+sub sequence to avoid using a
934 // scratch register.
935 add(dst_slot, dst_slot, dst_object);
936 sub(dst_object, dst_slot, dst_object);
937}
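// Worked example of the add/sub swap above (illustrative): at this point
// dst_slot holds the object pointer O and dst_object holds the offset D.
//   add: dst_slot   = O + D
//   sub: dst_object = (O + D) - D = O
// so dst_slot ends up with the slot address and dst_object with the object
// pointer, without needing a scratch register.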
938
939// The register 'object' contains a heap object pointer. The heap object tag is
940// shifted away. A scratch register also needs to be available.
941void MacroAssembler::RecordWrite(Register object, Operand offset,
942 Register value, LinkRegisterStatus lr_status,
943 SaveFPRegsMode fp_mode, SmiCheck smi_check) {
944 DCHECK(!AreAliased(object, value));
945 if (v8_flags.slow_debug_code) {
946 ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
947 UseScratchRegisterScope temps(this);
948 Register scratch = temps.Acquire();
949 DCHECK(!AreAliased(object, value, scratch));
950 add(scratch, object, offset);
951 ldr(scratch, MemOperand(scratch));
952 cmp(scratch, value);
953 Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
954 }
955
956 if (v8_flags.disable_write_barriers) {
957 return;
958 }
959
960 // First, check if a write barrier is even needed. The tests below
961 // catch stores of smis and stores into the young generation.
962 Label done;
963
964 if (smi_check == SmiCheck::kInline) {
965 JumpIfSmi(value, &done);
966 }
967
969 &done);
971 &done);
972
973 // Record the actual write.
974 if (lr_status == kLRHasNotBeenSaved) {
975 push(lr);
976 }
977
979 DCHECK(!AreAliased(object, value, slot_address));
980 DCHECK(!offset.IsRegister());
981 add(slot_address, object, offset);
982 CallRecordWriteStub(object, slot_address, fp_mode);
983 if (lr_status == kLRHasNotBeenSaved) {
984 pop(lr);
985 }
986
987 if (v8_flags.slow_debug_code) Move(slot_address, Operand(kZapValue));
988
989 bind(&done);
990}
991
992void MacroAssembler::PushCommonFrame(Register marker_reg) {
993 ASM_CODE_COMMENT(this);
994 if (marker_reg.is_valid()) {
995 if (marker_reg.code() > fp.code()) {
996 stm(db_w, sp, {fp, lr});
997 mov(fp, Operand(sp));
998 Push(marker_reg);
999 } else {
1000 stm(db_w, sp, {marker_reg, fp, lr});
1001 add(fp, sp, Operand(kPointerSize));
1002 }
1003 } else {
1004 stm(db_w, sp, {fp, lr});
1005 mov(fp, sp);
1006 }
1007}
1008
1009void MacroAssembler::PushStandardFrame(Register function_reg) {
1010 ASM_CODE_COMMENT(this);
1011 DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
1012 stm(db_w, sp, {function_reg, cp, fp, lr});
1014 offset += function_reg.is_valid() ? kPointerSize : 0;
1015 add(fp, sp, Operand(offset));
1017}
1018
1019void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
1020 const DwVfpRegister src,
1021 const Condition cond) {
1022 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
1023 // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
1024 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1025 vsub(dst, src, kDoubleRegZero, cond);
1026}
1027
1028void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
1029 const SwVfpRegister src2,
1030 const Condition cond) {
1031 // Compare and move FPSCR flags to the normal condition flags.
1032 VFPCompareAndLoadFlags(src1, src2, pc, cond);
1033}
1034
1035void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
1036 const float src2,
1037 const Condition cond) {
1038 // Compare and move FPSCR flags to the normal condition flags.
1039 VFPCompareAndLoadFlags(src1, src2, pc, cond);
1040}
1041
1042void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
1043 const DwVfpRegister src2,
1044 const Condition cond) {
1045 // Compare and move FPSCR flags to the normal condition flags.
1046 VFPCompareAndLoadFlags(src1, src2, pc, cond);
1047}
1048
1049void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
1050 const double src2,
1051 const Condition cond) {
1052 // Compare and move FPSCR flags to the normal condition flags.
1053 VFPCompareAndLoadFlags(src1, src2, pc, cond);
1054}
1055
1056void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
1057 const SwVfpRegister src2,
1058 const Register fpscr_flags,
1059 const Condition cond) {
1060 // Compare and load FPSCR.
1061 vcmp(src1, src2, cond);
1062 vmrs(fpscr_flags, cond);
1063}
1064
1065void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
1066 const float src2,
1067 const Register fpscr_flags,
1068 const Condition cond) {
1069 // Compare and load FPSCR.
1070 vcmp(src1, src2, cond);
1071 vmrs(fpscr_flags, cond);
1072}
1073
1074void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
1075 const DwVfpRegister src2,
1076 const Register fpscr_flags,
1077 const Condition cond) {
1078 // Compare and load FPSCR.
1079 vcmp(src1, src2, cond);
1080 vmrs(fpscr_flags, cond);
1081}
1082
1083void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
1084 const double src2,
1085 const Register fpscr_flags,
1086 const Condition cond) {
1087 // Compare and load FPSCR.
1088 vcmp(src1, src2, cond);
1089 vmrs(fpscr_flags, cond);
1090}
1091
1092void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
1093 if (src.code() < 16) {
1094 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
1095 vmov(dst, loc.high());
1096 } else {
1097 vmov(NeonS32, dst, src, 1);
1098 }
1099}
1100
1101void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
1102 if (dst.code() < 16) {
1103 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
1104 vmov(loc.high(), src);
1105 } else {
1106 vmov(NeonS32, dst, 1, src);
1107 }
1108}
1109
1110void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
1111 if (src.code() < 16) {
1112 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
1113 vmov(dst, loc.low());
1114 } else {
1115 vmov(NeonS32, dst, src, 0);
1116 }
1117}
1118
1119void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
1120 if (dst.code() < 16) {
1121 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
1122 vmov(loc.low(), src);
1123 } else {
1124 vmov(NeonS32, dst, 0, src);
1125 }
1126}
1127
1128void MacroAssembler::VmovExtended(Register dst, int src_code) {
1131 if (src_code & 0x1) {
1132 VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
1133 } else {
1134 VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
1135 }
1136}
1137
1138void MacroAssembler::VmovExtended(int dst_code, Register src) {
1141 if (dst_code & 0x1) {
1142 VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
1143 } else {
1144 VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
1145 }
1146}
1147
1148void MacroAssembler::VmovExtended(int dst_code, int src_code) {
1149 if (src_code == dst_code) return;
1150
1151 if (src_code < SwVfpRegister::kNumRegisters &&
1152 dst_code < SwVfpRegister::kNumRegisters) {
1153 // src and dst are both s-registers.
1155 SwVfpRegister::from_code(src_code));
1156 return;
1157 }
1158 DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
1159 DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
1160 int dst_offset = dst_code & 1;
1161 int src_offset = src_code & 1;
1162 if (CpuFeatures::IsSupported(NEON)) {
1163 UseScratchRegisterScope temps(this);
1164 DwVfpRegister scratch = temps.AcquireD();
1165 // On Neon we can shift and insert from d-registers.
1166 if (src_offset == dst_offset) {
1167 // Offsets are the same, use vdup to copy the source to the opposite lane.
1168 vdup(Neon32, scratch, src_d_reg, src_offset);
1169 // Here we are extending the lifetime of scratch.
1170 src_d_reg = scratch;
1171 src_offset = dst_offset ^ 1;
1172 }
1173 if (dst_offset) {
1174 if (dst_d_reg == src_d_reg) {
1175 vdup(Neon32, dst_d_reg, src_d_reg, 0);
1176 } else {
1177 vsli(Neon64, dst_d_reg, src_d_reg, 32);
1178 }
1179 } else {
1180 if (dst_d_reg == src_d_reg) {
1181 vdup(Neon32, dst_d_reg, src_d_reg, 1);
1182 } else {
1183 vsri(Neon64, dst_d_reg, src_d_reg, 32);
1184 }
1185 }
1186 return;
1187 }
1188
1189 // Without Neon, use the scratch registers to move src and/or dst into
1190 // s-registers.
1191 UseScratchRegisterScope temps(this);
1192 LowDwVfpRegister d_scratch = temps.AcquireLowD();
1193 LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
1194 int s_scratch_code = d_scratch.low().code();
1195 int s_scratch_code2 = d_scratch2.low().code();
1196 if (src_code < SwVfpRegister::kNumRegisters) {
1197 // src is an s-register, dst is not.
1198 vmov(d_scratch, dst_d_reg);
1199 vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
1200 SwVfpRegister::from_code(src_code));
1201 vmov(dst_d_reg, d_scratch);
1202 } else if (dst_code < SwVfpRegister::kNumRegisters) {
1203 // dst is an s-register, src is not.
1204 vmov(d_scratch, src_d_reg);
1206 SwVfpRegister::from_code(s_scratch_code + src_offset));
1207 } else {
1208 // Neither src nor dst is an s-register. Both scratch double registers are
1209 // available when there are 32 VFP registers.
1210 vmov(d_scratch, src_d_reg);
1211 vmov(d_scratch2, dst_d_reg);
1212 vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
1213 SwVfpRegister::from_code(s_scratch_code2 + src_offset));
1214 vmov(dst_d_reg, d_scratch2);
1215 }
1216}
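// Worked example of the extended register coding above (illustrative):
// s-register codes 0..31 name real s-registers, while codes 32..63 name halves
// of d16..d31: code / 2 picks the d-register and code & 1 picks the half, so
// code 35 means the high 32 bits of d17. Moving, say, code 35 into code 2 (s2)
// takes the "dst is an s-register, src is not" path and goes through a low
// d scratch register.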
1217
1218void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
1219 if (dst_code < SwVfpRegister::kNumRegisters) {
1220 vldr(SwVfpRegister::from_code(dst_code), src);
1221 } else {
1222 UseScratchRegisterScope temps(this);
1223 LowDwVfpRegister scratch = temps.AcquireLowD();
1224 // TODO(bbudge) If Neon supported, use load single lane form of vld1.
1225 int dst_s_code = scratch.low().code() + (dst_code & 1);
1226 vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
1227 vldr(SwVfpRegister::from_code(dst_s_code), src);
1228 vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
1229 }
1230}
1231
1232void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
1233 if (src_code < SwVfpRegister::kNumRegisters) {
1234 vstr(SwVfpRegister::from_code(src_code), dst);
1235 } else {
1236 // TODO(bbudge) If Neon supported, use store single lane form of vst1.
1237 UseScratchRegisterScope temps(this);
1238 LowDwVfpRegister scratch = temps.AcquireLowD();
1239 int src_s_code = scratch.low().code() + (src_code & 1);
1240 vmov(scratch, DwVfpRegister::from_code(src_code / 2));
1241 vstr(SwVfpRegister::from_code(src_s_code), dst);
1242 }
1243}
1244
1245void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
1246 NeonDataType dt, int lane) {
1247 int size = NeonSz(dt); // 0, 1, 2
1248 int byte = lane << size;
1249 int double_word = byte >> kDoubleSizeLog2;
1250 int double_byte = byte & (kDoubleSize - 1);
1251 int double_lane = double_byte >> size;
1252 DwVfpRegister double_source =
1253 DwVfpRegister::from_code(src.code() * 2 + double_word);
1254 vmov(dt, dst, double_source, double_lane);
1255}
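// Worked example of the lane arithmetic above (illustrative): extracting lane 5
// of a NeonS16 vector in q1 gives size = 1, byte = 10, double_word = 1,
// double_byte = 2 and double_lane = 1, so the value is read from lane 1 of d3
// (q1's upper half), which is exactly where 16-bit lane 5 of q1 lives.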
1256
1257void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
1258 NeonDataType dt, int lane) {
1259 int size = NeonSz(dt); // 0, 1, 2
1260 int byte = lane << size;
1261 int double_byte = byte & (kDoubleSize - 1);
1262 int double_lane = double_byte >> size;
1263 vmov(dt, dst, src, double_lane);
1264}
1265
1266void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
1267 int lane) {
1268 int s_code = src.code() * 4 + lane;
1269 VmovExtended(dst.code(), s_code);
1270}
1271
1272void MacroAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src,
1273 int lane) {
1274 DwVfpRegister double_dst = DwVfpRegister::from_code(src.code() * 2 + lane);
1275 vmov(dst, double_dst);
1276}
1277
1278void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1279 Register src_lane, NeonDataType dt, int lane) {
1280 Move(dst, src);
1281 int size = NeonSz(dt); // 0, 1, 2
1282 int byte = lane << size;
1283 int double_word = byte >> kDoubleSizeLog2;
1284 int double_byte = byte & (kDoubleSize - 1);
1285 int double_lane = double_byte >> size;
1286 DwVfpRegister double_dst =
1287 DwVfpRegister::from_code(dst.code() * 2 + double_word);
1288 vmov(dt, double_dst, double_lane, src_lane);
1289}
1290
1291void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1292 SwVfpRegister src_lane, int lane) {
1293 Move(dst, src);
1294 int s_code = dst.code() * 4 + lane;
1295 VmovExtended(s_code, src_lane.code());
1296}
1297
1298void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1299 DwVfpRegister src_lane, int lane) {
1300 Move(dst, src);
1301 DwVfpRegister double_dst = DwVfpRegister::from_code(dst.code() * 2 + lane);
1302 vmov(double_dst, src_lane);
1303}
1304
1305void MacroAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list,
1306 uint8_t lane, NeonMemOperand src) {
1307 if (sz == Neon64) {
1308 // vld1s is not valid for Neon64.
1309 vld1(Neon64, dst_list, src);
1310 } else {
1311 vld1s(sz, dst_list, lane, src);
1312 }
1313}
1314
1315void MacroAssembler::StoreLane(NeonSize sz, NeonListOperand src_list,
1316 uint8_t lane, NeonMemOperand dst) {
1317 if (sz == Neon64) {
1318 // vst1s is not valid for Neon64.
1319 vst1(Neon64, src_list, dst);
1320 } else {
1321 vst1s(sz, src_list, lane, dst);
1322 }
1323}
1324
1325void MacroAssembler::LslPair(Register dst_low, Register dst_high,
1326 Register src_low, Register src_high,
1327 Register shift) {
1328 DCHECK(!AreAliased(dst_high, src_low));
1329 DCHECK(!AreAliased(dst_high, shift));
1330 UseScratchRegisterScope temps(this);
1331 Register scratch = temps.Acquire();
1332
1333 Label less_than_32;
1334 Label done;
1335 rsb(scratch, shift, Operand(32), SetCC);
1336 b(gt, &less_than_32);
1337 // If shift >= 32
1338 and_(scratch, shift, Operand(0x1F));
1339 lsl(dst_high, src_low, Operand(scratch));
1340 mov(dst_low, Operand(0));
1341 jmp(&done);
1342 bind(&less_than_32);
1343 // If shift < 32
1344 lsl(dst_high, src_high, Operand(shift));
1345 orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
1346 lsl(dst_low, src_low, Operand(shift));
1347 bind(&done);
1348}
1349
1350void MacroAssembler::LslPair(Register dst_low, Register dst_high,
1351 Register src_low, Register src_high,
1352 uint32_t shift) {
1353 DCHECK_GE(63, shift);
1354 DCHECK(!AreAliased(dst_high, src_low));
1355
1356 if (shift == 0) {
1357 Move(dst_high, src_high);
1358 Move(dst_low, src_low);
1359 } else if (shift == 32) {
1360 Move(dst_high, src_low);
1361 Move(dst_low, Operand(0));
1362 } else if (shift >= 32) {
1363 shift &= 0x1F;
1364 lsl(dst_high, src_low, Operand(shift));
1365 mov(dst_low, Operand(0));
1366 } else {
1367 lsl(dst_high, src_high, Operand(shift));
1368 orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
1369 lsl(dst_low, src_low, Operand(shift));
1370 }
1371}
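// Worked example of the constant-shift cases above (illustrative): shifting a
// 64-bit value held in {src_high:src_low} left by 40 falls into the
// shift >= 32 branch with shift & 0x1F == 8, so dst_high = src_low << 8 and
// dst_low = 0, which is the expected result of a 64-bit left shift by 40 on a
// 32-bit register pair.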
1372
1373void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
1374 Register src_low, Register src_high,
1375 Register shift) {
1376 DCHECK(!AreAliased(dst_low, src_high));
1377 DCHECK(!AreAliased(dst_low, shift));
1378 UseScratchRegisterScope temps(this);
1379 Register scratch = temps.Acquire();
1380
1381 Label less_than_32;
1382 Label done;
1383 rsb(scratch, shift, Operand(32), SetCC);
1384 b(gt, &less_than_32);
1385 // If shift >= 32
1386 and_(scratch, shift, Operand(0x1F));
1387 lsr(dst_low, src_high, Operand(scratch));
1388 mov(dst_high, Operand(0));
1389 jmp(&done);
1390 bind(&less_than_32);
1391 // If shift < 32
1392
1393 lsr(dst_low, src_low, Operand(shift));
1394 orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1395 lsr(dst_high, src_high, Operand(shift));
1396 bind(&done);
1397}
1398
1399void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
1400 Register src_low, Register src_high,
1401 uint32_t shift) {
1402 DCHECK_GE(63, shift);
1403 DCHECK(!AreAliased(dst_low, src_high));
1404
1405 if (shift == 32) {
1406 mov(dst_low, src_high);
1407 mov(dst_high, Operand(0));
1408 } else if (shift > 32) {
1409 shift &= 0x1F;
1410 lsr(dst_low, src_high, Operand(shift));
1411 mov(dst_high, Operand(0));
1412 } else if (shift == 0) {
1413 Move(dst_low, src_low);
1414 Move(dst_high, src_high);
1415 } else {
1416 lsr(dst_low, src_low, Operand(shift));
1417 orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1418 lsr(dst_high, src_high, Operand(shift));
1419 }
1420}
1421
1422void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
1423 Register src_low, Register src_high,
1424 Register shift) {
1425 DCHECK(!AreAliased(dst_low, src_high));
1426 DCHECK(!AreAliased(dst_low, shift));
1427 UseScratchRegisterScope temps(this);
1428 Register scratch = temps.Acquire();
1429
1430 Label less_than_32;
1431 Label done;
1432 rsb(scratch, shift, Operand(32), SetCC);
1433 b(gt, &less_than_32);
1434 // If shift >= 32
1435 and_(scratch, shift, Operand(0x1F));
1436 asr(dst_low, src_high, Operand(scratch));
1437 asr(dst_high, src_high, Operand(31));
1438 jmp(&done);
1439 bind(&less_than_32);
1440 // If shift < 32
1441 lsr(dst_low, src_low, Operand(shift));
1442 orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1443 asr(dst_high, src_high, Operand(shift));
1444 bind(&done);
1445}
1446
1447void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
1448 Register src_low, Register src_high,
1449 uint32_t shift) {
1450 DCHECK_GE(63, shift);
1451 DCHECK(!AreAliased(dst_low, src_high));
1452
1453 if (shift == 32) {
1454 mov(dst_low, src_high);
1455 asr(dst_high, src_high, Operand(31));
1456 } else if (shift > 32) {
1457 shift &= 0x1F;
1458 asr(dst_low, src_high, Operand(shift));
1459 asr(dst_high, src_high, Operand(31));
1460 } else if (shift == 0) {
1461 Move(dst_low, src_low);
1462 Move(dst_high, src_high);
1463 } else {
1464 lsr(dst_low, src_low, Operand(shift));
1465 orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1466 asr(dst_high, src_high, Operand(shift));
1467 }
1468}
1469
1471 ASM_CODE_COMMENT(this);
1472 UseScratchRegisterScope temps(this);
1473 Register scratch = temps.Acquire();
1474 mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1475 PushCommonFrame(scratch);
1476}
1477
1479
1480void MacroAssembler::DropArguments(Register count) {
1481 add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC);
1482}
1483
1485 Register receiver) {
1486 DCHECK(!AreAliased(argc, receiver));
1487 DropArguments(argc);
1488 push(receiver);
1489}
1490
1492 bool load_constant_pool_pointer_reg) {
1493 ASM_CODE_COMMENT(this);
1494 // r0-r3: preserved
1495 UseScratchRegisterScope temps(this);
1496 Register scratch = no_reg;
1497 if (!StackFrame::IsJavaScript(type)) {
1498 scratch = temps.Acquire();
1499 mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1500 }
1501 PushCommonFrame(scratch);
1502#if V8_ENABLE_WEBASSEMBLY
1503 if (type == StackFrame::WASM) Push(kWasmImplicitArgRegister);
1504#endif // V8_ENABLE_WEBASSEMBLY
1505}
1506
1508 ASM_CODE_COMMENT(this);
1509 // r0: preserved
1510 // r1: preserved
1511 // r2: preserved
1512
1513 // Drop the execution stack down to the frame pointer and restore
1514 // the caller frame pointer and return address.
1515 mov(sp, fp);
1516 int frame_ends = pc_offset();
1517 ldm(ia_w, sp, {fp, lr});
1518 return frame_ends;
1519}
1520
1521#ifdef V8_OS_WIN
1522void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
1523 // "Functions that allocate 4 KB or more on the stack must ensure that each
1524 // page prior to the final page is touched in order." Source:
1525 // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack
1526 ASM_CODE_COMMENT(this);
1527 UseScratchRegisterScope temps(this);
1528 DwVfpRegister scratch = temps.AcquireD();
1529 Label check_offset;
1530 Label touch_next_page;
1531 jmp(&check_offset);
1532 bind(&touch_next_page);
1533 sub(sp, sp, Operand(kStackPageSize));
1534 // Just to touch the page before we move sp further down.
1535 vldr(scratch, MemOperand(sp));
1536 sub(bytes_scratch, bytes_scratch, Operand(kStackPageSize));
1537
1539 cmp(bytes_scratch, Operand(kStackPageSize));
1540 b(gt, &touch_next_page);
1541
1542 sub(sp, sp, bytes_scratch);
1543}
1544
1545void MacroAssembler::AllocateStackSpace(int bytes) {
1546 ASM_CODE_COMMENT(this);
1547 DCHECK_GE(bytes, 0);
1548 UseScratchRegisterScope temps(this);
1549 DwVfpRegister scratch = no_dreg;
1550 while (bytes > kStackPageSize) {
1551 if (scratch == no_dreg) {
1552 scratch = temps.AcquireD();
1553 }
1554 sub(sp, sp, Operand(kStackPageSize));
1555 vldr(scratch, MemOperand(sp));
1556 bytes -= kStackPageSize;
1557 }
1558 if (bytes == 0) return;
1559 sub(sp, sp, Operand(bytes));
1560}
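// Worked example of the page-touching loop above (illustrative): with a 4 KB
// kStackPageSize, AllocateStackSpace(10 * 1024) lowers sp by one page and
// performs a dummy vldr twice (touching the pages at sp - 4 KB and sp - 8 KB)
// and then subtracts the remaining 2 KB, so every page between the old and the
// new sp has been probed as the Windows ARM ABI requires.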
1561#endif
1562
1563void MacroAssembler::EnterExitFrame(Register scratch, int stack_space,
1564 StackFrame::Type frame_type) {
1565 ASM_CODE_COMMENT(this);
1566 DCHECK(frame_type == StackFrame::EXIT ||
1567 frame_type == StackFrame::BUILTIN_EXIT ||
1568 frame_type == StackFrame::API_ACCESSOR_EXIT ||
1569 frame_type == StackFrame::API_CALLBACK_EXIT);
1570
1571 using ER = ExternalReference;
1572
1573 // Set up the frame structure on the stack.
1577 mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
1578 PushCommonFrame(scratch);
1579 // Reserve room for saved entry sp.
1581 if (v8_flags.debug_code) {
1582 mov(scratch, Operand::Zero());
1584 }
1585
1586 // Save the frame pointer and the context in top.
1587 ER c_entry_fp_address =
1588 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
1589 str(fp, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
1590
1591 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
1592 str(cp, ExternalReferenceAsOperand(context_address, no_reg));
1593
1594 // Reserve place for the return address and stack space and align the frame
1595 // preparing for calling the runtime function.
1596 AllocateStackSpace((stack_space + 1) * kPointerSize);
1598
1599 // Set the exit frame sp value to point just before the return address
1600 // location.
1601 add(scratch, sp, Operand(kPointerSize));
1603}
1604
1606#if V8_HOST_ARCH_ARM
1607 // Running on the real platform. Use the alignment as mandated by the local
1608 // environment.
1609 // Note: This will break if we ever start generating snapshots on one ARM
1610 // platform for another ARM platform with a different alignment.
1612#else // V8_HOST_ARCH_ARM
1613 // If we are using the simulator then we should always align to the expected
1614 // alignment. As the simulator is used to generate snapshots we do not know
1615 // if the target platform will need alignment, so this is controlled from a
1616 // flag.
1617 return v8_flags.sim_stack_alignment;
1618#endif // V8_HOST_ARCH_ARM
1619}
1620
1621void MacroAssembler::LeaveExitFrame(Register scratch) {
1622 ASM_CODE_COMMENT(this);
1623 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1624
1625 using ER = ExternalReference;
1626
1627 // Restore current context from top and clear it in debug mode.
1628 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
1629 ldr(cp, ExternalReferenceAsOperand(context_address, no_reg));
1630#ifdef DEBUG
1631 mov(scratch, Operand(Context::kInvalidContext));
1632 str(scratch, ExternalReferenceAsOperand(context_address, no_reg));
1633#endif
1634
1635 // Clear the top frame.
1636 ER c_entry_fp_address =
1637 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
1638 mov(scratch, Operand::Zero());
1639 str(scratch, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
1640
1641 // Tear down the exit frame, pop the arguments, and return.
1642 mov(sp, Operand(fp));
1643 ldm(ia_w, sp, {fp, lr});
1644}
1645
1646void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1647 if (use_eabi_hardfloat()) {
1648 Move(dst, d0);
1649 } else {
1650 vmov(dst, r0, r1);
1651 }
1652}
1653
1654// On ARM this is just a synonym to make the purpose clear.
1655void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1656 MovFromFloatResult(dst);
1657}
1658
1660 ASM_CODE_COMMENT(this);
1664 : IsolateData::jslimit_offset();
1665 CHECK(is_int32(offset));
1667}
1668
1669void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
1670 Label* stack_overflow) {
1671 ASM_CODE_COMMENT(this);
1672 // Check the stack for overflow. We are not trying to catch
1673 // interruptions (e.g. debug break and preemption) here, so the "real stack
1674 // limit" is checked.
1676 // Make scratch the space we have left. The stack might already be overflowed
1677 // here which will cause scratch to become negative.
1678 sub(scratch, sp, scratch);
1679 // Check if the arguments will overflow the stack.
1680 cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
1681 b(le, stack_overflow); // Signed comparison.
1682}
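// Worked example of the check above (illustrative): once the real stack limit L
// has been loaded into scratch, the sub makes scratch = sp - L, the number of
// bytes left. Pushing num_args pointers needs num_args * kPointerSize bytes, so
// the signed "le" branch fires when sp - L <= num_args * 4, i.e. when the
// arguments would not fit (the signed compare also catches the case where sp is
// already below the limit and sp - L is negative).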
1683
1684void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1685 Register actual_parameter_count,
1686 InvokeType type) {
1687 ASM_CODE_COMMENT(this);
1688 Label regular_invoke;
1689 // r0: actual arguments count
1690 // r1: function (passed through to callee)
1691 // r2: expected arguments count
1692 DCHECK_EQ(actual_parameter_count, r0);
1693 DCHECK_EQ(expected_parameter_count, r2);
1694
1695 // If overapplication or if the actual argument count is equal to the
1696 // formal parameter count, no need to push extra undefined values.
1697 sub(expected_parameter_count, expected_parameter_count,
1698 actual_parameter_count, SetCC);
1699 b(le, &regular_invoke);
1700
1701 Label stack_overflow;
1702 Register scratch = r4;
1703 StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
1704
1705 // Underapplication. Move the arguments already in the stack, including the
1706 // receiver and the return address.
1707 {
1708 Label copy, check;
1709 Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
1710 mov(src, sp);
1711 // Update stack pointer.
1712 lsl(scratch, expected_parameter_count, Operand(kSystemPointerSizeLog2));
1713 AllocateStackSpace(scratch);
1714 mov(dest, sp);
1715 mov(num, actual_parameter_count);
1716 b(&check);
1717 bind(&copy);
1718 ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
1719 str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
1720 sub(num, num, Operand(1), SetCC);
1721 bind(&check);
1722 b(gt, &copy);
1723 }
1724
1725 // Fill remaining expected arguments with undefined values.
1726 LoadRoot(scratch, RootIndex::kUndefinedValue);
1727 {
1728 Label loop;
1729 bind(&loop);
1731 sub(expected_parameter_count, expected_parameter_count, Operand(1), SetCC);
1732 b(gt, &loop);
1733 }
1734 b(&regular_invoke);
1735
1736 bind(&stack_overflow);
1737 {
1738 FrameScope frame(
1739 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1740 CallRuntime(Runtime::kThrowStackOverflow);
1741 bkpt(0);
1742 }
1743
1744 bind(&regular_invoke);
1745}
1746
1747void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
1748 Register expected_parameter_count,
1749 Register actual_parameter_count) {
1750 ASM_CODE_COMMENT(this);
1751 // Load receiver to pass it later to DebugOnFunctionCall hook.
1752 ldr(r4, ReceiverOperand());
1753 FrameScope frame(
1754 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1755
1756 SmiTag(expected_parameter_count);
1757 Push(expected_parameter_count);
1758
1759 SmiTag(actual_parameter_count);
1760 Push(actual_parameter_count);
1761
1762 if (new_target.is_valid()) {
1764 }
1765 Push(fun);
1766 Push(fun);
1767 Push(r4);
1768 CallRuntime(Runtime::kDebugOnFunctionCall);
1769 Pop(fun);
1770 if (new_target.is_valid()) {
1771 Pop(new_target);
1772 }
1773
1774 Pop(actual_parameter_count);
1775 SmiUntag(actual_parameter_count);
1776
1777 Pop(expected_parameter_count);
1778 SmiUntag(expected_parameter_count);
1779}
1780
1781void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1782 Register expected_parameter_count,
1783 Register actual_parameter_count,
1784 InvokeType type) {
1785 ASM_CODE_COMMENT(this);
1786 // You can't call a function without a valid frame.
1788 DCHECK_EQ(function, r1);
1789 DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
1790
1791 // On function call, call into the debugger if necessary.
1792 Label debug_hook, continue_after_hook;
1793 {
1794 ExternalReference debug_hook_active =
1795 ExternalReference::debug_hook_on_function_call_address(isolate());
1796 Move(r4, debug_hook_active);
1797 ldrsb(r4, MemOperand(r4));
1798 cmp(r4, Operand(0));
1799 b(ne, &debug_hook);
1800 }
1801 bind(&continue_after_hook);
1802
1803 // Clear the new.target register if not given.
1804 if (!new_target.is_valid()) {
1805 LoadRoot(r3, RootIndex::kUndefinedValue);
1806 }
1807
1808 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
1809 // We call indirectly through the code field in the function to
1810 // allow recompilation to take effect without changing any of the
1811 // call sites.
1812 constexpr int unused_argument_count = 0;
1813 switch (type) {
1814 case InvokeType::kCall:
1815 CallJSFunction(function, unused_argument_count);
1816 break;
1817 case InvokeType::kJump:
1818 JumpJSFunction(function);
1819 break;
1820 }
1821 Label done;
1822 b(&done);
1823
1824 // Deferred debug hook.
1825 bind(&debug_hook);
1826 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
1827 actual_parameter_count);
1828 b(&continue_after_hook);
1829
1830 // Continue here if InvokePrologue does handle the invocation due to
1831 // mismatched parameter counts.
1832 bind(&done);
1833}
1834
1836 Register fun, Register new_target, Register actual_parameter_count,
1837 InvokeType type) {
1838 ASM_CODE_COMMENT(this);
1839 // You can't call a function without a valid frame.
1841
1842 // Contract with called JS functions requires that function is passed in r1.
1843 DCHECK_EQ(fun, r1);
1844
1845 Register expected_reg = r2;
1846 Register temp_reg = r4;
1847
1848 ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1849 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1850 ldrh(expected_reg,
1851 FieldMemOperand(temp_reg,
1852 SharedFunctionInfo::kFormalParameterCountOffset));
1853
1854 InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1855 type);
1856}
1857
1858void MacroAssembler::InvokeFunction(Register function,
1859 Register expected_parameter_count,
1860 Register actual_parameter_count,
1861 InvokeType type) {
1862 ASM_CODE_COMMENT(this);
1863 // You can't call a function without a valid frame.
1865
1866 // Contract with called JS functions requires that function is passed in r1.
1867 DCHECK_EQ(function, r1);
1868
1869 // Get the function and setup the context.
1870 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1871
1872 InvokeFunctionCode(r1, no_reg, expected_parameter_count,
1873 actual_parameter_count, type);
1874}
1875
1877 ASM_CODE_COMMENT(this);
1878 // Adjust this code if not the case.
1879 static_assert(StackHandlerConstants::kSize == 2 * kPointerSize);
1881
1882 Push(Smi::zero()); // Padding.
1883 // Link the current handler as the next handler.
1884 Move(r6,
1885 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1886 ldr(r5, MemOperand(r6));
1887 push(r5);
1888 // Set this new handler as the current one.
1889 str(sp, MemOperand(r6));
1890}
1891
1893 ASM_CODE_COMMENT(this);
1894 UseScratchRegisterScope temps(this);
1895 Register scratch = temps.Acquire();
1896 static_assert(StackHandlerConstants::kNextOffset == 0);
1897 pop(r1);
1898 Move(scratch,
1899 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1900 str(r1, MemOperand(scratch));
1901 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1902}
1903
1904void MacroAssembler::CompareObjectType(Register object, Register map,
1905 Register type_reg, InstanceType type) {
1906 ASM_CODE_COMMENT(this);
1907 UseScratchRegisterScope temps(this);
1908 const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
1909
1910 LoadMap(map, object);
1911 CompareInstanceType(map, temp, type);
1912}
1913
1914void MacroAssembler::CompareObjectTypeRange(Register object, Register map,
1915 Register type_reg, Register scratch,
1916 InstanceType lower_limit,
1917 InstanceType upper_limit) {
1918 ASM_CODE_COMMENT(this);
1919 LoadMap(map, object);
1920 CompareInstanceTypeRange(map, type_reg, scratch, lower_limit, upper_limit);
1921}
1922
1923void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1924 InstanceType type) {
1925 ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1926 cmp(type_reg, Operand(type));
1927}
1928
1929void MacroAssembler::CompareRange(Register value, Register scratch,
1930 unsigned lower_limit, unsigned higher_limit) {
1931 ASM_CODE_COMMENT(this);
1932 DCHECK_LT(lower_limit, higher_limit);
1933 if (lower_limit != 0) {
1934 sub(scratch, value, Operand(lower_limit));
1935 cmp(scratch, Operand(higher_limit - lower_limit));
1936 } else {
1937 cmp(value, Operand(higher_limit));
1938 }
1939}
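// The subtraction-based range check above folds a two-sided test into one
// unsigned compare: after subtracting lower_limit, any value below the range
// wraps around to a large unsigned number, so a single comparison against
// (higher_limit - lower_limit) suffices, and callers branch on unsigned
// conditions (see JumpIfIsInRange below, which uses ls). For example, for the
// range [5, 10]: 7 - 5 = 2 <= 5 is in range, while 3 - 5 wraps to 0xFFFFFFFE
// and is out of range.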
1940void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
1941 Register scratch,
1942 InstanceType lower_limit,
1943 InstanceType higher_limit) {
1944 ASM_CODE_COMMENT(this);
1945 DCHECK_LT(lower_limit, higher_limit);
1946 ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1947 CompareRange(type_reg, scratch, lower_limit, higher_limit);
1948}
1949
1950void MacroAssembler::CompareTaggedRoot(Register obj, RootIndex index) {
1951 CompareRoot(obj, index);
1952}
1953
1954void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1955 UseScratchRegisterScope temps(this);
1956 Register scratch = temps.Acquire();
1957 DCHECK(obj != scratch);
1958 LoadRoot(scratch, index);
1959 cmp(obj, scratch);
1960}
1961
1962void MacroAssembler::JumpIfIsInRange(Register value, Register scratch,
1963 unsigned lower_limit,
1964 unsigned higher_limit,
1965 Label* on_in_range) {
1966 ASM_CODE_COMMENT(this);
1967 CompareRange(value, scratch, lower_limit, higher_limit);
1968 b(ls, on_in_range);
1969}
1970
1971void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1972 DwVfpRegister double_input,
1973 Label* done) {
1974 ASM_CODE_COMMENT(this);
1975 UseScratchRegisterScope temps(this);
1976 SwVfpRegister single_scratch = SwVfpRegister::no_reg();
1977 if (temps.CanAcquireVfp<SwVfpRegister>()) {
1978 single_scratch = temps.AcquireS();
1979 } else {
1980 // Reuse the input as a scratch register. However, we can only do this if
1981 // the input register is d0-d15 as there are no s32+ registers.
1982 DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
1983 LowDwVfpRegister double_scratch =
1984 LowDwVfpRegister::from_code(double_input.code());
1985 single_scratch = double_scratch.low();
1986 }
1987 vcvt_s32_f64(single_scratch, double_input);
1988 vmov(result, single_scratch);
1989
1990 Register scratch = temps.Acquire();
1991 // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
1992 sub(scratch, result, Operand(1));
1993 cmp(scratch, Operand(0x7FFFFFFE));
1994 b(lt, done);
1995}
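// Saturation check above: vcvt_s32_f64 clamps out-of-range inputs to
// INT32_MIN (0x80000000) or INT32_MAX (0x7FFFFFFF). Subtracting 1 maps these
// to 0x7FFFFFFF and 0x7FFFFFFE, so the signed compare against 0x7FFFFFFE is
// "lt" for every other result; only the two boundary values fall through to
// the out-of-line (stub) path.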
1996
1997void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1998 Register result,
1999 DwVfpRegister double_input,
2000 StubCallMode stub_mode) {
2001 ASM_CODE_COMMENT(this);
2002 Label done;
2003
2004 TryInlineTruncateDoubleToI(result, double_input, &done);
2005
2006 // If we fell through then inline version didn't succeed - call stub instead.
2007 push(lr);
2008 AllocateStackSpace(kDoubleSize); // Put input on stack.
2009 vstr(double_input, MemOperand(sp, 0));
2010
2011#if V8_ENABLE_WEBASSEMBLY
2012 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2013 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
2014#else
2015 // For balance.
2016 if (false) {
2017#endif // V8_ENABLE_WEBASSEMBLY
2018 } else {
2019 CallBuiltin(Builtin::kDoubleToI);
2020 }
2021 ldr(result, MemOperand(sp, 0));
2022
2023 add(sp, sp, Operand(kDoubleSize));
2024 pop(lr);
2025
2026 bind(&done);
2027}
2028
2029namespace {
2030
2031#ifndef V8_ENABLE_LEAPTIERING
2032
2033void TailCallOptimizedCodeSlot(MacroAssembler* masm,
2034 Register optimized_code_entry,
2035 Register scratch) {
2036 // ----------- S t a t e -------------
2037 // -- r0 : actual argument count
2038 // -- r3 : new target (preserved for callee if needed, and caller)
2039 // -- r1 : target function (preserved for callee if needed, and caller)
2040 // -----------------------------------
2041 DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
2042
2043 Register closure = r1;
2044 Label heal_optimized_code_slot;
2045
2046 // If the optimized code is cleared, go to runtime to update the optimization
2047 // marker field.
2048 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
2049 &heal_optimized_code_slot);
2050
2051 // The entry references a CodeWrapper object. Unwrap it now.
2052 __ ldr(optimized_code_entry,
2053 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset));
2054
2055 // Check if the optimized code is marked for deopt. If it is, call the
2056 // runtime to clear it.
2057 {
2058 UseScratchRegisterScope temps(masm);
2059 __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire());
2060 __ b(ne, &heal_optimized_code_slot);
2061 }
2062
2063 // Optimized code is good, get it into the closure and link the closure
2064 // into the optimized functions list, then tail call the optimized code.
2065 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure);
2066 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
2067 __ LoadCodeInstructionStart(r2, optimized_code_entry);
2068 __ Jump(r2);
2069
2070 // Optimized code slot contains deoptimized code or code is cleared and
2071 // optimized code marker isn't updated. Evict the code, update the marker
2072 // and re-enter the closure's code.
2073 __ bind(&heal_optimized_code_slot);
2074 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
2075}
2076
2077#endif // V8_ENABLE_LEAPTIERING
2078
2079} // namespace
2080
2081#ifdef V8_ENABLE_DEBUG_CODE
2082void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
2083 if (v8_flags.debug_code) {
2084 CompareObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE);
2085 Assert(eq, AbortReason::kExpectedFeedbackCell);
2086 }
2087}
2088void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
2089 if (v8_flags.debug_code) {
2090 CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
2091 Assert(eq, AbortReason::kExpectedFeedbackVector);
2092 }
2093}
2094#endif // V8_ENABLE_DEBUG_CODE
2095
2096void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
2097 Register optimized_code, Register closure) {
2098 ASM_CODE_COMMENT(this);
2099#ifdef V8_ENABLE_LEAPTIERING
2100 UNREACHABLE();
2101#else
2102 DCHECK(!AreAliased(optimized_code, closure));
2103 // Store code entry in the closure.
2104 str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
2105 RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
2108#endif // V8_ENABLE_LEAPTIERING
2109}
2110
2111void MacroAssembler::GenerateTailCallToReturnedCode(
2112 Runtime::FunctionId function_id) {
2113 // ----------- S t a t e -------------
2114 // -- r0 : actual argument count
2115 // -- r1 : target function (preserved for callee)
2116 // -- r3 : new target (preserved for callee)
2117 // -----------------------------------
2118 {
2119 FrameAndConstantPoolScope scope(this, StackFrame::INTERNAL);
2120 // Push a copy of the target function, the new target and the actual
2121 // argument count.
2122 // Push function as parameter to the runtime call.
2126
2127 CallRuntime(function_id, 1);
2128 mov(r2, r0);
2129
2130 // Restore target function, new target and actual argument count.
2134 }
2135 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
2136 JumpCodeObject(r2);
2137}
2138
2139#ifndef V8_ENABLE_LEAPTIERING
2140
2141// Read off the flags in the feedback vector and check if there
2142// is optimized code or a tiering state that needs to be processed.
2143Condition MacroAssembler::LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
2144 Register flags, Register feedback_vector, CodeKind current_code_kind) {
2145 ASM_CODE_COMMENT(this);
2146 DCHECK(!AreAliased(flags, feedback_vector));
2147 DCHECK(CodeKindCanTierUp(current_code_kind));
2148 ldrh(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
2152 if (current_code_kind != CodeKind::MAGLEV) {
2154 }
2155 tst(flags, Operand(kFlagsMask));
2156 return ne;
2157}
2158
2159void MacroAssembler::CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
2160 Register flags, Register feedback_vector, CodeKind current_code_kind,
2161 Label* flags_need_processing) {
2162 ASM_CODE_COMMENT(this);
2163 b(LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(flags, feedback_vector,
2164 current_code_kind),
2165 flags_need_processing);
2166}
2167
2168void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
2169 Register flags, Register feedback_vector) {
2170 ASM_CODE_COMMENT(this);
2171 DCHECK(!AreAliased(flags, feedback_vector));
2172 Label maybe_has_optimized_code, maybe_needs_logging;
2173 // Check if optimized code is available.
2175 b(eq, &maybe_needs_logging);
2176 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
2177
2178 bind(&maybe_needs_logging);
2179 tst(flags, Operand(FeedbackVector::LogNextExecutionBit::kMask));
2180 b(eq, &maybe_has_optimized_code);
2181 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
2182
2183 bind(&maybe_has_optimized_code);
2184 Register optimized_code_entry = flags;
2185 ldr(optimized_code_entry,
2186 FieldMemOperand(feedback_vector,
2187 FeedbackVector::kMaybeOptimizedCodeOffset));
2188 TailCallOptimizedCodeSlot(this, optimized_code_entry, r6);
2189}
2190
2191#endif // !V8_ENABLE_LEAPTIERING
2192
2193void MacroAssembler::CallRuntime(const Runtime::Function* f,
2194 int num_arguments) {
2195 ASM_CODE_COMMENT(this);
2196 // All parameters are on the stack. r0 has the return value after call.
2197
2198 // If the expected number of arguments of the runtime function is
2199 // constant, we check that the actual number of arguments match the
2200 // expectation.
2201 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2202
2203 // TODO(1236192): Most runtime routines don't need the number of
2204 // arguments passed in because it is constant. At some point we
2205 // should remove this need and make the runtime routine entry code
2206 // smarter.
2207 mov(r0, Operand(num_arguments));
2209 bool switch_to_central_stack = options().is_wasm;
2210 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central_stack));
2211}
2212
2213void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2214 ASM_CODE_COMMENT(this);
2215 const Runtime::Function* function = Runtime::FunctionForId(fid);
2216 DCHECK_EQ(1, function->result_size);
2217 if (function->nargs >= 0) {
2218 // TODO(1236192): Most runtime routines don't need the number of
2219 // arguments passed in because it is constant. At some point we
2220 // should remove this need and make the runtime routine entry code
2221 // smarter.
2222 mov(r0, Operand(function->nargs));
2223 }
2224 JumpToExternalReference(ExternalReference::Create(fid));
2225}
2226
2227void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2228 bool builtin_exit_frame) {
2229#if defined(__thumb__)
2230 // Thumb mode builtin.
2231 DCHECK_EQ(builtin.address() & 1, 1);
2232#endif
2233 Move(r1, builtin);
2234 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
2235}
2236
2237void MacroAssembler::LoadWeakValue(Register out, Register in,
2238 Label* target_if_cleared) {
2239 cmp(in, Operand(kClearedWeakHeapObjectLower32));
2240 b(eq, target_if_cleared);
2241
2242 and_(out, in, Operand(~kWeakHeapObjectMask));
2243}
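// A cleared weak reference is a fixed bit pattern
// (kClearedWeakHeapObjectLower32) and is detected first; otherwise the weak
// tag bit is masked off (~kWeakHeapObjectMask) to recover the strong
// HeapObject pointer in |out|.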
2244
2245void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
2246 Register scratch1,
2247 Register scratch2) {
2248 DCHECK_GT(value, 0);
2249 if (v8_flags.native_code_counters && counter->Enabled()) {
2250 ASM_CODE_COMMENT(this);
2251 Move(scratch2, ExternalReference::Create(counter));
2252 ldr(scratch1, MemOperand(scratch2));
2253 add(scratch1, scratch1, Operand(value));
2254 str(scratch1, MemOperand(scratch2));
2255 }
2256}
2257
2258void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
2259 Register scratch1,
2260 Register scratch2) {
2261 DCHECK_GT(value, 0);
2262 if (v8_flags.native_code_counters && counter->Enabled()) {
2263 ASM_CODE_COMMENT(this);
2264 Move(scratch2, ExternalReference::Create(counter));
2265 ldr(scratch1, MemOperand(scratch2));
2266 sub(scratch1, scratch1, Operand(value));
2267 str(scratch1, MemOperand(scratch2));
2268 }
2269}
2270
2271#ifdef V8_ENABLE_DEBUG_CODE
2272void MacroAssembler::Assert(Condition cond, AbortReason reason) {
2273 if (v8_flags.debug_code) Check(cond, reason);
2274}
2275
2277 if (v8_flags.debug_code) Abort(reason);
2278}
2279
2280void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
2281 if (!v8_flags.debug_code) return;
2282 ASM_CODE_COMMENT(this);
2283 static_assert(kSmiTag == 0);
2284 tst(object, Operand(kSmiTagMask));
2285 Check(ne, reason);
2286}
2287
2288void MacroAssembler::AssertSmi(Register object, AbortReason reason) {
2289 if (!v8_flags.debug_code) return;
2290 ASM_CODE_COMMENT(this);
2291 static_assert(kSmiTag == 0);
2292 tst(object, Operand(kSmiTagMask));
2293 Check(eq, reason);
2294}
2295
2296void MacroAssembler::AssertMap(Register object) {
2297 if (!v8_flags.debug_code) return;
2298 ASM_CODE_COMMENT(this);
2299 AssertNotSmi(object, AbortReason::kOperandIsNotAMap);
2300
2301 UseScratchRegisterScope temps(this);
2302 Register temp = temps.Acquire();
2303
2304 CompareObjectType(object, temp, temp, MAP_TYPE);
2305 Check(eq, AbortReason::kOperandIsNotAMap);
2306}
2307
2308void MacroAssembler::AssertConstructor(Register object) {
2309 if (!v8_flags.debug_code) return;
2310 ASM_CODE_COMMENT(this);
2311 static_assert(kSmiTag == 0);
2312 tst(object, Operand(kSmiTagMask));
2313 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
2314 push(object);
2315 LoadMap(object, object);
2316 ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
2317 tst(object, Operand(Map::Bits1::IsConstructorBit::kMask));
2318 pop(object);
2319 Check(ne, AbortReason::kOperandIsNotAConstructor);
2320}
2321
2322void MacroAssembler::AssertFunction(Register object) {
2323 if (!v8_flags.debug_code) return;
2324 ASM_CODE_COMMENT(this);
2325 static_assert(kSmiTag == 0);
2326 tst(object, Operand(kSmiTagMask));
2327 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
2328 push(object);
2329 LoadMap(object, object);
2330 CompareInstanceTypeRange(object, object, object, FIRST_JS_FUNCTION_TYPE,
2331 LAST_JS_FUNCTION_TYPE);
2332 pop(object);
2333 Check(ls, AbortReason::kOperandIsNotAFunction);
2334}
2335
2336void MacroAssembler::AssertCallableFunction(Register object) {
2337 if (!v8_flags.debug_code) return;
2338 ASM_CODE_COMMENT(this);
2339 static_assert(kSmiTag == 0);
2340 tst(object, Operand(kSmiTagMask));
2341 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
2342 push(object);
2343 LoadMap(object, object);
2344 CompareInstanceTypeRange(object, object, object,
2347 pop(object);
2348 Check(ls, AbortReason::kOperandIsNotACallableFunction);
2349}
2350
2351void MacroAssembler::AssertBoundFunction(Register object) {
2352 if (!v8_flags.debug_code) return;
2353 ASM_CODE_COMMENT(this);
2354 static_assert(kSmiTag == 0);
2355 tst(object, Operand(kSmiTagMask));
2356 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
2357 push(object);
2358 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2359 pop(object);
2360 Check(eq, AbortReason::kOperandIsNotABoundFunction);
2361}
2362
2363void MacroAssembler::AssertGeneratorObject(Register object) {
2364 if (!v8_flags.debug_code) return;
2365 ASM_CODE_COMMENT(this);
2366 tst(object, Operand(kSmiTagMask));
2367 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2368
2369 // Load map
2370 Register map = object;
2371 push(object);
2372 LoadMap(map, object);
2373
2374 // Check if JSGeneratorObject
2375 Register scratch = object;
2376 CompareInstanceTypeRange(map, scratch, scratch,
2377 FIRST_JS_GENERATOR_OBJECT_TYPE,
2378 LAST_JS_GENERATOR_OBJECT_TYPE);
2379 // Restore generator object to register and perform assertion
2380 pop(object);
2381 Check(ls, AbortReason::kOperandIsNotAGeneratorObject);
2382}
2383
2384void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2385 Register scratch) {
2386 if (!v8_flags.debug_code) return;
2387 ASM_CODE_COMMENT(this);
2388 Label done_checking;
2389 AssertNotSmi(object);
2390 CompareRoot(object, RootIndex::kUndefinedValue);
2391 b(eq, &done_checking);
2392 LoadMap(scratch, object);
2393 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2394 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2395 bind(&done_checking);
2396}
2397
2398void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
2399 Register tmp, AbortReason abort_reason) {
2400 if (!v8_flags.debug_code) return;
2401
2402 ASM_CODE_COMMENT(this);
2403 DCHECK(!AreAliased(object, map_tmp, tmp));
2404 Label ok;
2405
2406 JumpIfSmi(object, &ok);
2407
2408 LoadMap(map_tmp, object);
2409 CompareInstanceType(map_tmp, tmp, LAST_NAME_TYPE);
2411
2412 CompareInstanceType(map_tmp, tmp, FIRST_JS_RECEIVER_TYPE);
2414
2415 CompareRoot(map_tmp, RootIndex::kHeapNumberMap);
2416 b(kEqual, &ok);
2417
2418 CompareRoot(map_tmp, RootIndex::kBigIntMap);
2419 b(kEqual, &ok);
2420
2421 CompareRoot(object, RootIndex::kUndefinedValue);
2422 b(kEqual, &ok);
2423
2424 CompareRoot(object, RootIndex::kTrueValue);
2425 b(kEqual, &ok);
2426
2427 CompareRoot(object, RootIndex::kFalseValue);
2428 b(kEqual, &ok);
2429
2430 CompareRoot(object, RootIndex::kNullValue);
2431 b(kEqual, &ok);
2432
2433 Abort(abort_reason);
2434
2435 bind(&ok);
2436}
2437
2438#endif // V8_ENABLE_DEBUG_CODE
2439
2440void MacroAssembler::Check(Condition cond, AbortReason reason) {
2441 Label L;
2442 b(cond, &L);
2443 Abort(reason);
2444 // will not return here
2445 bind(&L);
2446}
2447
2448void MacroAssembler::Abort(AbortReason reason) {
2449 ASM_CODE_COMMENT(this);
2450 if (v8_flags.code_comments) {
2451 RecordComment("Abort message:", SourceLocation{});
2452 RecordComment(GetAbortReason(reason), SourceLocation{});
2453 }
2454
2455 // Without debug code, save the code size and just trap.
2456 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
2457 stop();
2458 return;
2459 }
2460
2461 if (should_abort_hard()) {
2462 // We don't care if we constructed a frame. Just pretend we did.
2463 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
2464 Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
2465 PrepareCallCFunction(1, 0, r1);
2466 Move(r1, ExternalReference::abort_with_reason());
2467 // Use Call directly to avoid any unneeded overhead. The function won't
2468 // return anyway.
2469 Call(r1);
2470 return;
2471 }
2472
2473 Move(r1, Smi::FromInt(static_cast<int>(reason)));
2474
2475 {
2476 // We don't actually want to generate a pile of code for this, so just
2477 // claim there is a stack frame, without generating one.
2478 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
2479 if (root_array_available()) {
2480 // Generate an indirect call via builtins entry table here in order to
2481 // ensure that the interpreter_entry_return_pc_offset is the same for
2482 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
2483 // when v8_flags.debug_code is enabled.
2484 LoadEntryFromBuiltin(Builtin::kAbort, ip);
2485 Call(ip);
2486 } else {
2487 CallBuiltin(Builtin::kAbort);
2488 }
2489 }
2490 // will not return here
2491}
2492
2493void MacroAssembler::LoadMap(Register destination, Register object) {
2494 ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset));
2495}
2496
2497void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
2498 Register scratch, Label* fbv_undef) {
2499 Label done;
2500
2501 // Load the feedback vector from the closure.
2502 ldr(dst, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
2503 ldr(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset));
2504
2505 // Check if feedback vector is valid.
2507 ldrh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2508 cmp(scratch, Operand(FEEDBACK_VECTOR_TYPE));
2509 b(eq, &done);
2510
2511 // Not valid, load undefined.
2512 LoadRoot(dst, RootIndex::kUndefinedValue);
2513 b(fbv_undef);
2514
2515 bind(&done);
2516}
2517
2518void MacroAssembler::LoadGlobalProxy(Register dst) {
2519 ASM_CODE_COMMENT(this);
2520 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
2521}
2522
2523void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2524 ASM_CODE_COMMENT(this);
2525 LoadMap(dst, cp);
2526 ldr(dst, FieldMemOperand(
2527 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
2528 ldr(dst, MemOperand(dst, Context::SlotOffset(index)));
2529}
2530
2531void MacroAssembler::InitializeRootRegister() {
2532 ASM_CODE_COMMENT(this);
2533 ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
2534 mov(kRootRegister, Operand(isolate_root));
2535}
2536
2537void MacroAssembler::SmiTag(Register reg, SBit s) {
2538 add(reg, reg, Operand(reg), s);
2539}
2540
2541void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
2542 add(dst, src, Operand(src), s);
2543}
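// On 32-bit ARM a Smi keeps the integer value in the upper 31 bits with a
// zero tag bit (kSmiTag == 0), so tagging is simply value + value, i.e. a
// left shift by one: 5 becomes 10 (0b1010). SmiTst and JumpIfSmi below test
// that low tag bit.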
2544
2545void MacroAssembler::SmiTst(Register value) {
2546 tst(value, Operand(kSmiTagMask));
2547}
2548
2549void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
2550 tst(value, Operand(kSmiTagMask));
2551 b(eq, smi_label);
2552}
2553
2554void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
2555 cmp(x, Operand(y));
2556 b(eq, dest);
2557}
2558
2559void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
2560 cmp(x, Operand(y));
2561 b(lt, dest);
2562}
2563
2564void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
2565 tst(value, Operand(kSmiTagMask));
2566 b(ne, not_smi_label);
2567}
2568
2569void MacroAssembler::CheckFor32DRegs(Register scratch) {
2570 ASM_CODE_COMMENT(this);
2571 Move(scratch, ExternalReference::cpu_features());
2572 ldr(scratch, MemOperand(scratch));
2573 tst(scratch, Operand(1u << VFP32DREGS));
2574}
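// CheckFor32DRegs only sets the condition flags: ne means the upper bank
// d16-d31 (VFP32DREGS) is available, eq means only d0-d15 exist. The FP
// save/restore helpers below use these conditions to either transfer the
// upper bank or skip the corresponding 16 * kDoubleSize bytes.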
2575
2576void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
2577 ASM_CODE_COMMENT(this);
2578 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2579 CheckFor32DRegs(scratch);
2580 vstm(db_w, location, d16, d31, ne);
2581 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2582 vstm(db_w, location, d0, d15);
2583}
2584
2585void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
2586 ASM_CODE_COMMENT(this);
2587 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2588 CheckFor32DRegs(scratch);
2589 vldm(ia_w, location, d0, d15);
2590 vldm(ia_w, location, d16, d31, ne);
2591 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2592}
2593
2594void MacroAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
2595 ASM_CODE_COMMENT(this);
2596 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2597 CheckFor32DRegs(scratch);
2598 vstm(ia_w, location, d0, d15);
2599 vstm(ia_w, location, d16, d31, ne);
2600 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2601}
2602
2603void MacroAssembler::RestoreFPRegsFromHeap(Register location,
2604 Register scratch) {
2605 ASM_CODE_COMMENT(this);
2606 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2607 CheckFor32DRegs(scratch);
2608 vldm(ia_w, location, d0, d15);
2609 vldm(ia_w, location, d16, d31, ne);
2610 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2611}
2612
2613template <typename T>
2614void MacroAssembler::FloatMaxHelper(T result, T left, T right,
2615 Label* out_of_line) {
2616 // This trivial case is caught sooner, so that the out-of-line code can be
2617 // completely avoided.
2618 DCHECK(left != right);
2619
2620 if (CpuFeatures::IsSupported(ARMv8)) {
2621 CpuFeatureScope scope(this, ARMv8);
2622 VFPCompareAndSetFlags(left, right);
2623 b(vs, out_of_line);
2624 vmaxnm(result, left, right);
2625 } else {
2626 Label done;
2627 VFPCompareAndSetFlags(left, right);
2628 b(vs, out_of_line);
2629 // Avoid a conditional instruction if the result register is unique.
2630 bool aliased_result_reg = result == left || result == right;
2631 Move(result, right, aliased_result_reg ? mi : al);
2632 Move(result, left, gt);
2633 b(ne, &done);
2634 // Left and right are equal, but check for +/-0.
2635 VFPCompareAndSetFlags(left, 0.0);
2636 b(eq, out_of_line);
2637 // The arguments are equal and not zero, so it doesn't matter which input we
2638 // pick. We have already moved one input into the result (if it didn't
2639 // already alias) so there's nothing more to do.
2640 bind(&done);
2641 }
2642}
2643
2644template <typename T>
2645void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
2646 DCHECK(left != right);
2647
2648 // ARMv8: At least one of left and right is a NaN.
2649 // Anything else: At least one of left and right is a NaN, or both left and
2650 // right are zeroes with unknown sign.
2651
2652 // If left and right are +/-0, select the one with the most positive sign.
2653 // If left or right are NaN, vadd propagates the appropriate one.
2654 vadd(result, left, right);
2655}
2656
2657template <typename T>
2658void MacroAssembler::FloatMinHelper(T result, T left, T right,
2659 Label* out_of_line) {
2660 // This trivial case is caught sooner, so that the out-of-line code can be
2661 // completely avoided.
2662 DCHECK(left != right);
2663
2664 if (CpuFeatures::IsSupported(ARMv8)) {
2665 CpuFeatureScope scope(this, ARMv8);
2666 VFPCompareAndSetFlags(left, right);
2667 b(vs, out_of_line);
2668 vminnm(result, left, right);
2669 } else {
2670 Label done;
2671 VFPCompareAndSetFlags(left, right);
2672 b(vs, out_of_line);
2673 // Avoid a conditional instruction if the result register is unique.
2674 bool aliased_result_reg = result == left || result == right;
2675 Move(result, left, aliased_result_reg ? mi : al);
2676 Move(result, right, gt);
2677 b(ne, &done);
2678 // Left and right are equal, but check for +/-0.
2679 VFPCompareAndSetFlags(left, 0.0);
2680 // If the arguments are equal and not zero, it doesn't matter which input we
2681 // pick. We have already moved one input into the result (if it didn't
2682 // already alias) so there's nothing more to do.
2683 b(ne, &done);
2684 // At this point, both left and right are either 0 or -0.
2685 // We could use a single 'vorr' instruction here if we had NEON support.
2686 // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
2687 // as -((-L) - R).
2688 if (left == result) {
2689 DCHECK(right != result);
2690 vneg(result, left);
2691 vsub(result, result, right);
2692 vneg(result, result);
2693 } else {
2694 DCHECK(left != result);
2695 vneg(result, right);
2696 vsub(result, result, left);
2697 vneg(result, result);
2698 }
2699 bind(&done);
2700 }
2701}
2702
2703template <typename T>
2704void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
2705 DCHECK(left != right);
2706
2707 // At least one of left and right is a NaN. Use vadd to propagate the NaN
2708 // appropriately. +/-0 is handled inline.
2709 vadd(result, left, right);
2710}
2711
2712void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
2713 SwVfpRegister right, Label* out_of_line) {
2714 FloatMaxHelper(result, left, right, out_of_line);
2715}
2716
2717void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
2718 SwVfpRegister right, Label* out_of_line) {
2719 FloatMinHelper(result, left, right, out_of_line);
2720}
2721
2722void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
2723 DwVfpRegister right, Label* out_of_line) {
2724 FloatMaxHelper(result, left, right, out_of_line);
2725}
2726
2727void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
2728 DwVfpRegister right, Label* out_of_line) {
2729 FloatMinHelper(result, left, right, out_of_line);
2730}
2731
2732void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
2733 SwVfpRegister right) {
2734 FloatMaxOutOfLineHelper(result, left, right);
2735}
2736
2737void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
2738 SwVfpRegister right) {
2739 FloatMinOutOfLineHelper(result, left, right);
2740}
2741
2742void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
2743 DwVfpRegister right) {
2744 FloatMaxOutOfLineHelper(result, left, right);
2745}
2746
2747void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
2748 DwVfpRegister right) {
2749 FloatMinOutOfLineHelper(result, left, right);
2750}
2751
2752int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2753 int num_double_arguments) {
2754 int stack_passed_words = 0;
2755 if (use_eabi_hardfloat()) {
2756 // In the hard floating point calling convention, we can use the first 8
2757 // registers to pass doubles.
2758 if (num_double_arguments > kDoubleRegisterPassedArguments) {
2759 stack_passed_words +=
2760 2 * (num_double_arguments - kDoubleRegisterPassedArguments);
2761 }
2762 } else {
2763 // In the soft floating point calling convention, every double
2764 // argument is passed using two registers.
2765 num_reg_arguments += 2 * num_double_arguments;
2766 }
2767 // Up to four simple arguments are passed in registers r0..r3.
2768 if (num_reg_arguments > kRegisterPassedArguments) {
2769 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2770 }
2771 return stack_passed_words;
2772}
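// Worked example: with the hard-float ABI, 5 integer arguments and 9 doubles
// need (5 - 4) + 2 * (9 - 8) = 3 stack words, since r0-r3 carry the first
// four integer arguments and d0-d7 the first eight doubles. With the
// soft-float ABI, 2 integer arguments plus 1 double occupy 2 + 2 = 4 core
// registers and need no stack words.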
2773
2774void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2775 int num_double_arguments,
2776 Register scratch) {
2777 ASM_CODE_COMMENT(this);
2778 int frame_alignment = ActivationFrameAlignment();
2779 int stack_passed_arguments =
2780 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2781 if (frame_alignment > kPointerSize) {
2782 UseScratchRegisterScope temps(this);
2783 if (!scratch.is_valid()) scratch = temps.Acquire();
2784 // Make stack end at alignment and make room for num_arguments - 4 words
2785 // and the original value of sp.
2786 mov(scratch, sp);
2787 AllocateStackSpace((stack_passed_arguments + 1) * kPointerSize);
2789 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
2790 } else if (stack_passed_arguments > 0) {
2791 AllocateStackSpace(stack_passed_arguments * kPointerSize);
2792 }
2793}
2794
2795void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
2796 DCHECK(src == d0);
2797 if (!use_eabi_hardfloat()) {
2798 vmov(r0, r1, src);
2799 }
2800}
2801
2802// On ARM this is just a synonym to make the purpose clear.
2803void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
2804 MovToFloatParameter(src);
2805}
2806
2807void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
2808 DwVfpRegister src2) {
2809 DCHECK(src1 == d0);
2810 DCHECK(src2 == d1);
2811 if (!use_eabi_hardfloat()) {
2812 vmov(r0, r1, src1);
2813 vmov(r2, r3, src2);
2814 }
2815}
2816
2817int MacroAssembler::CallCFunction(ExternalReference function,
2818 int num_reg_arguments,
2819 int num_double_arguments,
2820 SetIsolateDataSlots set_isolate_data_slots,
2821 Label* return_label) {
2822 UseScratchRegisterScope temps(this);
2823 Register scratch = temps.Acquire();
2824 Move(scratch, function);
2825 return CallCFunction(scratch, num_reg_arguments, num_double_arguments,
2826 set_isolate_data_slots, return_label);
2827}
2828
2829int MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
2830 int num_double_arguments,
2831 SetIsolateDataSlots set_isolate_data_slots,
2832 Label* return_label) {
2833 ASM_CODE_COMMENT(this);
2834 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2835 DCHECK(has_frame());
2836 // Make sure that the stack is aligned before calling a C function unless
2837 // running in the simulator. The simulator has its own alignment check which
2838 // provides more information.
2839#if V8_HOST_ARCH_ARM
2840 if (v8_flags.debug_code) {
2841 int frame_alignment = base::OS::ActivationFrameAlignment();
2842 int frame_alignment_mask = frame_alignment - 1;
2843 if (frame_alignment > kPointerSize) {
2844 ASM_CODE_COMMENT_STRING(this, "Check stack alignment");
2845 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2846 Label alignment_as_expected;
2847 tst(sp, Operand(frame_alignment_mask));
2848 b(eq, &alignment_as_expected);
2849 // Don't use Check here, as it will call Runtime_Abort possibly
2850 // re-entering here.
2851 stop();
2852 bind(&alignment_as_expected);
2853 }
2854 }
2855#endif
2856
2857 Label get_pc;
2858
2859 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2860 Register pc_scratch = r5;
2861 Push(pc_scratch);
2862 GetLabelAddress(pc_scratch, &get_pc);
2863
2864 // Save the frame pointer and PC so that the stack layout remains iterable,
2865 // even without an ExitFrame which normally exists between JS and C frames.
2867 str(pc_scratch,
2868 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
2869 str(fp, ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2870
2871 Pop(pc_scratch);
2872 }
2873
2874 // Just call directly. The function called cannot cause a GC, or
2875 // allow preemption, so the return address in the link register
2876 // stays correct.
2877 Call(function);
2878 int call_pc_offset = pc_offset();
2879 bind(&get_pc);
2880 if (return_label) bind(return_label);
2881
2882 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2883 // We don't unset the PC; the FP is the source of truth.
2884 Register zero_scratch = r5;
2885 Push(zero_scratch);
2886 mov(zero_scratch, Operand::Zero());
2887
2888 str(zero_scratch,
2889 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2890
2891 Pop(zero_scratch);
2892 }
2893
2894 int stack_passed_arguments =
2895 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2896 if (ActivationFrameAlignment() > kPointerSize) {
2897 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
2898 } else {
2899 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
2900 }
2901
2902 return call_pc_offset;
2903}
2904
2905int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
2906 SetIsolateDataSlots set_isolate_data_slots,
2907 Label* return_label) {
2908 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
2909 return_label);
2910}
2911
2912int MacroAssembler::CallCFunction(Register function, int num_arguments,
2913 SetIsolateDataSlots set_isolate_data_slots,
2914 Label* return_label) {
2915 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
2916 return_label);
2917}
2918
2919void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc,
2920 Label* condition_met) {
2921 ASM_CODE_COMMENT(this);
2922 UseScratchRegisterScope temps(this);
2923 Register scratch = temps.Acquire();
2924 DCHECK(!AreAliased(object, scratch));
2925 DCHECK(cc == eq || cc == ne);
2926 Bfc(scratch, object, 0, kPageSizeBits);
2927 ldr(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()));
2928 tst(scratch, Operand(mask));
2929 b(cc, condition_met);
2930}
2931
2932Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2933 Register reg4, Register reg5,
2934 Register reg6) {
2935 RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
2936
2937 const RegisterConfiguration* config = RegisterConfiguration::Default();
2938 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2939 int code = config->GetAllocatableGeneralCode(i);
2940 Register candidate = Register::from_code(code);
2941 if (regs.has(candidate)) continue;
2942 return candidate;
2943 }
2944 UNREACHABLE();
2945}
2946
2947void MacroAssembler::ComputeCodeStartAddress(Register dst) {
2948 ASM_CODE_COMMENT(this);
2949 // We can use the register pc - 8 for the address of the current instruction.
2950 sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
2951}
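// Reading pc on ARM yields the address of the current instruction plus 8
// (Instruction::kPcLoadDelta), so subtracting pc_offset() + kPcLoadDelta
// recovers the address of the first instruction of the current code object.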
2952
2953// Check if the code object is marked for deoptimization. If it is, then it
2954// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
2955// to:
2956// 1. read from memory the word that contains that bit, which can be found in
2957// the flags in the referenced {Code} object;
2958// 2. test kMarkedForDeoptimizationBit in those flags; and
2959// 3. if it is not zero then it jumps to the builtin.
2960//
2961// Note: With leaptiering we simply assert the code is not deoptimized.
2962void MacroAssembler::BailoutIfDeoptimized() {
2963 UseScratchRegisterScope temps(this);
2964 Register scratch = temps.Acquire();
2965 int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
2966 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
2968 ldr(scratch, FieldMemOperand(scratch, Code::kFlagsOffset));
2969 tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
2970 }
2971#ifdef V8_ENABLE_LEAPTIERING
2972 if (v8_flags.debug_code) {
2973 Assert(kZero, AbortReason::kInvalidDeoptimizedCode);
2974 }
2975#else
2976 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne);
2977#endif
2978}
2979
2980void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
2981 DeoptimizeKind kind, Label* ret,
2982 Label*) {
2983 ASM_CODE_COMMENT(this);
2984
2985 // All constants should have been emitted prior to deoptimization exit
2986 // emission. See PrepareForDeoptimizationExits.
2988 BlockConstPoolScope block_const_pool(this);
2989
2991 ldr(ip,
2993 Call(ip);
2997
2998 // The above code must not emit constants either.
3000}
3001
3002void MacroAssembler::Trap() { stop(); }
3003void MacroAssembler::DebugBreak() { stop(); }
3004
3005void MacroAssembler::I64x2BitMask(Register dst, QwNeonRegister src) {
3006 UseScratchRegisterScope temps(this);
3007 QwNeonRegister tmp1 = temps.AcquireQ();
3008 Register tmp = temps.Acquire();
3009
3010 vshr(NeonU64, tmp1, src, 63);
3011 vmov(NeonU32, dst, tmp1.low(), 0);
3012 vmov(NeonU32, tmp, tmp1.high(), 0);
3013 add(dst, dst, Operand(tmp, LSL, 1));
3014}
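// I64x2BitMask: shifting each 64-bit lane right by 63 leaves only the lane's
// sign bit. The low word of lane 0 becomes bit 0 of dst and the low word of
// lane 1, shifted left by one, becomes bit 1, yielding the 2-bit mask defined
// by the wasm i64x2.bitmask instruction.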
3015
3016void MacroAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
3017 QwNeonRegister src2) {
3018 UseScratchRegisterScope temps(this);
3019 Simd128Register scratch = temps.AcquireQ();
3020 vceq(Neon32, dst, src1, src2);
3021 vrev64(Neon32, scratch, dst);
3022 vand(dst, dst, scratch);
3023}
3024
3025void MacroAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
3026 QwNeonRegister src2) {
3027 UseScratchRegisterScope temps(this);
3028 Simd128Register tmp = temps.AcquireQ();
3029 vceq(Neon32, dst, src1, src2);
3030 vrev64(Neon32, tmp, dst);
3031 vmvn(dst, dst);
3032 vorn(dst, dst, tmp);
3033}
3034
3035void MacroAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
3036 QwNeonRegister src2) {
3037 ASM_CODE_COMMENT(this);
3038 vqsub(NeonS64, dst, src2, src1);
3039 vshr(NeonS64, dst, dst, 63);
3040}
3041
3042void MacroAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
3043 QwNeonRegister src2) {
3044 ASM_CODE_COMMENT(this);
3045 vqsub(NeonS64, dst, src1, src2);
3046 vshr(NeonS64, dst, dst, 63);
3047 vmvn(dst, dst);
3048}
3049
3050void MacroAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
3051 ASM_CODE_COMMENT(this);
3052 UseScratchRegisterScope temps(this);
3053 QwNeonRegister tmp = temps.AcquireQ();
3054 // src = | a | b | c | d |
3055 // tmp = | max(a,b) | max(c,d) | ...
3056 vpmax(NeonU32, tmp.low(), src.low(), src.high());
3057 // tmp = | max(a,b) == 0 | max(c,d) == 0 | ...
3058 vceq(Neon32, tmp, tmp, 0);
3059 // tmp = | max(a,b) == 0 or max(c,d) == 0 | ...
3060 vpmax(NeonU32, tmp.low(), tmp.low(), tmp.low());
3061 // dst = (max(a,b) == 0 || max(c,d) == 0)
3062 // dst will either be -1 or 0.
3063 vmov(NeonS32, dst, tmp.low(), 0);
3064 // dst = !dst (-1 -> 0, 0 -> 1)
3065 add(dst, dst, Operand(1));
3066 // This works because:
3067 // !dst
3068 // = !(max(a,b) == 0 || max(c,d) == 0)
3069 // = max(a,b) != 0 && max(c,d) != 0
3070 // = (a != 0 || b != 0) && (c != 0 || d != 0)
3071 // = definition of i64x2.all_true.
3072}
3073
3074void MacroAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) {
3075 ASM_CODE_COMMENT(this);
3076 UseScratchRegisterScope temps(this);
3077 Simd128Register tmp = temps.AcquireQ();
3078 vshr(NeonS64, tmp, src, 63);
3079 veor(dst, src, tmp);
3080 vsub(Neon64, dst, dst, tmp);
3081}
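// I64x2Abs uses the usual two's-complement identity: with t = x >> 63
// (arithmetic shift, so t is all ones for negative lanes and zero otherwise),
// abs(x) = (x ^ t) - t, computed per 64-bit lane with veor and vsub.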
3082
3083namespace {
3084using AssemblerFunc = void (Assembler::*)(DwVfpRegister, SwVfpRegister,
3086// Helper function for f64x2 convert low instructions.
3087// This ensures that we do not overwrite src, if dst == src.
3088void F64x2ConvertLowHelper(Assembler* assm, QwNeonRegister dst,
3089 QwNeonRegister src, AssemblerFunc convert_fn) {
3090 LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low().code());
3091 UseScratchRegisterScope temps(assm);
3092 if (dst == src) {
3093 LowDwVfpRegister tmp = temps.AcquireLowD();
3094 assm->vmov(tmp, src_d);
3095 src_d = tmp;
3096 }
3097 // Default arguments are not part of the function type
3098 (assm->*convert_fn)(dst.low(), src_d.low(), kDefaultRoundToZero, al);
3099 (assm->*convert_fn)(dst.high(), src_d.high(), kDefaultRoundToZero, al);
3100}
3101} // namespace
3102
3103void MacroAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst,
3104 QwNeonRegister src) {
3105 F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_s32);
3106}
3107
3108void MacroAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst,
3109 QwNeonRegister src) {
3110 F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_u32);
3111}
3112
3113void MacroAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst,
3114 QwNeonRegister src) {
3115 F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32);
3116}
3117
3118void MacroAssembler::Switch(Register scratch, Register value,
3119 int case_value_base, Label** labels,
3120 int num_labels) {
3121 Label fallthrough;
3122 if (case_value_base != 0) {
3123 sub(value, value, Operand(case_value_base));
3124 }
3125 // This {cmp} might still emit a constant pool entry.
3126 cmp(value, Operand(num_labels));
3127 // Ensure to emit the constant pool first if necessary.
3128 CheckConstPool(true, true);
3129 BlockConstPoolFor(num_labels + 2);
3130 add(pc, pc, Operand(value, LSL, 2), LeaveCC, lo);
3131 b(&fallthrough);
3132 for (int i = 0; i < num_labels; ++i) {
3133 b(labels[i]);
3134 }
3135 bind(&fallthrough);
3136}
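// Switch emits an inline jump table: the conditional "add pc, pc, value << 2"
// executes only when value is unsigned-below num_labels. Since reading pc
// yields the address of the add plus 8, which is exactly the first table
// entry, the add lands on the value-th branch; out-of-range values skip the
// add and fall into b(&fallthrough).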
3137
3138void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
3139 Register code, Register scratch, Label* if_marked_for_deoptimization) {
3140 ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
3141 tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
3142 b(if_marked_for_deoptimization, ne);
3143}
3144
3145void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch,
3146 Label* if_turbofanned) {
3147 ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
3148 tst(scratch, Operand(1 << Code::kIsTurbofannedBit));
3149 b(if_turbofanned, ne);
3150}
3151
3152void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
3153 CodeKind min_opt_level,
3154 Register feedback_vector,
3155 FeedbackSlot slot,
3156 Label* on_result,
3158 Label fallthrough, clear_slot;
3160 scratch_and_result,
3161 FieldMemOperand(feedback_vector,
3162 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
3163 LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
3164
3165 // Is it marked_for_deoptimization? If yes, clear the slot.
3166 {
3167 UseScratchRegisterScope temps(this);
3168
3169 // The entry references a CodeWrapper object. Unwrap it now.
3170 ldr(scratch_and_result,
3171 FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));
3172
3173 Register temp = temps.Acquire();
3174 JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temp, &clear_slot);
3175 if (min_opt_level == CodeKind::TURBOFAN_JS) {
3176 JumpIfCodeIsTurbofanned(scratch_and_result, temp, on_result);
3177 b(&fallthrough);
3178 } else {
3179 b(on_result);
3180 }
3181 }
3182
3183 bind(&clear_slot);
3184 Move(scratch_and_result, ClearedValue());
3186 scratch_and_result,
3187 FieldMemOperand(feedback_vector,
3188 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
3189
3190 bind(&fallthrough);
3191 Move(scratch_and_result, Operand(0));
3192}
3193
3194// Calls an API function. Allocates HandleScope, extracts returned value
3195// from handle and propagates exceptions. Clobbers C argument registers
3196// and C caller-saved registers. Restores context. On return removes
3197// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
3198// (GCed, includes the call JS arguments space and the additional space
3199// allocated for the fast call).
3200void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
3201 Register function_address,
3202 ExternalReference thunk_ref, Register thunk_arg,
3203 int slots_to_drop_on_return,
3204 MemOperand* argc_operand,
3205 MemOperand return_value_operand) {
3206 ASM_CODE_COMMENT(masm);
3207
3208 using ER = ExternalReference;
3209
3210 Isolate* isolate = masm->isolate();
3211 MemOperand next_mem_op = __ ExternalReferenceAsOperand(
3212 ER::handle_scope_next_address(isolate), no_reg);
3213 MemOperand limit_mem_op = __ ExternalReferenceAsOperand(
3214 ER::handle_scope_limit_address(isolate), no_reg);
3215 MemOperand level_mem_op = __ ExternalReferenceAsOperand(
3216 ER::handle_scope_level_address(isolate), no_reg);
3217
3218 Register return_value = r0;
3219 Register scratch = r8;
3220 Register scratch2 = r9;
3221
3222 // Allocate HandleScope in callee-saved registers.
3223 // We will need to restore the HandleScope after the call to the API function;
3224 // by allocating it in callee-saved registers it will be preserved by C code.
3225 Register prev_next_address_reg = r4;
3226 Register prev_limit_reg = r5;
3227 Register prev_level_reg = r6;
3228
3229 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
3230 // this function must not corrupt them (return_value overlaps with
3231 // kCArgRegs[0] but that's ok because we start using it only after the C
3232 // call).
3233 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
3234 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
3235 // function_address and thunk_arg might overlap but this function must not
3236 // corrupt them until the call is made (i.e. overlap with return_value is
3237 // fine).
3238 DCHECK(!AreAliased(function_address, // incoming parameters
3239 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
3240 DCHECK(!AreAliased(thunk_arg, // incoming parameters
3241 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
3242 {
3244 "Allocate HandleScope in callee-save registers.");
3245 __ ldr(prev_next_address_reg, next_mem_op);
3246 __ ldr(prev_limit_reg, limit_mem_op);
3247 __ ldr(prev_level_reg, level_mem_op);
3248 __ add(scratch, prev_level_reg, Operand(1));
3249 __ str(scratch, level_mem_op);
3250 }
3251
3252 Label profiler_or_side_effects_check_enabled, done_api_call;
3253 if (with_profiling) {
3254 __ RecordComment("Check if profiler or side effects check is enabled");
3255 __ ldrb(scratch,
3256 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
3257 __ cmp(scratch, Operand(0));
3258 __ b(ne, &profiler_or_side_effects_check_enabled);
3259#ifdef V8_RUNTIME_CALL_STATS
3260 __ RecordComment("Check if RCS is enabled");
3261 __ Move(scratch, ER::address_of_runtime_stats_flag());
3262 __ ldr(scratch, MemOperand(scratch, 0));
3263 __ cmp(scratch, Operand(0));
3264 __ b(ne, &profiler_or_side_effects_check_enabled);
3265#endif // V8_RUNTIME_CALL_STATS
3266 }
3267
3268 __ RecordComment("Call the api function directly.");
3269 __ StoreReturnAddressAndCall(function_address);
3270 __ bind(&done_api_call);
3271
3272 Label propagate_exception;
3273 Label delete_allocated_handles;
3274 Label leave_exit_frame;
3275
3276 __ RecordComment("Load the value from ReturnValue");
3277 __ ldr(return_value, return_value_operand);
3278
3279 {
3280 ASM_CODE_COMMENT_STRING(
3281 masm,
3282 "No more valid handles (the result handle was the last one)."
3283 "Restore previous handle scope.");
3284 __ str(prev_next_address_reg, next_mem_op);
3285 if (v8_flags.debug_code) {
3286 __ ldr(scratch, level_mem_op);
3287 __ sub(scratch, scratch, Operand(1));
3288 __ cmp(scratch, prev_level_reg);
3289 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
3290 }
3291 __ str(prev_level_reg, level_mem_op);
3292 __ ldr(scratch, limit_mem_op);
3293 __ cmp(scratch, prev_limit_reg);
3294 __ b(ne, &delete_allocated_handles);
3295 }
3296
3297 __ RecordComment("Leave the API exit frame.");
3298 __ bind(&leave_exit_frame);
3299
3300 Register argc_reg = prev_limit_reg;
3301 if (argc_operand != nullptr) {
3302 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
3303 __ ldr(argc_reg, *argc_operand);
3304 }
3305 __ LeaveExitFrame(scratch);
3306
3307 {
3309 "Check if the function scheduled an exception.");
3310 __ LoadRoot(scratch, RootIndex::kTheHoleValue);
3311 __ ldr(scratch2, __ ExternalReferenceAsOperand(
3312 ER::exception_address(isolate), no_reg));
3313 __ cmp(scratch, scratch2);
3314 __ b(ne, &propagate_exception);
3315 }
3316
3317 __ AssertJSAny(return_value, scratch, scratch2,
3318 AbortReason::kAPICallReturnedInvalidObject);
3319
3320 if (argc_operand == nullptr) {
3321 DCHECK_NE(slots_to_drop_on_return, 0);
3322 __ add(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
3323
3324 } else {
3325 // {argc_operand} was loaded into {argc_reg} above.
3326 __ add(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
3327 __ add(sp, sp, Operand(argc_reg, LSL, kSystemPointerSizeLog2));
3328 }
3329
3330 __ mov(pc, lr);
3331
3332 if (with_profiling) {
3333 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
3334 __ bind(&profiler_or_side_effects_check_enabled);
3335 // Additional parameter is the address of the actual callback function.
3336 if (thunk_arg.is_valid()) {
3337 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
3338 IsolateFieldId::kApiCallbackThunkArgument);
3339 __ str(thunk_arg, thunk_arg_mem_op);
3340 }
3341 __ Move(scratch, thunk_ref);
3343 __ b(&done_api_call);
3344 }
3345
3346 __ RecordComment("An exception was thrown. Propagate it.");
3347 __ bind(&propagate_exception);
3348 __ TailCallRuntime(Runtime::kPropagateException);
3349 {
3351 masm, "HandleScope limit has changed. Delete allocated extensions.");
3352 __ bind(&delete_allocated_handles);
3353 __ str(prev_limit_reg, limit_mem_op);
3354 // Save the return value in a callee-save register.
3355 Register saved_result = prev_limit_reg;
3356 __ mov(saved_result, return_value);
3358 __ Move(kCArgRegs[0], ER::isolate_address());
3359 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
3360 __ mov(return_value, saved_result);
3361 __ jmp(&leave_exit_frame);
3362 }
3363}
3364
3365} // namespace internal
3366} // namespace v8
3367
3368#undef __
3369
3370#endif // V8_TARGET_ARCH_ARM
friend Zone
Definition asm-types.cc:195
constexpr int kPageSizeBits
#define Assert(condition)
Builtins::Kind kind
Definition builtins.cc:40
static int ActivationFrameAlignment()
friend class ConstantPoolUnavailableScope
Definition assembler.h:579
void set_predictable_code_size(bool value)
Definition assembler.h:342
friend class FrameAndConstantPoolScope
Definition assembler.h:578
bool predictable_code_size() const
Definition assembler.h:341
int AddCodeTarget(IndirectHandle< Code > target)
Definition assembler.cc:267
V8_INLINE void RecordComment(const char *comment, const SourceLocation &loc=SourceLocation::Current())
Definition assembler.h:417
const AssemblerOptions & options() const
Definition assembler.h:339
void rsb(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vmrs(const Register dst, const Condition cond=al)
void Move32BitImmediate(Register rd, const Operand &x, Condition cond=al)
void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vshr(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src, int shift)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vneg(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
ConstantPool::BlockScope BlockConstPoolScope
void vldm(BlockAddrMode am, Register base, DwVfpRegister first, DwVfpRegister last, Condition cond=al)
bool has_pending_constants() const
void mov_label_offset(Register dst, Label *label)
void mls(Register dst, Register src1, Register src2, Register srcA, Condition cond=al)
void b(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
void stm(BlockAddrMode am, Register base, RegList src, Condition cond=al)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void vminnm(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void CheckConstPool(bool force_emit, bool require_jump)
void sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void vmvn(QwNeonRegister dst, QwNeonRegister src)
void bl(int branch_offset, Condition cond=al, RelocInfo::Mode rmode=RelocInfo::NO_INFO)
void vmov(const SwVfpRegister dst, Float32 imm)
void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void cmp(Register src1, const Operand &src2, Condition cond=al)
void BlockConstPoolFor(int instructions)
void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift)
bool VfpRegisterIsAvailable(DwVfpRegister reg)
void lsr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
friend class UseScratchRegisterScope
V8_INLINE void MaybeCheckConstPool()
void ldrsb(Register dst, const MemOperand &src, Condition cond=al)
void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void str(Register src, const MemOperand &dst, Condition cond=al)
void vstm(BlockAddrMode am, Register base, DwVfpRegister first, DwVfpRegister last, Condition cond=al)
void ldrh(Register dst, const MemOperand &src, Condition cond=al)
void shift(Operand dst, Immediate shift_amount, int subcode, int size)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vorn(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vld1(NeonSize size, const NeonListOperand &dst, const NeonMemOperand &src)
void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift)
void blx(int branch_offset)
void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void lsl(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void ldrb(Register dst, const MemOperand &src, Condition cond=al)
void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2)
void vswp(DwVfpRegister dst, DwVfpRegister src)
void ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void vst1s(NeonSize size, const NeonListOperand &src, uint8_t index, const NeonMemOperand &dst)
void vcmp(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void vdup(NeonSize size, QwNeonRegister dst, Register src)
void vld1s(NeonSize size, const NeonListOperand &dst, uint8_t index, const NeonMemOperand &src)
void orr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond=al)
void vstr(const DwVfpRegister src, const Register base, int offset, const Condition cond=al)
void vsub(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void tst(Register src1, const Operand &src2, Condition cond=al)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
void bic(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vadd(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void bkpt(uint32_t imm16)
Assembler(const AssemblerOptions &, std::unique_ptr< AssemblerBuffer >={})
void bx(Register target, Condition cond=al)
void vst1(NeonSize size, const NeonListOperand &src, const NeonMemOperand &dst)
void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2)
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
int SizeOfCodeGeneratedSince(Label *label)
void vldr(const DwVfpRegister dst, const Register base, int offset, const Condition cond=al)
Instruction * pc() const
void bfc(Register dst, int lsb, int width, Condition cond=al)
void asr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src)
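
The entries above are the raw ARM instruction emitters of the underlying Assembler (plus a few bookkeeping members) that the macro assembler documented on this page builds on. As a reading aid, here is a minimal, hypothetical sketch of how a handful of them combine, using only signatures from the list above; it is not code from this file and assumes it is compiled inside the V8 ARM port, so Assembler, Register, MemOperand, Operand and the condition/SBit constants are in scope. The helper name is made up for the example.

// Sketch: load a word, and if it is non-negative, double it in place.
// The predicated lsl/str reuse the flags set by cmp, showing the shared
// Condition and SBit default parameters of the emitters listed above.
void EmitDoubleIfNonNegative(Assembler* assm, Register scratch, Register base,
                             int offset) {
  assm->ldr(scratch, MemOperand(base, offset));          // scratch = [base + offset]
  assm->cmp(scratch, Operand(0));                        // set flags on the value
  assm->lsl(scratch, scratch, Operand(1), LeaveCC, ge);  // if >= 0: scratch <<= 1
  assm->str(scratch, MemOperand(base, offset), ge);      // if >= 0: store it back
}
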
static constexpr Builtin RecordWrite(SaveFPRegsMode fp_mode)
static bool IsIsolateIndependentBuiltin(Tagged< Code > code)
Definition builtins.cc:372
V8_EXPORT_PRIVATE Handle< Code > code_handle(Builtin builtin)
Definition builtins.cc:154
static constexpr Builtin RuntimeCEntry(int result_size, bool switch_to_central_stack=false)
static constexpr Builtin EphemeronKeyBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin kLastTier0
Definition builtins.h:114
static constexpr Builtin CEntry(int result_size, ArgvMode argv_mode, bool builtin_exit_frame=false, bool switch_to_central_stack=false)
static const int kIsTurbofannedBit
Definition code.h:458
static const int kMarkedForDeoptimizationBit
Definition code.h:456
static const int kInvalidContext
Definition contexts.h:578
static V8_INLINE constexpr int SlotOffset(int index)
Definition contexts.h:516
static bool IsSupported(CpuFeature f)
static V8_EXPORT_PRIVATE const int kEagerDeoptExitSize
static V8_EXPORT_PRIVATE const int kLazyDeoptExitSize
static constexpr int kSizeInBytes
static constexpr int kCallerSPDisplacement
static ExternalReference Create(const SCTableReference &table_ref)
static constexpr uint32_t kFlagsMaybeHasMaglevCode
static constexpr uint32_t kFlagsTieringStateIsAnyRequested
static constexpr uint32_t kFlagsLogNextExecution
static constexpr uint32_t kFlagsMaybeHasTurbofanCode
static constexpr int OffsetOfElementAt(int index)
static constexpr int kHeaderSize
static constexpr int kMapOffset
static constexpr int kPcLoadDelta
static constexpr int BuiltinEntrySlotOffset(Builtin id)
static constexpr int real_jslimit_offset()
static IsolateGroup * current()
Builtins * builtins()
Definition isolate.h:1443
Address BuiltinEntry(Builtin builtin)
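
Several of the constants and accessors just above (the builtin entry slot offset and the Isolate builtin accessors) exist so that builtin entry points can be reached at fixed offsets from kRootRegister. A hedged sketch of that idea follows; the IsolateData class name is an assumption here, the helper is made up, and this is not the file's actual implementation.

// Sketch: a builtin's entry address lives in a table reachable at a constant
// offset from kRootRegister, so fetching it is a single ldr.
void LoadBuiltinEntrySketch(MacroAssembler* masm, Register destination,
                            Builtin builtin) {
  masm->ldr(destination,
            MemOperand(kRootRegister,
                       IsolateData::BuiltinEntrySlotOffset(builtin)));
}
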
static bool IsAddressableThroughRootRegister(Isolate *isolate, const ExternalReference &reference)
V8_INLINE std::string CommentForOffHeapTrampoline(const char *prefix, Builtin builtin)
static int32_t RootRegisterOffsetForExternalReferenceTableEntry(Isolate *isolate, const ExternalReference &reference)
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index)
void IndirectLoadConstant(Register destination, Handle< HeapObject > object)
static intptr_t RootRegisterOffsetForExternalReference(Isolate *isolate, const ExternalReference &reference)
void IndirectLoadExternalReference(Register destination, ExternalReference reference)
void LoadStackLimit(Register destination, StackLimitKind kind)
void Call(Register target, Condition cond=al)
void CallJSFunction(Register function_object, uint16_t argument_count)
void CallDebugOnFunctionCall(Register fun, Register new_target, Register expected_parameter_count, Register actual_parameter_count)
void TestCodeIsMarkedForDeoptimization(Register code, Register scratch)
void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE
void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void Drop(int count, Condition cond=al)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void MovFromFloatResult(DwVfpRegister dst)
void mov(Register rd, Register rj)
void I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void SmiUntag(Register reg, SBit s=LeaveCC)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void AssertNotSmi(Register object, AbortReason reason=AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void PushStandardFrame(Register function_reg)
void I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void I64x2AllTrue(Register dst, QwNeonRegister src)
void CompareRoot(Register obj, RootIndex index)
void MovFromFloatParameter(DwVfpRegister dst)
void LoadLane(NeonSize sz, NeonListOperand dst_list, uint8_t lane, NeonMemOperand src)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void Move(Register dst, Tagged< Smi > smi)
void SmiTst(Register value)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void StoreReturnAddressAndCall(Register target)
void FloatMaxOutOfLineHelper(T result, T left, T right)
void I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void StackOverflowCheck(Register num_args, Register scratch, Label *stack_overflow)
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane, NeonDataType dt, int lane)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
void LoadRootRelative(Register destination, int32_t offset) final
void JumpIfSmi(Register value, Label *smi_label)
void SaveFPRegsToHeap(Register location, Register scratch)
void CallCodeObject(Register code_object)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void RestoreFPRegs(Register location, Register scratch)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag=kDefaultCodeEntrypointTag)
void LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond=al)
void RestoreFPRegsFromHeap(Register location, Register scratch)
void AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void CompareTaggedRoot(Register with, RootIndex index)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void SmiTag(Register reg, SBit s=LeaveCC)
void VmovLow(Register dst, DwVfpRegister src)
void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label *out_of_line)
void PushArray(Register array, Register size, Register scratch, PushArrayOrder order=PushArrayOrder::kNormal)
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void MovToFloatResult(DwVfpRegister src)
void Bfc(Register dst, Register src, int lsb, int width, Condition cond=al)
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void Zero(const MemOperand &dest)
void MovToFloatParameter(DwVfpRegister src)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void PushCommonFrame(Register marker_reg=no_reg)
void JumpIfEqual(Register x, int32_t y, Label *dest)
int LeaveFrame(StackFrame::Type type)
void GetLabelAddress(Register dst, Label *target)
void LslPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void LoadGlobalProxy(Register dst)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void Jump(Register target, Condition cond=al)
void LoadRoot(Register destination, RootIndex index) final
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void CompareRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit)
void JumpCodeObject(Register code_object, JumpMode jump_mode=JumpMode::kJump)
void JumpIfCodeIsTurbofanned(Register code, Register scratch, Label *if_turbofanned)
void FloatMinHelper(T result, T left, T right, Label *out_of_line)
void EmitDecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void StoreLane(NeonSize sz, NeonListOperand src_list, uint8_t lane, NeonMemOperand dst)
void LoadFromConstantsTable(Register destination, int constant_index) final
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void CompareObjectTypeRange(Register heap_object, Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void MaybeSaveRegisters(RegList registers)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void CheckFor32DRegs(Register scratch)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void MovePair(Register dst0, Register src0, Register dst1, Register src1)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void VmovHigh(Register dst, DwVfpRegister src)
void I64x2BitMask(Register dst, QwNeonRegister src)
void MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void CallBuiltin(Builtin builtin, Condition cond=al)
void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane)
void I64x2Abs(QwNeonRegister dst, QwNeonRegister src)
void F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src)
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void Check(Condition cond, AbortReason reason)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void FloatMaxHelper(T result, T left, T right, Label *out_of_line)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, Label *if_marked_for_deoptimization)
void AllocateStackSpace(Register bytes)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void FloatMinOutOfLineHelper(T result, T left, T right)
void VmovExtended(Register dst, int src_code)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void VFPCompareAndLoadFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Register fpscr_flags, const Condition cond=al)
void SaveFPRegs(Register location, Register scratch)
void JumpIfLessThan(Register x, int32_t y, Label *dest)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Mls(Register dst, Register src1, Register src2, Register srcA, Condition cond=al)
void CompareInstanceTypeRange(Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void StubPrologue(StackFrame::Type type)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void StoreRootRelative(int32_t offset, Register value) final
void LoadMap(Register destination, Register object)
void TailCallRuntime(Runtime::FunctionId fid)
void Swap(Register srcdst0, Register srcdst1)
void LoadNativeContextSlot(Register dst, int index)
void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label *out_of_line)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void Switch(Register scratch, Register value, int case_value_base, Label **labels, int num_labels)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
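
The block above lists the MacroAssembler conveniences declared for this file. As one worked, hypothetical example of how a few of them pair up, here is the usual shape of a C call bracketed by the caller-saved push/pop helpers, using only signatures from the list; the function name and ext_ref are placeholders, and the sketch assumes compilation inside the V8 ARM port rather than being code from this file.

// Sketch: call a C function (one register argument, already in r0) while
// preserving the caller-saved core and VFP registers around the call.
void CallCHelperPreservingRegisters(MacroAssembler* masm,
                                    ExternalReference ext_ref) {
  // Spill caller-saved registers, excluding r0 so the value the C function
  // returns in r0 survives the matching restore below.
  masm->PushCallerSaved(SaveFPRegsMode::kSave, r0);
  masm->PrepareCallCFunction(1);    // align sp for a one-argument C call
  masm->CallCFunction(ext_ref, 1);  // call through the external reference
  masm->PopCallerSaved(SaveFPRegsMode::kSave, r0);
}
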
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
constexpr unsigned Count() const
static constexpr LowDwVfpRegister from_code(int8_t code)
constexpr int8_t code() const
static const RegisterConfiguration * Default()
static constexpr Register from_code(int code)
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > FromIntptr(intptr_t value)
Definition smi.h:43
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr int kFixedFrameSizeFromFp
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
DirectHandle< Object > new_target
Definition execution.cc:75
int32_t offset
TNode< Object > receiver
ZoneVector< RpoNumber > & result
LiftoffRegister reg
Register tmp
int x
uint32_t const mask
SmiCheck
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
InstructionOperand destination
int int32_t
Definition unicode.cc:40
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr int WhichPowerOfTwo(T value)
Definition bits.h:195
constexpr Register r0
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr NeonSize Neon32
constexpr int kPointerSizeLog2
Definition globals.h:600
constexpr BlockAddrMode ia_w
const int kSmiTagSize
Definition v8-internal.h:87
constexpr ShiftOp LSR
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
constexpr int kPointerSize
Definition globals.h:599
constexpr BlockAddrMode db_w
constexpr NeonSize Neon64
constexpr ShiftOp ASR
int NeonSz(NeonDataType dt)
const Address kWeakHeapObjectMask
Definition globals.h:967
constexpr ShiftOp LSL
constexpr DwVfpRegister no_dreg
constexpr SBit LeaveCC
constexpr Register kJavaScriptCallArgCountRegister
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
Definition globals.h:557
static const int kRegisterPassedArguments
Flag flags[]
Definition flags.cc:3797
constexpr int L
QwNeonRegister Simd128Register
static const int kDoubleRegisterPassedArguments
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
Definition globals.h:410
const RegList kCallerSaved
Definition reglist-arm.h:42
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kZapValue
Definition globals.h:1005
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
constexpr NeonDataType NeonS32
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
const int kSmiShiftSize
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
constexpr NeonDataType NeonU32
constexpr AddrMode PostIndex
const intptr_t kSmiTagMask
Definition v8-internal.h:88
constexpr SBit SetCC
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
constexpr NeonDataType NeonU64
constexpr uint8_t kInstrSize
constexpr NeonDataType NeonS64
const int kSmiTag
Definition v8-internal.h:86
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr VFPConversionMode kDefaultRoundToZero
constexpr int kDoubleSizeLog2
Definition globals.h:421
constexpr Register cp
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
Definition globals.h:407
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
constexpr Register kJavaScriptCallNewTargetRegister
Local< T > Handle
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define CHECK_LE(lhs, rhs)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define OFFSET_OF_DATA_START(Type)
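
The constants near the end of the list (kSmiTag, kSmiTagSize, kSmiTagMask, kHeapObjectTag) encode V8's pointer tagging on 32-bit targets, which is what SmiTag, SmiUntag, JumpIfSmi, SmiTst and FieldMemOperand in the member list above manipulate. The following self-contained sketch spells out that arithmetic with the standard 32-bit values; it is a reading aid, not code from this file.

// Standalone illustration of the 32-bit tagging arithmetic. The constant
// values below are the standard V8 ones and are redefined here only so the
// example compiles on its own.
#include <cassert>
#include <cstdint>

constexpr int kSmiTag = 0;
constexpr int kSmiTagSize = 1;
constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // 0b1
constexpr int kHeapObjectTag = 1;

// SmiTag/SmiUntag from the member list are a left/right shift by one bit.
constexpr int32_t SmiTagValue(int32_t value) { return value << kSmiTagSize; }
constexpr int32_t SmiUntagValue(int32_t tagged) { return tagged >> kSmiTagSize; }

// A Smi check tests the low bit: Smis end in 0, heap object pointers in 1.
constexpr bool IsSmiWord(intptr_t word) {
  return (word & kSmiTagMask) == kSmiTag;
}

// FieldMemOperand(object, offset) folds the heap-object tag into the offset,
// so the machine address used is object - 1 + offset.
constexpr intptr_t FieldAddress(intptr_t tagged_object, int offset) {
  return tagged_object + offset - kHeapObjectTag;
}

int main() {
  assert(SmiUntagValue(SmiTagValue(42)) == 42);
  assert(IsSmiWord(SmiTagValue(42)));
  assert(!IsSmiWord(0x1001));               // a (fake) tagged heap pointer
  assert(FieldAddress(0x1001, 4) == 0x1004);
  return 0;
}

Seen this way, SmiTag is roughly a single left shift, a Smi check is a single tst against kSmiTagMask, and FieldMemOperand simply subtracts the heap-object tag from the load/store offset.
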