v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

maglev-assembler-arm64-inl.h
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_
#define V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_

#include "src/common/globals.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-gen-state.h"

namespace v8 {
namespace internal {
namespace maglev {

constexpr Condition ConditionForFloat64(Operation operation) {
  return ConditionFor(operation);
}

constexpr Condition ConditionForNaN() { return vs; }
inline int ShiftFromScale(int n) {
  switch (n) {
    case 1:
      return 0;
    case 2:
      return 1;
    case 4:
      return 2;
    case 8:
      return 3;
    default:
      UNREACHABLE();
  }
}
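
// Usage sketch (caller-provided `addr`, `base`, and `index` registers
// assumed): ShiftFromScale converts a byte scale into the shift amount for
// scaled addressing, so an element address can be formed as
//   __ Add(addr, base, Operand(index, LSL, ShiftFromScale(kDoubleSize)));
// which scales the index by 8 via LSL #3.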

class MaglevAssembler::TemporaryRegisterScope
    : public TemporaryRegisterScopeBase<TemporaryRegisterScope> {
  using Base = TemporaryRegisterScopeBase<TemporaryRegisterScope>;

 public:
  struct SavedData : public Base::SavedData {
    CPURegList available_scratch_;
    CPURegList available_fp_scratch_;
  };

  explicit TemporaryRegisterScope(MaglevAssembler* masm)
      : Base(masm), scratch_scope_(masm) {}
  TemporaryRegisterScope(MaglevAssembler* masm, const SavedData& saved_data)
      : Base(masm, saved_data), scratch_scope_(masm) {
    scratch_scope_.SetAvailable(saved_data.available_scratch_);
    scratch_scope_.SetAvailableFP(saved_data.available_fp_scratch_);
  }

  Register AcquireScratch() {
    Register reg = scratch_scope_.AcquireX();
    CHECK(!available_.has(reg));
    return reg;
  }
  DoubleRegister AcquireScratchDouble() {
    DoubleRegister reg = scratch_scope_.AcquireD();
    CHECK(!available_double_.has(reg));
    return reg;
  }
  void IncludeScratch(Register reg) { scratch_scope_.Include(reg); }

  SavedData CopyForDefer() {
    return SavedData{
        CopyForDeferBase(),
        *scratch_scope_.Available(),
        *scratch_scope_.AvailableFP(),
    };
  }

  void ResetToDefaultImpl() {
    scratch_scope_.SetAvailable(MacroAssembler::DefaultTmpList());
    scratch_scope_.SetAvailableFP(MacroAssembler::DefaultFPTmpList());
  }

 private:
  UseScratchRegisterScope scratch_scope_;
};

inline MapCompare::MapCompare(MaglevAssembler* masm, Register object,
                              size_t map_count)
    : masm_(masm), object_(object), map_count_(map_count) {
  map_ = masm_->scratch_register_scope()->AcquireScratch();
  if (PointerCompressionIsEnabled()) {
    masm_->LoadCompressedMap(map_, object_);
  } else {
    masm_->LoadMap(map_, object_);
  }
  USE(map_count_);
}

void MapCompare::Generate(Handle<Map> map, Condition cond, Label* if_true,
                          Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(masm_);
  Register temp = temps.AcquireScratch();
  masm_->Move(temp, map);
  masm_->CmpTagged(map_, temp);
  masm_->JumpIf(cond, if_true, distance);
}

Register MapCompare::GetMap() {
  if (PointerCompressionIsEnabled()) {
    // Decompression is idempotent (UXTW operand is used), so this would return
    // a valid pointer even if called multiple times in a row.
    masm_->DecompressTagged(map_, map_);
  }
  return map_;
}

int MapCompare::TemporaryCount(size_t map_count) { return 1; }
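
// Usage sketch, assuming a MaglevAssembler* masm, an object register, and a
// Zone-allocated label:
//   MapCompare map_compare(masm, object, /*map_count=*/1);
//   map_compare.Generate(expected_map, kEqual, &if_match);
// The (possibly compressed) map is loaded into a scratch register once in the
// constructor, so comparing one object against several maps reloads nothing.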

namespace detail {

// Check if the argument is already in a register and doesn't need any
// scratches to reload. This should be in sync with `ToRegister` function below.
template <typename Arg>
inline bool AlreadyInARegister(Arg arg) {
  return false;
}

inline bool AlreadyInARegister(Register reg) { return true; }

inline bool AlreadyInARegister(const Input& input) {
  if (input.operand().IsConstant()) {
    return false;
  }
  const compiler::AllocatedOperand& operand =
      compiler::AllocatedOperand::cast(input.operand());
  if (operand.IsRegister()) {
    return true;
  }
  DCHECK(operand.IsStackSlot());
  return false;
}

template <typename Arg>
inline Register ToRegister(MaglevAssembler* masm,
                           MaglevAssembler::TemporaryRegisterScope* scratch,
                           Arg arg) {
  Register reg = scratch->AcquireScratch();
  masm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(MaglevAssembler* masm,
                           MaglevAssembler::TemporaryRegisterScope* scratch,
                           Register reg) {
  return reg;
}
inline Register ToRegister(MaglevAssembler* masm,
                           MaglevAssembler::TemporaryRegisterScope* scratch,
                           const Input& input) {
  if (input.operand().IsConstant()) {
    Register reg = scratch->AcquireScratch();
    input.node()->LoadToRegister(masm, reg);
    return reg;
  }
  const compiler::AllocatedOperand& operand =
      compiler::AllocatedOperand::cast(input.operand());
  if (operand.IsRegister()) {
    return ToRegister(input);
  } else {
    DCHECK(operand.IsStackSlot());
    Register reg = scratch->AcquireScratch();
    masm->Move(reg, masm->ToMemOperand(input));
    return reg;
  }
}

template <typename... Args>
struct CountPushHelper;

template <>
struct CountPushHelper<> {
  static int Count() { return 0; }
};

template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
  static int Count(Arg arg, Args... args) {
    int arg_count = 1;
    if constexpr (is_iterator_range<Arg>::value) {
      arg_count = static_cast<int>(std::distance(arg.begin(), arg.end()));
    }
    return arg_count + CountPushHelper<Args...>::Count(args...);
  }
};

template <typename... Args>
struct PushAllHelper;

template <typename... Args>
inline void PushAll(MaglevAssembler* masm, Args... args) {
  PushAllHelper<Args...>::Push(masm, args...);
}

template <typename... Args>
inline void PushAllReverse(MaglevAssembler* masm, Args... args) {
  PushAllHelper<Args...>::PushReverse(masm, args...);
}

template <>
struct PushAllHelper<> {
  static void Push(MaglevAssembler* masm) {}
  static void PushReverse(MaglevAssembler* masm) {}
};

template <typename T, typename... Args>
inline void PushIterator(MaglevAssembler* masm, base::iterator_range<T> range,
                         Args... args) {
  using value_type = typename base::iterator_range<T>::value_type;
  for (auto iter = range.begin(), end = range.end(); iter != end; ++iter) {
    value_type val1 = *iter;
    ++iter;
    if (iter == end) {
      PushAll(masm, val1, args...);
      return;
    }
    value_type val2 = *iter;
    masm->Push(val1, val2);
  }
  PushAll(masm, args...);
}

template <typename T, typename... Args>
inline void PushIteratorReverse(MaglevAssembler* masm,
                                base::iterator_range<T> range, Args... args) {
  using value_type = typename base::iterator_range<T>::value_type;
  using difference_type = typename base::iterator_range<T>::difference_type;
  difference_type count = std::distance(range.begin(), range.end());
  DCHECK_GE(count, 0);
  auto iter = range.rbegin();
  auto end = range.rend();
  if (count % 2 != 0) {
    PushAllReverse(masm, *iter, args...);
    ++iter;
  } else {
    PushAllReverse(masm, args...);
  }
  while (iter != end) {
    value_type val1 = *iter;
    ++iter;
    value_type val2 = *iter;
    ++iter;
    masm->Push(val1, val2);
  }
}

template <typename Arg1, typename Arg2>
inline void PushAligned(MaglevAssembler* masm, Arg1 arg1, Arg2 arg2) {
  if (AlreadyInARegister(arg1) || AlreadyInARegister(arg2)) {
    // If one of the operands is already in a register, there is no need
    // to reuse scratch registers, so two arguments can be pushed together.
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    masm->MacroAssembler::Push(ToRegister(masm, &temps, arg1),
                               ToRegister(masm, &temps, arg2));
    return;
  }
  {
    // Push the first argument together with padding to ensure alignment.
    // The second argument is not pushed together with the first so we can
    // reuse any scratch registers used to materialise the first argument for
    // the second one.
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    masm->MacroAssembler::Push(ToRegister(masm, &temps, arg1), padreg);
  }
  {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    masm->MacroAssembler::str(ToRegister(masm, &temps, arg2), MemOperand(sp));
  }
}
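
// The arm64 ABI requires sp to stay 16-byte aligned, so a lone slot is always
// paired with padreg. A sketch of the fallback path above:
//   Push(arg1, padreg)  // sp -= 16; arg1 in the high slot, padding below
//   str  arg2, [sp]     // overwrite the padding slot with arg2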

template <typename Arg>
struct PushAllHelper<Arg> {
  static void Push(MaglevAssembler* masm, Arg arg) {
    if constexpr (is_iterator_range<Arg>::value) {
      PushIterator(masm, arg);
    } else {
      FATAL("Unaligned push");
    }
  }
  static void PushReverse(MaglevAssembler* masm, Arg arg) {
    if constexpr (is_iterator_range<Arg>::value) {
      PushIteratorReverse(masm, arg);
    } else {
      PushAllReverse(masm, arg, padreg);
    }
  }
};

template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
  static void Push(MaglevAssembler* masm, Arg1 arg1, Arg2 arg2, Args... args) {
    if constexpr (is_iterator_range<Arg1>::value) {
      PushIterator(masm, arg1, arg2, args...);
    } else if constexpr (is_iterator_range<Arg2>::value) {
      if (arg2.begin() != arg2.end()) {
        auto val = *arg2.begin();
        PushAligned(masm, arg1, val);
        PushAll(masm,
                base::make_iterator_range(std::next(arg2.begin()), arg2.end()),
                args...);
      } else {
        PushAll(masm, arg1, args...);
      }
    } else {
      PushAligned(masm, arg1, arg2);
      PushAll(masm, args...);
    }
  }
  static void PushReverse(MaglevAssembler* masm, Arg1 arg1, Arg2 arg2,
                          Args... args) {
    if constexpr (is_iterator_range<Arg1>::value) {
      PushIteratorReverse(masm, arg1, arg2, args...);
    } else if constexpr (is_iterator_range<Arg2>::value) {
      if (arg2.begin() != arg2.end()) {
        auto val = *arg2.begin();
        PushAllReverse(
            masm,
            base::make_iterator_range(std::next(arg2.begin()), arg2.end()),
            args...);
        PushAligned(masm, val, arg1);
      } else {
        PushAllReverse(masm, arg1, args...);
      }
    } else {
      PushAllReverse(masm, args...);
      PushAligned(masm, arg2, arg1);
    }
  }
};

}  // namespace detail

template <typename... T>
void MaglevAssembler::Push(T... vals) {
  const int push_count = detail::CountPushHelper<T...>::Count(vals...);
  if (push_count % 2 == 0) {
    detail::PushAll(this, vals...);
  } else {
    detail::PushAll(this, padreg, vals...);
  }
}
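
// Usage sketch: Push(x0, x1, x2) counts three slots and prepends padreg so
// the total stays even; iterator ranges are first expanded element-wise by
// CountPushHelper, then the same parity rule applies.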

template <typename... T>
void MaglevAssembler::PushReverse(T... vals) {
  detail::PushAllReverse(this, vals...);
}

inline void MaglevAssembler::BindJumpTarget(Label* label) {
  MacroAssembler::BindJumpTarget(label);
}

inline void MaglevAssembler::BindBlock(BasicBlock* block) {
  if (block->is_start_block_of_switch_case()) {
    BindJumpTarget(block->label());
  } else {
    Bind(block->label());
  }
}

inline void MaglevAssembler::SmiTagInt32AndSetFlags(Register dst,
                                                    Register src) {
  if (SmiValuesAre31Bits()) {
    Adds(dst.W(), src.W(), src.W());
  } else {
    SmiTag(dst, src);
  }
}
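
// With 31-bit Smis, Adds(dst.W(), src.W(), src.W()) computes src << 1 (the
// Smi tag) and sets the overflow flag iff src does not fit in 31 bits, so a
// checked tag can be sketched (bailout label assumed) as:
//   SmiTagInt32AndSetFlags(dst, src);
//   JumpIf(kOverflow, &deopt);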

inline void MaglevAssembler::CheckInt32IsSmi(Register obj, Label* fail,
                                             Register scratch) {
  DCHECK(!SmiValuesAre32Bits());

  Adds(wzr, obj.W(), obj.W());
  JumpIf(kOverflow, fail);
}

inline void MaglevAssembler::SmiAddConstant(Register dst, Register src,
                                            int value, Label* fail,
                                            Label::Distance distance) {
  AssertSmi(src);
  if (value != 0) {
    if (SmiValuesAre31Bits()) {
      Adds(dst.W(), src.W(), Immediate(Smi::FromInt(value)));
    } else {
      DCHECK(dst.IsX());
      Adds(dst.X(), src.X(), Immediate(Smi::FromInt(value)));
    }
    JumpIf(kOverflow, fail, distance);
  } else {
    Move(dst, src);
  }
}

inline void MaglevAssembler::SmiSubConstant(Register dst, Register src,
                                            int value, Label* fail,
                                            Label::Distance distance) {
  AssertSmi(src);
  if (value != 0) {
    if (SmiValuesAre31Bits()) {
      Subs(dst.W(), src.W(), Immediate(Smi::FromInt(value)));
    } else {
      DCHECK(dst.IsX());
      Subs(dst.X(), src.X(), Immediate(Smi::FromInt(value)));
    }
    JumpIf(kOverflow, fail, distance);
  } else {
    Move(dst, src);
  }
}

inline void MaglevAssembler::MoveHeapNumber(Register dst, double value) {
  Mov(dst, Operand::EmbeddedHeapNumber(value));
}

inline Condition MaglevAssembler::IsRootConstant(Input input,
                                                 RootIndex root_index) {
  if (input.operand().IsRegister()) {
    CompareRoot(ToRegister(input), root_index);
  } else {
    DCHECK(input.operand().IsStackSlot());
    TemporaryRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    Ldr(scratch, ToMemOperand(input));
    CompareRoot(scratch, root_index);
  }
  return eq;
}

inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot slot) {
  return MemOperand(fp, slot.index);
}

inline Register MaglevAssembler::GetFramePointer() { return fp; }

// TODO(Victorgomes): Unify this to use StackSlot struct.
inline MemOperand MaglevAssembler::GetStackSlot(
    const compiler::AllocatedOperand& operand) {
  return MemOperand(fp, GetFramePointerOffsetForStackSlot(operand));
}

inline MemOperand MaglevAssembler::ToMemOperand(
    const compiler::InstructionOperand& operand) {
  return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}

inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
  return ToMemOperand(location.operand());
}

inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
                                                        Register object) {
  DCHECK_NE(data_pointer, object);
  LoadExternalPointerField(
      data_pointer,
      FieldMemOperand(object, JSTypedArray::kExternalPointerOffset));
  if (JSTypedArray::kMaxSizeInHeap == 0) return;
  TemporaryRegisterScope scope(this);
  Register base = scope.AcquireScratch();
  if (COMPRESS_POINTERS_BOOL) {
    Ldr(base.W(), FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
  } else {
    Ldr(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
  }
  Add(data_pointer, data_pointer, base);
}
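
// Layout sketch this relies on: a JSTypedArray's data address is
// external_pointer + base_pointer, where base_pointer refers to the on-heap
// backing store (or is 0 for off-heap buffers), so the sum yields the data
// start in either case.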

inline MemOperand MaglevAssembler::TypedArrayElementOperand(
    Register data_pointer, Register index, int element_size) {
  Add(data_pointer, data_pointer,
      Operand(index, LSL, ShiftFromScale(element_size)));
  return MemOperand(data_pointer);
}

inline MemOperand MaglevAssembler::DataViewElementOperand(Register data_pointer,
                                                          Register index) {
  return MemOperand(data_pointer, index);
}

inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
                                                    Register object,
                                                    Register index, int scale,
                                                    int offset) {
  Add(result, object, Operand(index, LSL, ShiftFromScale(scale)));
  LoadTaggedField(result, FieldMemOperand(result, offset));
}

inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
                                                       Register object,
                                                       int offset) {
  Move(result, FieldMemOperand(object, offset));
#ifdef V8_ENABLE_SANDBOX
  Lsr(result, result, kBoundedSizeShift);
#endif  // V8_ENABLE_SANDBOX
}

inline void MaglevAssembler::LoadExternalPointerField(Register result,
                                                      MemOperand operand) {
#ifdef V8_ENABLE_SANDBOX
  LoadSandboxedPointerField(result, operand);
#else
  Move(result, operand);
#endif
}

void MaglevAssembler::LoadFixedArrayElement(Register result, Register array,
                                            Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  LoadTaggedFieldByIndex(result, array, index, kTaggedSize,
                         OFFSET_OF_DATA_START(FixedArray));
}

inline void MaglevAssembler::LoadTaggedFieldWithoutDecompressing(
    Register result, Register object, int offset) {
  MacroAssembler::LoadTaggedFieldWithoutDecompressing(
      result, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::LoadFixedArrayElementWithoutDecompressing(
    Register result, Register array, Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  Add(result, array, Operand(index, LSL, kTaggedSizeLog2));
  MacroAssembler::LoadTaggedFieldWithoutDecompressing(
      result, FieldMemOperand(result, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::LoadFixedDoubleArrayElement(DoubleRegister result,
                                                         Register array,
                                                         Register index) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_DOUBLE_ARRAY_TYPE,
                     AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  Add(scratch, array, Operand(index, LSL, kDoubleSizeLog2));
  Ldr(result, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::StoreFixedDoubleArrayElement(
    Register array, Register index, DoubleRegister value) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Add(scratch, array, Operand(index, LSL, kDoubleSizeLog2));
  Str(value, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::LoadSignedField(Register result,
                                             MemOperand operand, int size) {
  if (size == 1) {
    Ldrsb(result, operand);
  } else if (size == 2) {
    Ldrsh(result, operand);
  } else {
    DCHECK_EQ(size, 4);
    Ldr(result.W(), operand);
  }
}

inline void MaglevAssembler::LoadUnsignedField(Register result,
                                               MemOperand operand, int size) {
  if (size == 1) {
    Ldrb(result.W(), operand);
  } else if (size == 2) {
    Ldrh(result.W(), operand);
  } else {
    DCHECK_EQ(size, 4);
    Ldr(result.W(), operand);
  }
}

inline void MaglevAssembler::SetSlotAddressForTaggedField(Register slot_reg,
                                                          Register object,
                                                          int offset) {
  Add(slot_reg, object, offset - kHeapObjectTag);
}
inline void MaglevAssembler::SetSlotAddressForFixedArrayElement(
    Register slot_reg, Register object, Register index) {
  Add(slot_reg, object, OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag);
  Add(slot_reg, slot_reg, Operand(index, LSL, kTaggedSizeLog2));
}

inline void MaglevAssembler::StoreTaggedFieldNoWriteBarrier(Register object,
                                                            int offset,
                                                            Register value) {
  MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreFixedArrayElementNoWriteBarrier(
    Register array, Register index, Register value) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Add(scratch, array, Operand(index, LSL, kTaggedSizeLog2));
  MacroAssembler::StoreTaggedField(
      value, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
                                                    Register value) {
  AssertSmi(value);
  MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
                                                    Tagged<Smi> value) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Mov(scratch, value);
  MacroAssembler::StoreTaggedField(scratch, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreInt32Field(Register object, int offset,
                                             int32_t value) {
  if (value == 0) {
    Str(wzr, FieldMemOperand(object, offset));
    return;
  }
  TemporaryRegisterScope scope(this);
  Register scratch = scope.AcquireScratch().W();
  Move(scratch, value);
  Str(scratch, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreField(MemOperand operand, Register value,
                                        int size) {
  DCHECK(size == 1 || size == 2 || size == 4);
  if (size == 1) {
    Strb(value.W(), operand);
  } else if (size == 2) {
    Strh(value.W(), operand);
  } else {
    DCHECK_EQ(size, 4);
    Str(value.W(), operand);
  }
}

#ifdef V8_ENABLE_SANDBOX

inline void MaglevAssembler::StoreTrustedPointerFieldNoWriteBarrier(
    Register object, int offset, Register value) {
  MacroAssembler::StoreTrustedPointerField(value,
                                           FieldMemOperand(object, offset));
}

#endif  // V8_ENABLE_SANDBOX

inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
  if (size == 2) {
    Rev16(value, value);
    Sxth(value, value);
  } else if (size == 4) {
    Rev32(value, value);
  } else {
    DCHECK_EQ(size, 1);
  }
}
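
// Example: for size == 2, Rev16 swaps the bytes of each halfword and Sxth then
// sign-extends the low halfword, so the value 0x0080 becomes 0xffff8000,
// matching a sign-extending load of the byte-swapped int16.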

inline void MaglevAssembler::IncrementInt32(Register reg) {
  Add(reg.W(), reg.W(), Immediate(1));
}

inline void MaglevAssembler::DecrementInt32(Register reg) {
  Sub(reg.W(), reg.W(), Immediate(1));
}

inline void MaglevAssembler::AddInt32(Register reg, int amount) {
  Add(reg.W(), reg.W(), Immediate(amount));
}

inline void MaglevAssembler::AndInt32(Register reg, int mask) {
  And(reg.W(), reg.W(), Immediate(mask));
}

inline void MaglevAssembler::OrInt32(Register reg, int mask) {
  Orr(reg.W(), reg.W(), Immediate(mask));
}

inline void MaglevAssembler::AndInt32(Register reg, Register other) {
  And(reg.W(), reg.W(), other.W());
}

inline void MaglevAssembler::OrInt32(Register reg, Register other) {
  Orr(reg.W(), reg.W(), other.W());
}

inline void MaglevAssembler::ShiftLeft(Register reg, int amount) {
  Lsl(reg.W(), reg.W(), amount);
}

inline void MaglevAssembler::IncrementAddress(Register reg, int32_t delta) {
  Add(reg.X(), reg.X(), Immediate(delta));
}

inline void MaglevAssembler::LoadAddress(Register dst, MemOperand location) {
  DCHECK(location.IsImmediateOffset());
  Add(dst.X(), location.base(), Immediate(location.offset()));
}

inline void MaglevAssembler::Call(Label* target) { bl(target); }

inline void MaglevAssembler::EmitEnterExitFrame(int extra_slots,
                                                StackFrame::Type frame_type,
                                                Register c_function,
                                                Register scratch) {
  EnterExitFrame(scratch, extra_slots, frame_type);
}

inline void MaglevAssembler::Move(StackSlot dst, Register src) {
  Str(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
  Str(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(Register dst, StackSlot src) {
  Ldr(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
  Ldr(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(MemOperand dst, Register src) {
  Str(src, dst);
}
inline void MaglevAssembler::Move(Register dst, MemOperand src) {
  Ldr(dst, src);
}
inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  Fmov(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Tagged<Smi> src) {
  MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, ExternalReference src) {
  Mov(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Register src) {
  MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Tagged<TaggedIndex> i) {
  Mov(dst, i.ptr());
}
inline void MaglevAssembler::Move(Register dst, int32_t i) {
  Mov(dst.W(), Immediate(i));
}
inline void MaglevAssembler::Move(Register dst, uint32_t i) {
  Mov(dst.W(), Immediate(i));
}
inline void MaglevAssembler::Move(Register dst, intptr_t i) {
  Mov(dst, Immediate(i));
}
inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
  Fmov(dst, n);
}
inline void MaglevAssembler::Move(DoubleRegister dst, Float64 n) {
  Fmov(dst, n.get_scalar());
}
inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
  Mov(dst, Operand(obj));
}
void MaglevAssembler::MoveTagged(Register dst, Handle<HeapObject> obj) {
#ifdef V8_COMPRESS_POINTERS
  Mov(dst.W(), Operand(obj, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
#else
  Mov(dst, Operand(obj));
#endif
}

inline void MaglevAssembler::LoadInt32(Register dst, MemOperand src) {
  Ldr(dst.W(), src);
}

inline void MaglevAssembler::StoreInt32(MemOperand dst, Register src) {
  Str(src.W(), dst);
}

inline void MaglevAssembler::LoadFloat32(DoubleRegister dst, MemOperand src) {
  Ldr(dst.S(), src);
  Fcvt(dst, dst.S());
}
inline void MaglevAssembler::StoreFloat32(MemOperand dst, DoubleRegister src) {
  TemporaryRegisterScope temps(this);
  DoubleRegister scratch = temps.AcquireScratchDouble();
  Fcvt(scratch.S(), src);
  Str(scratch.S(), dst);
}
inline void MaglevAssembler::LoadFloat64(DoubleRegister dst, MemOperand src) {
  Ldr(dst, src);
}
inline void MaglevAssembler::StoreFloat64(MemOperand dst, DoubleRegister src) {
  Str(src, dst);
}

inline void MaglevAssembler::LoadUnalignedFloat64(DoubleRegister dst,
                                                  Register base,
                                                  Register index) {
  LoadFloat64(dst, MemOperand(base, index));
}
inline void MaglevAssembler::LoadUnalignedFloat64AndReverseByteOrder(
    DoubleRegister dst, Register base, Register index) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Ldr(scratch, MemOperand(base, index));
  Rev(scratch, scratch);
  Fmov(dst, scratch);
}
inline void MaglevAssembler::StoreUnalignedFloat64(Register base,
                                                   Register index,
                                                   DoubleRegister src) {
  StoreFloat64(MemOperand(base, index), src);
}
inline void MaglevAssembler::ReverseByteOrderAndStoreUnalignedFloat64(
    Register base, Register index, DoubleRegister src) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Fmov(scratch, src);
  Rev(scratch, scratch);
  Str(scratch, MemOperand(base, index));
}

inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
  Mov(dst, Operand(src.W(), SXTW));
}
inline void MaglevAssembler::NegateInt32(Register val) {
  Neg(val.W(), val.W());
}

inline void MaglevAssembler::ToUint8Clamped(Register result,
                                            DoubleRegister value, Label* min,
                                            Label* max, Label* done) {
  TemporaryRegisterScope temps(this);
  DoubleRegister scratch = temps.AcquireScratchDouble();
  Move(scratch, 0.0);
  Fcmp(scratch, value);
  // Set to 0 if NaN.
  B(vs, min);
  B(ge, min);
  Move(scratch, 255.0);
  Fcmp(value, scratch);
  B(ge, max);
  // If the value is in (0, 255), round to the nearest integer.
  Frintn(scratch, value);
  TruncateDoubleToInt32(result, scratch);
  B(done);
}
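
// Contract sketch: NaN and values <= 0.0 branch to `min`, values >= 255.0
// branch to `max`, and anything strictly inside (0, 255) is rounded to the
// nearest integer (ties to even, per Frintn) before jumping to `done`.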

template <typename NodeT>
inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
                                                   Register scratch,
                                                   NodeT* node) {
  // A detached buffer leads to megamorphic feedback, so we won't have a deopt
  // loop if we deopt here.
  LoadTaggedField(scratch,
                  FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
  LoadTaggedField(scratch,
                  FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  Tst(scratch.W(), Immediate(JSArrayBuffer::WasDetachedBit::kMask));
  EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node);
}

inline void MaglevAssembler::LoadByte(Register dst, MemOperand src) {
  Ldrb(dst, src);
}

inline Condition MaglevAssembler::IsCallableAndNotUndetectable(
    Register map, Register scratch) {
  Ldrb(scratch.W(), FieldMemOperand(map, Map::kBitFieldOffset));
  And(scratch.W(), scratch.W(),
      Map::Bits1::IsUndetectableBit::kMask | Map::Bits1::IsCallableBit::kMask);
  Cmp(scratch.W(), Map::Bits1::IsCallableBit::kMask);
  return kEqual;
}

inline Condition MaglevAssembler::IsNotCallableNorUndetactable(
    Register map, Register scratch) {
  Ldrb(scratch.W(), FieldMemOperand(map, Map::kBitFieldOffset));
  Tst(scratch.W(), Immediate(Map::Bits1::IsUndetectableBit::kMask |
                             Map::Bits1::IsCallableBit::kMask));
  return kEqual;
}

inline void MaglevAssembler::LoadInstanceType(Register instance_type,
                                              Register heap_object) {
  LoadMap(instance_type, heap_object);
  Ldrh(instance_type.W(),
       FieldMemOperand(instance_type, Map::kInstanceTypeOffset));
}

inline void MaglevAssembler::JumpIfObjectType(Register heap_object,
                                              InstanceType type, Label* target,
                                              Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectType(heap_object, scratch, scratch, type);
  JumpIf(kEqual, target, distance);
}

inline void MaglevAssembler::JumpIfNotObjectType(Register heap_object,
                                                 InstanceType type,
                                                 Label* target,
                                                 Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectType(heap_object, scratch, scratch, type);
  JumpIf(kNotEqual, target, distance);
}

inline void MaglevAssembler::AssertObjectType(Register heap_object,
                                              InstanceType type,
                                              AbortReason reason) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  IsObjectType(heap_object, scratch, scratch, type);
  Assert(kEqual, reason);
}

inline void MaglevAssembler::BranchOnObjectType(
    Register heap_object, InstanceType type, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectType(heap_object, scratch, scratch, type);
  Branch(kEqual, if_true, true_distance, fallthrough_when_true, if_false,
         false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::JumpIfObjectTypeInRange(Register heap_object,
                                                     InstanceType lower_limit,
                                                     InstanceType higher_limit,
                                                     Label* target,
                                                     Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectTypeInRange(heap_object, scratch, lower_limit, higher_limit);
  JumpIf(kUnsignedLessThanEqual, target, distance);
}

inline void MaglevAssembler::JumpIfObjectTypeNotInRange(
    Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
    Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectTypeInRange(heap_object, scratch, lower_limit, higher_limit);
  JumpIf(kUnsignedGreaterThan, target, distance);
}

inline void MaglevAssembler::AssertObjectTypeInRange(Register heap_object,
                                                     InstanceType lower_limit,
                                                     InstanceType higher_limit,
                                                     AbortReason reason) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  IsObjectTypeInRange(heap_object, scratch, lower_limit, higher_limit);
  Assert(kUnsignedLessThanEqual, reason);
}

inline void MaglevAssembler::BranchOnObjectTypeInRange(
    Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
    Label* if_true, Label::Distance true_distance, bool fallthrough_when_true,
    Label* if_false, Label::Distance false_distance,
    bool fallthrough_when_false) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectTypeInRange(heap_object, scratch, lower_limit, higher_limit);
  Branch(kUnsignedLessThanEqual, if_true, true_distance, fallthrough_when_true,
         if_false, false_distance, fallthrough_when_false);
}

#if V8_STATIC_ROOTS_BOOL
inline void MaglevAssembler::JumpIfObjectInRange(Register heap_object,
                                                 Tagged_t lower_limit,
                                                 Tagged_t higher_limit,
                                                 Label* target,
                                                 Label::Distance distance) {
  // Only allowed for comparisons against RORoots.
  DCHECK_LE(lower_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  DCHECK_LE(higher_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  CompareRange(heap_object, scratch, lower_limit, higher_limit);
  JumpIf(kUnsignedLessThanEqual, target, distance);
}

inline void MaglevAssembler::JumpIfObjectNotInRange(Register heap_object,
                                                    Tagged_t lower_limit,
                                                    Tagged_t higher_limit,
                                                    Label* target,
                                                    Label::Distance distance) {
  // Only allowed for comparisons against RORoots.
  DCHECK_LE(lower_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  DCHECK_LE(higher_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  CompareRange(heap_object, scratch, lower_limit, higher_limit);
  JumpIf(kUnsignedGreaterThan, target, distance);
}

inline void MaglevAssembler::AssertObjectInRange(Register heap_object,
                                                 Tagged_t lower_limit,
                                                 Tagged_t higher_limit,
                                                 AbortReason reason) {
  // Only allowed for comparisons against RORoots.
  DCHECK_LE(lower_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  DCHECK_LE(higher_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  CompareRange(heap_object, scratch, lower_limit, higher_limit);
  Assert(kUnsignedLessThanEqual, reason);
}
#endif

inline void MaglevAssembler::JumpIfJSAnyIsNotPrimitive(
    Register heap_object, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target,
                                            distance);
}

inline void MaglevAssembler::CompareMapWithRoot(Register object,
                                                RootIndex index,
                                                Register scratch) {
  if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
    LoadCompressedMap(scratch, object);
    CmpTagged(scratch, Immediate(ReadOnlyRootPtr(index)));
    return;
  }
  LoadMap(scratch, object);
  CompareRoot(scratch, index);
}

inline void MaglevAssembler::CompareInstanceType(Register map,
                                                 InstanceType instance_type) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::CompareInstanceType(map, scratch, instance_type);
}

inline void MaglevAssembler::CompareInstanceTypeRange(
    Register map, Register instance_type_out, InstanceType lower_limit,
    InstanceType higher_limit) {
  MacroAssembler::CompareInstanceTypeRange(map, instance_type_out, lower_limit,
                                           higher_limit);
}

inline void MaglevAssembler::CompareFloat64AndJumpIf(
    DoubleRegister src1, DoubleRegister src2, Condition cond, Label* target,
    Label* nan_failed, Label::Distance distance) {
  Fcmp(src1, src2);
  JumpIf(ConditionForNaN(), nan_failed);
  JumpIf(cond, target, distance);
}

inline void MaglevAssembler::CompareFloat64AndBranch(
    DoubleRegister src1, DoubleRegister src2, Condition cond,
    BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block,
    BasicBlock* nan_failed) {
  Fcmp(src1, src2);
  JumpIf(ConditionForNaN(), nan_failed->label());
  Branch(cond, if_true, if_false, next_block);
}

inline void MaglevAssembler::PrepareCallCFunction(int num_reg_arguments,
                                                  int num_double_registers) {}

inline void MaglevAssembler::CallSelf() {
  DCHECK(allow_call());
  DCHECK(code_gen_state()->entry_label()->is_bound());
  Bl(code_gen_state()->entry_label());
}

inline void MaglevAssembler::Jump(Label* target, Label::Distance) {
  // Any eager deopts should go through JumpIf to enable us to support the
  // `--deopt-every-n-times` stress mode. See EmitEagerDeoptStress.
  DCHECK(!IsDeoptLabel(target));
  B(target);
}

inline void MaglevAssembler::JumpToDeopt(Label* target) {
  DCHECK(IsDeoptLabel(target));
  B(target);
}

inline void MaglevAssembler::EmitEagerDeoptStress(Label* target) {
  // TODO(olivf): On arm `--deopt-every-n-times` is currently not supported.
  // Supporting it would require implementing this method, additionally
  // handling deopt branches in Cbz, and handling all cases where we fall
  // through to the deopt branch (like Int32Divide).
}

inline void MaglevAssembler::JumpIf(Condition cond, Label* target,
                                    Label::Distance) {
  B(target, cond);
}

inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index,
                                        Label* if_equal,
                                        Label::Distance distance) {
  MacroAssembler::JumpIfRoot(with, index, if_equal);
}

inline void MaglevAssembler::JumpIfNotRoot(Register with, RootIndex index,
                                           Label* if_not_equal,
                                           Label::Distance distance) {
  MacroAssembler::JumpIfNotRoot(with, index, if_not_equal);
}

inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
                                       Label::Distance distance) {
  MacroAssembler::JumpIfSmi(src, on_smi);
}

inline void MaglevAssembler::JumpIfNotSmi(Register src, Label* on_smi,
                                          Label::Distance distance) {
  MacroAssembler::JumpIfNotSmi(src, on_smi);
}

void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                 Label* target, Label::Distance) {
  CompareAndBranch(value, Immediate(byte), cc, target);
}

void MaglevAssembler::JumpIfHoleNan(DoubleRegister value, Register scratch,
                                    Label* target, Label::Distance distance) {
  // TODO(leszeks): Right now this only accepts Zone-allocated target labels.
  // This works because all callsites are jumping to either a deopt, deferred
  // code, or a basic block. If we ever need to jump to an on-stack label, we
  // have to add support for it here and change the caller to pass a
  // ZoneLabelRef.
  DCHECK(compilation_info()->zone()->Contains(target));
  ZoneLabelRef is_hole = ZoneLabelRef::UnsafeFromLabelPointer(target);
  ZoneLabelRef is_not_hole(this);
  Fcmp(value, value);
  JumpIf(ConditionForNaN(),
         MakeDeferredCode(
             [](MaglevAssembler* masm, DoubleRegister value, Register scratch,
                ZoneLabelRef is_hole, ZoneLabelRef is_not_hole) {
               masm->Umov(scratch.W(), value.V2S(), 1);
               masm->CompareInt32AndJumpIf(scratch.W(), kHoleNanUpper32, kEqual,
                                           *is_hole);
               masm->Jump(*is_not_hole);
             },
             value, scratch, is_hole, is_not_hole));
  bind(*is_not_hole);
}
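
// The hole is a NaN whose upper 32 bits equal kHoleNanUpper32. The deferred
// code above extracts bits 32..63 with Umov(scratch.W(), value.V2S(), 1)
// (lane 1 of the 2x32-bit view) and compares them; ordinary NaNs produced by
// arithmetic have a different upper half and land on is_not_hole.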

void MaglevAssembler::JumpIfNotHoleNan(DoubleRegister value, Register scratch,
                                       Label* target,
                                       Label::Distance distance) {
  JumpIfNotNan(value, target, distance);
  Umov(scratch.W(), value.V2S(), 1);
  CompareInt32AndJumpIf(scratch.W(), kHoleNanUpper32, kNotEqual, target,
                        distance);
}

void MaglevAssembler::JumpIfNotHoleNan(MemOperand operand, Label* target,
                                       Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register upper_bits = temps.AcquireScratch();
  DCHECK(operand.IsImmediateOffset() && operand.shift_amount() == 0);
  Ldr(upper_bits.W(),
      MemOperand(operand.base(), operand.offset() + (kDoubleSize / 2),
                 operand.addrmode()));
  CompareInt32AndJumpIf(upper_bits.W(), kHoleNanUpper32, kNotEqual, target,
                        distance);
}

void MaglevAssembler::JumpIfNan(DoubleRegister value, Label* target,
                                Label::Distance distance) {
  Fcmp(value, value);
  JumpIf(ConditionForNaN(), target, distance);
}

void MaglevAssembler::JumpIfNotNan(DoubleRegister value, Label* target,
                                   Label::Distance distance) {
  Fcmp(value, value);
  JumpIf(NegateCondition(ConditionForNaN()), target, distance);
}

inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
                                                   Condition cond,
                                                   Label* target,
                                                   Label::Distance distance) {
  CompareAndBranch(r1.W(), r2.W(), cond, target);
}

void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, Register r2,
                                             Condition cond, Label* target,
                                             Label::Distance distance) {
  CompareAndBranch(r1.X(), r2.X(), cond, target);
}

void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, int32_t value,
                                             Condition cond, Label* target,
                                             Label::Distance distance) {
  CompareAndBranch(r1.X(), Immediate(value), cond, target);
}

inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
                                                   Condition cond,
                                                   Label* target,
                                                   Label::Distance distance) {
  CompareAndBranch(r1.W(), Immediate(value), cond, target);
}

inline void MaglevAssembler::CompareInt32AndAssert(Register r1, Register r2,
                                                   Condition cond,
                                                   AbortReason reason) {
  Cmp(r1.W(), r2.W());
  Assert(cond, reason);
}
inline void MaglevAssembler::CompareInt32AndAssert(Register r1, int32_t value,
                                                   Condition cond,
                                                   AbortReason reason) {
  Cmp(r1.W(), Immediate(value));
  Assert(cond, reason);
}

inline void MaglevAssembler::CompareInt32AndBranch(
    Register r1, int32_t value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  Cmp(r1.W(), Immediate(value));
  Branch(cond, if_true, true_distance, fallthrough_when_true, if_false,
         false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::CompareInt32AndBranch(
    Register r1, Register value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  Cmp(r1.W(), value.W());
  Branch(cond, if_true, true_distance, fallthrough_when_true, if_false,
         false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::CompareIntPtrAndBranch(
    Register r1, int32_t value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  Cmp(r1.X(), Immediate(value));
  Branch(cond, if_true, true_distance, fallthrough_when_true, if_false,
         false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::CompareSmiAndJumpIf(Register r1, Tagged<Smi> value,
                                                 Condition cond, Label* target,
                                                 Label::Distance distance) {
  AssertSmi(r1);
  CompareTaggedAndBranch(r1, Immediate(value), cond, target);
}

inline void MaglevAssembler::CompareSmiAndAssert(Register r1, Tagged<Smi> value,
                                                 Condition cond,
                                                 AbortReason reason) {
  if (!v8_flags.debug_code) return;
  AssertSmi(r1);
  CmpTagged(r1, value);
  Assert(cond, reason);
}

inline void MaglevAssembler::CompareByteAndJumpIf(MemOperand left, int8_t right,
                                                  Condition cond,
                                                  Register scratch,
                                                  Label* target,
                                                  Label::Distance distance) {
  LoadByte(scratch.W(), left);
  CompareAndBranch(scratch.W(), Immediate(right), cond, target);
}

inline void MaglevAssembler::CompareTaggedAndJumpIf(Register r1,
                                                    Tagged<Smi> value,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  CompareTaggedAndBranch(r1, Immediate(value), cond, target);
}

inline void MaglevAssembler::CompareTaggedAndJumpIf(Register r1,
                                                    Handle<HeapObject> obj,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  CmpTagged(r1, Operand(obj, COMPRESS_POINTERS_BOOL
                                 ? RelocInfo::COMPRESSED_EMBEDDED_OBJECT
                                 : RelocInfo::FULL_EMBEDDED_OBJECT));
  JumpIf(cond, target, distance);
}

inline void MaglevAssembler::CompareTaggedAndJumpIf(Register src1,
                                                    Register src2,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  CmpTagged(src1, src2);
  JumpIf(cond, target, distance);
}

inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
    DoubleRegister reg, Label* target, Label::Distance distance) {
  Fcmp(reg, 0.0);
  JumpIf(eq, target);
  JumpIf(vs, target);  // NaN check
}

inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
    MemOperand operand, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  DoubleRegister value_double = temps.AcquireScratchDouble();
  Ldr(value_double, operand);
  CompareDoubleAndJumpIfZeroOrNaN(value_double, target, distance);
}

inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
    Register r1, int32_t mask, Label* target, Label::Distance distance) {
  TestAndBranchIfAnySet(r1.W(), mask, target);
}

inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
    MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register value = temps.AcquireScratch().W();
  Ldr(value, operand);
  TestAndBranchIfAnySet(value, mask, target);
}

inline void MaglevAssembler::TestUint8AndJumpIfAnySet(
    MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register value = temps.AcquireScratch().W();
  LoadByte(value, operand);
  TestAndBranchIfAnySet(value, mask, target);
}

inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
    Register r1, int32_t mask, Label* target, Label::Distance distance) {
  TestAndBranchIfAllClear(r1.W(), mask, target);
}

inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
    MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register value = temps.AcquireScratch().W();
  Ldr(value, operand);
  TestAndBranchIfAllClear(value, mask, target);
}

inline void MaglevAssembler::TestUint8AndJumpIfAllClear(
    MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register value = temps.AcquireScratch().W();
  LoadByte(value, operand);
  TestAndBranchIfAllClear(value, mask, target);
}

inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
                                                 Register heap_number) {
  Ldr(result, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
}

inline void MaglevAssembler::LoadHeapInt32Value(Register result,
                                                Register heap_number) {
  Ldr(result, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
}

inline void MaglevAssembler::StoreHeapInt32Value(Register value,
                                                 Register heap_number) {
  Str(value, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
}

inline void MaglevAssembler::Int32ToDouble(DoubleRegister result,
                                           Register src) {
  Scvtf(result, src.W());
}

inline void MaglevAssembler::Uint32ToDouble(DoubleRegister result,
                                            Register src) {
  Ucvtf(result, src.W());
}

inline void MaglevAssembler::IntPtrToDouble(DoubleRegister result,
                                            Register src) {
  Scvtf(result, src.X());
}

inline void MaglevAssembler::Pop(Register dst) { Pop(dst, padreg); }

inline void MaglevAssembler::AssertStackSizeCorrect() {
  if (v8_flags.slow_debug_code) {
    TemporaryRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    Add(scratch, sp,
        RoundUp<2 * kSystemPointerSize>(
            code_gen_state()->stack_slots() * kSystemPointerSize +
            StandardFrameConstants::kFixedFrameSizeFromFp));
    Cmp(scratch, fp);
    Assert(eq, AbortReason::kStackAccessBelowStackPointer);
  }
}

inline Condition MaglevAssembler::FunctionEntryStackCheck(
    int stack_check_offset) {
  TemporaryRegisterScope temps(this);
  Register stack_cmp_reg = sp;
  if (stack_check_offset >= kStackLimitSlackForDeoptimizationInBytes) {
    stack_cmp_reg = temps.AcquireScratch();
    Sub(stack_cmp_reg, sp, stack_check_offset);
  }
  Register interrupt_stack_limit = temps.AcquireScratch();
  LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit);
  Cmp(stack_cmp_reg, interrupt_stack_limit);
  return kUnsignedGreaterThanEqual;
}
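
// Usage sketch, assuming a prologue with a slow-path label: the returned
// condition holds when the stack has enough room, e.g.
//   Condition ok = FunctionEntryStackCheck(offset);
//   JumpIf(NegateCondition(ok), &call_stack_guard);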

inline void MaglevAssembler::FinishCode() {
  ForceConstantPoolEmissionWithoutJump();
}

template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeoptIfNotEqual(DeoptimizeReason reason,
                                                      NodeT* node) {
  EmitEagerDeoptIf(ne, reason, node);
}

template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
                                      Register src) {
  Mov(dst, src);
}
template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
                                      MemOperand src) {
  switch (repr) {
    case MachineRepresentation::kWord32:
      return Ldr(dst.W(), src);
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord64:
      return Ldr(dst, src);
    default:
      UNREACHABLE();
  }
}
template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
                                      MemOperand dst, Register src) {
  switch (repr) {
    case MachineRepresentation::kWord32:
      return Str(src.W(), dst);
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord64:
      return Str(src, dst);
    default:
      UNREACHABLE();
  }
}
template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
                                      MemOperand dst, MemOperand src) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MoveRepr(repr, scratch, src);
  MoveRepr(repr, dst, scratch);
}

inline void MaglevAssembler::MaybeEmitPlaceHolderForDeopt() {
  // Implemented only for x64.
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8

#endif  // V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_