maglev-assembler-arm-inl.h
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_MAGLEV_ARM_MAGLEV_ASSEMBLER_ARM_INL_H_
6#define V8_MAGLEV_ARM_MAGLEV_ASSEMBLER_ARM_INL_H_
7
11#include "src/common/globals.h"
16
17namespace v8 {
18namespace internal {
19namespace maglev {
20
21constexpr Condition ConditionForFloat64(Operation operation) {
22 return ConditionFor(operation);
23}
24
25constexpr Condition ConditionForNaN() { return vs; }
26
27inline int ShiftFromScale(int n) {
28 switch (n) {
29 case 1:
30 return 0;
31 case 2:
32 return 1;
33 case 4:
34 return 2;
35 case 8:
36 return 3;
37 default:
38 UNREACHABLE();
39 }
40}
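// ShiftFromScale maps an element size in bytes to the LSL shift amount used
// when scaling an index register, e.g. an 8-byte element becomes a shift of 3
// so that Operand(index, LSL, 3) adds index * 8 to the base address.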
41
42class MaglevAssembler::TemporaryRegisterScope
43 : public TemporaryRegisterScopeBase<TemporaryRegisterScope> {
45
46 public:
47 struct SavedData : public Base::SavedData {
50 };
51
52 explicit TemporaryRegisterScope(MaglevAssembler* masm)
53 : Base(masm), scratch_scope_(masm) {
54 if (prev_scope_ == nullptr) {
55 // Add extra scratch register if no previous scope.
56 scratch_scope_.Include(kMaglevExtraScratchRegister);
57 }
58 }
59 TemporaryRegisterScope(MaglevAssembler* masm,
60 const SavedData& saved_data)
61 : Base(masm, saved_data), scratch_scope_(masm) {
64 }
65
77
85
91
92 private:
93 UseScratchRegisterScope scratch_scope_;
94};
95
96inline MapCompare::MapCompare(MaglevAssembler* masm, Register object,
97 size_t map_count)
98 : masm_(masm), object_(object), map_count_(map_count) {
99 map_ = masm_->scratch_register_scope()->Acquire();
102}
103
105 Label::Distance distance) {
107 Register temp = temps.AcquireScratch();
108 masm_->Move(temp, map);
109 masm_->cmp(map_, temp);
110 masm_->JumpIf(cond, if_true, distance);
111}
112
114
115int MapCompare::TemporaryCount(size_t map_count) { return 1; }
116
117namespace detail {
118
119template <typename... Args>
120struct PushAllHelper;
121
122template <>
123struct PushAllHelper<> {
124 static void Push(MaglevAssembler* masm) {}
125 static void PushReverse(MaglevAssembler* masm) {}
126};
127
128inline void PushInput(MaglevAssembler* masm, const Input& input) {
129 if (input.operand().IsConstant()) {
130 MaglevAssembler::TemporaryRegisterScope temps(masm);
131 Register scratch = temps.AcquireScratch();
132 input.node()->LoadToRegister(masm, scratch);
133 masm->Push(scratch);
134 } else {
135 // TODO(leszeks): Consider special casing the value. (Toon: could possibly
136 // be done through Input directly?)
137 const compiler::AllocatedOperand& operand =
138 compiler::AllocatedOperand::cast(input.operand());
139 if (operand.IsRegister()) {
140 masm->Push(operand.GetRegister());
141 } else {
142 DCHECK(operand.IsStackSlot());
143 MaglevAssembler::TemporaryRegisterScope temps(masm);
144 Register scratch = temps.AcquireScratch();
145 masm->ldr(scratch, masm->GetStackSlot(operand));
146 masm->Push(scratch);
147 }
148 }
149}
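// PushInput handles the three places a node input can live: constants are
// materialized into a scratch register, register-allocated inputs are pushed
// directly, and stack-slot inputs are loaded into a scratch register first.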
150
151template <typename T, typename... Args>
152inline void PushIterator(MaglevAssembler* masm, base::iterator_range<T> range,
153 Args... args) {
154 for (auto iter = range.begin(), end = range.end(); iter != end; ++iter) {
155 masm->Push(*iter);
156 }
157 PushAllHelper<Args...>::Push(masm, args...);
158}
159
160template <typename T, typename... Args>
161inline void PushIteratorReverse(MaglevAssembler* masm,
162 base::iterator_range<T> range, Args... args) {
163 PushAllHelper<Args...>::PushReverse(masm, args...);
164 for (auto iter = range.rbegin(), end = range.rend(); iter != end; ++iter) {
165 masm->Push(*iter);
166 }
167}
168
169template <typename... Args>
170struct PushAllHelper<Input, Args...> {
171 static void Push(MaglevAssembler* masm, const Input& arg, Args... args) {
172 PushInput(masm, arg);
173 PushAllHelper<Args...>::Push(masm, args...);
174 }
175 static void PushReverse(MaglevAssembler* masm, const Input& arg,
176 Args... args) {
177 PushAllHelper<Args...>::PushReverse(masm, args...);
178 PushInput(masm, arg);
179 }
180};
181template <typename Arg, typename... Args>
182struct PushAllHelper<Arg, Args...> {
183 static void Push(MaglevAssembler* masm, Arg arg, Args... args) {
184 if constexpr (is_iterator_range<Arg>::value) {
185 PushIterator(masm, arg, args...);
186 } else {
187 masm->MacroAssembler::Push(arg);
188 PushAllHelper<Args...>::Push(masm, args...);
189 }
190 }
191 static void PushReverse(MaglevAssembler* masm, Arg arg, Args... args) {
192 if constexpr (is_iterator_range<Arg>::value) {
193 PushIteratorReverse(masm, arg, args...);
194 } else {
195 PushAllHelper<Args...>::PushReverse(masm, args...);
196 masm->Push(arg);
197 }
198 }
199};
200
201} // namespace detail
202
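// The variadic Push/PushReverse entry points below expand their arguments
// through the PushAllHelper machinery above: node inputs go through PushInput,
// iterator ranges are expanded element by element, and everything else falls
// back to MacroAssembler::Push.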
203template <typename... T>
204void MaglevAssembler::Push(T... vals) {
205 detail::PushAllHelper<T...>::Push(this, vals...);
206}
207
208template <typename... T>
209void MaglevAssembler::PushReverse(T... vals) {
210 detail::PushAllHelper<T...>::PushReverse(this, vals...);
211}
212
214
215inline void MaglevAssembler::BindBlock(BasicBlock* block) {
216 bind(block->label());
217}
218
219inline void MaglevAssembler::SmiTagInt32AndSetFlags(Register dst,
220 Register src) {
221 add(dst, src, src, SetCC);
222}
223
224inline void MaglevAssembler::CheckInt32IsSmi(Register obj, Label* fail,
225 Register scratch) {
226 static_assert(!SmiValuesAre32Bits());
227
228 TemporaryRegisterScope temps(this);
229 if (scratch == Register::no_reg()) {
230 scratch = temps.AcquireScratch();
231 }
232 add(scratch, obj, obj, SetCC);
233 JumpIf(kOverflow, fail);
234}
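// CheckInt32IsSmi relies on the 31-bit Smi encoding used on arm: tagging is a
// doubling (the add with SetCC above), so the int32 fits in a Smi exactly when
// that addition does not set the overflow flag.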
235
236inline void MaglevAssembler::SmiAddConstant(Register dst, Register src,
237 int value, Label* fail,
238 Label::Distance distance) {
239 static_assert(!SmiValuesAre32Bits());
240 AssertSmi(src);
241 if (value != 0) {
242 add(dst, src, Operand(Smi::FromInt(value)), SetCC);
243 JumpIf(kOverflow, fail, distance);
244 } else {
245 Move(dst, src);
246 }
247}
248
249inline void MaglevAssembler::SmiSubConstant(Register dst, Register src,
250 int value, Label* fail,
251 Label::Distance distance) {
252 static_assert(!SmiValuesAre32Bits());
253 AssertSmi(src);
254 if (value != 0) {
255 sub(dst, src, Operand(Smi::FromInt(value)), SetCC);
256 JumpIf(kOverflow, fail, distance);
257 } else {
258 Move(dst, src);
259 }
260}
261
262inline void MaglevAssembler::MoveHeapNumber(Register dst, double value) {
263 mov(dst, Operand::EmbeddedNumber(value));
264}
265
266inline Condition MaglevAssembler::IsRootConstant(Input input,
267 RootIndex root_index) {
268 if (input.operand().IsRegister()) {
269 CompareRoot(ToRegister(input), root_index);
270 } else {
271 DCHECK(input.operand().IsStackSlot());
272 TemporaryRegisterScope temps(this);
273 Register scratch = temps.AcquireScratch();
274 ldr(scratch, ToMemOperand(input));
275 CompareRoot(scratch, root_index);
276 }
277 return eq;
278}
279
283
285
286// TODO(Victorgomes): Unify this to use StackSlot struct.
291
296
297inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
298 return ToMemOperand(location.operand());
299}
300
301inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
302 Register object) {
303 DCHECK_NE(data_pointer, object);
304 ldr(data_pointer,
305 FieldMemOperand(object, JSTypedArray::kExternalPointerOffset));
306 if (JSTypedArray::kMaxSizeInHeap == 0) return;
307 TemporaryRegisterScope temps(this);
308 Register base = temps.AcquireScratch();
309 ldr(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
310 add(data_pointer, data_pointer, base);
311}
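// When typed arrays can have on-heap backing stores (kMaxSizeInHeap != 0), the
// external pointer alone is not the full address, so the base pointer loaded
// from the object is added to form the actual data pointer.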
312
313inline MemOperand MaglevAssembler::TypedArrayElementOperand(
314 Register data_pointer, Register index, int element_size) {
315 add(data_pointer, data_pointer,
316 Operand(index, LSL, ShiftFromScale(element_size)));
317 return MemOperand(data_pointer);
318}
319
320inline MemOperand MaglevAssembler::DataViewElementOperand(Register data_pointer,
321 Register index) {
322 return MemOperand(data_pointer, index);
323}
324
332
338
339inline void MaglevAssembler::LoadExternalPointerField(Register result,
340 MemOperand operand) {
341 Move(result, operand);
342}
343
344inline void MaglevAssembler::LoadFixedArrayElement(Register result, Register array,
345 Register index) {
346 if (v8_flags.debug_code) {
347 AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
348 CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
349 AbortReason::kUnexpectedNegativeValue);
350 }
351 LoadTaggedFieldByIndex(result, array, index, kTaggedSize,
352 OFFSET_OF_DATA_START(FixedArray));
353}
354
355inline void MaglevAssembler::LoadFixedArrayElementWithoutDecompressing(
356 Register result, Register array, Register index) {
357 // No compression mode on arm.
358 LoadFixedArrayElement(result, array, index);
359}
360
361inline void MaglevAssembler::LoadFixedDoubleArrayElement(DoubleRegister result,
362 Register array,
363 Register index) {
364 TemporaryRegisterScope temps(this);
365 Register scratch = temps.AcquireScratch();
366 if (v8_flags.debug_code) {
367 AssertObjectType(array, FIXED_DOUBLE_ARRAY_TYPE,
368 AbortReason::kUnexpectedValue);
369 CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
370 AbortReason::kUnexpectedNegativeValue);
371 }
372 add(scratch, array, Operand(index, LSL, kDoubleSizeLog2));
373 vldr(result, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedDoubleArray)));
374}
375
376inline void MaglevAssembler::StoreFixedDoubleArrayElement(
377 Register array, Register index, DoubleRegister value) {
378 TemporaryRegisterScope temps(this);
379 Register scratch = temps.AcquireScratch();
380 add(scratch, array, Operand(index, LSL, kDoubleSizeLog2));
381 vstr(value, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedDoubleArray)));
382}
383
384inline void MaglevAssembler::LoadSignedField(Register result,
385 MemOperand operand, int size) {
386 if (size == 1) {
387 ldrsb(result, operand);
388 } else if (size == 2) {
389 ldrsh(result, operand);
390 } else {
391 DCHECK_EQ(size, 4);
392 ldr(result, operand);
393 }
394}
395
396inline void MaglevAssembler::LoadUnsignedField(Register result,
397 MemOperand operand, int size) {
398 if (size == 1) {
399 ldrb(result, operand);
400 } else if (size == 2) {
401 ldrh(result, operand);
402 } else {
403 DCHECK_EQ(size, 4);
404 ldr(result, operand);
405 }
406}
407
408inline void MaglevAssembler::SetSlotAddressForTaggedField(Register slot_reg,
409 Register object,
410 int offset) {
411 add(slot_reg, object, Operand(offset - kHeapObjectTag));
412}
413inline void MaglevAssembler::SetSlotAddressForFixedArrayElement(
414 Register slot_reg, Register object, Register index) {
415 add(slot_reg, object,
416 Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
417 add(slot_reg, slot_reg, Operand(index, LSL, kTaggedSizeLog2));
418}
419
425
426inline void MaglevAssembler::StoreFixedArrayElementNoWriteBarrier(
427 Register array, Register index, Register value) {
428 TemporaryRegisterScope temps(this);
429 Register scratch = temps.AcquireScratch();
430 add(scratch, array, Operand(index, LSL, kTaggedSizeLog2));
431 MacroAssembler::StoreTaggedField(
432 value, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
433}
434
435inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
436 Register value) {
437 AssertSmi(value);
438 MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
439}
440
441inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
442 Tagged<Smi> value) {
443 TemporaryRegisterScope scope(this);
444 Register scratch = scope.AcquireScratch();
445 Move(scratch, value);
446 MacroAssembler::StoreTaggedField(scratch, FieldMemOperand(object, offset));
447}
448
449inline void MaglevAssembler::StoreInt32Field(Register object, int offset,
450 int32_t value) {
451 TemporaryRegisterScope scope(this);
452 Register scratch = scope.AcquireScratch();
453 Move(scratch, value);
454 str(scratch, FieldMemOperand(object, offset));
455}
456
457inline void MaglevAssembler::StoreField(MemOperand operand, Register value,
458 int size) {
459 DCHECK(size == 1 || size == 2 || size == 4);
460 if (size == 1) {
461 strb(value, operand);
462 } else if (size == 2) {
463 strh(value, operand);
464 } else {
465 DCHECK_EQ(size, 4);
466 str(value, operand);
467 }
468}
469
470inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
471 if (size == 2) {
472 rev(value, value);
473 asr(value, value, Operand(16));
474 } else if (size == 4) {
475 rev(value, value);
476 } else {
477 DCHECK_EQ(size, 1);
478 }
479}
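// For 2-byte values, rev swaps all four bytes and leaves the reversed halfword
// in the upper 16 bits; the arithmetic shift right by 16 moves it back down
// with sign extension. 4-byte values need only rev, 1-byte values are
// unchanged.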
480
484
488
489inline void MaglevAssembler::AddInt32(Register reg, int amount) {
490 add(reg, reg, Operand(amount));
491}
492
496
497inline void MaglevAssembler::OrInt32(Register reg, int mask) {
498 orr(reg, reg, Operand(mask));
499}
500
501inline void MaglevAssembler::AndInt32(Register reg, Register other) {
502 and_(reg, reg, other);
503}
504
505inline void MaglevAssembler::OrInt32(Register reg, Register other) {
506 orr(reg, reg, other);
507}
508
509inline void MaglevAssembler::ShiftLeft(Register reg, int amount) {
510 lsl(reg, reg, Operand(amount));
511}
512
513inline void MaglevAssembler::IncrementAddress(Register reg, int32_t delta) {
514 add(reg, reg, Operand(delta));
515}
516
517inline void MaglevAssembler::LoadAddress(Register dst, MemOperand location) {
518 DCHECK_EQ(location.am(), Offset);
519 add(dst, location.rn(), Operand(location.offset()));
520}
521
522inline void MaglevAssembler::Call(Label* target) { bl(target); }
523
524inline void MaglevAssembler::EmitEnterExitFrame(int extra_slots,
525 StackFrame::Type frame_type,
526 Register c_function,
527 Register scratch) {
528 EnterExitFrame(scratch, extra_slots, frame_type);
529}
530
531inline void MaglevAssembler::Move(StackSlot dst, Register src) {
532 str(src, StackSlotOperand(dst));
533}
534inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
535 vstr(src, StackSlotOperand(dst));
536}
537inline void MaglevAssembler::Move(Register dst, StackSlot src) {
538 ldr(dst, StackSlotOperand(src));
539}
540inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
541 vldr(dst, StackSlotOperand(src));
542}
543inline void MaglevAssembler::Move(MemOperand dst, Register src) {
544 str(src, dst);
545}
546inline void MaglevAssembler::Move(Register dst, MemOperand src) {
547 ldr(dst, src);
548}
549inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
550 if (dst != src) {
551 vmov(dst, src);
552 }
553}
554inline void MaglevAssembler::Move(Register dst, Tagged<Smi> src) {
555 MacroAssembler::Move(dst, src);
556}
557inline void MaglevAssembler::Move(Register dst, ExternalReference src) {
558 MacroAssembler::Move(dst, src);
559}
560inline void MaglevAssembler::Move(Register dst, Register src) {
561 if (dst != src) {
562 mov(dst, src);
563 }
564}
565inline void MaglevAssembler::Move(Register dst, Tagged<TaggedIndex> i) {
566 mov(dst, Operand(i.ptr()));
567}
568inline void MaglevAssembler::Move(Register dst, int32_t i) {
569 mov(dst, Operand(i));
570}
571inline void MaglevAssembler::Move(Register dst, uint32_t i) {
572 mov(dst, Operand(static_cast<int32_t>(i)));
573}
574inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
575 vmov(dst, base::Double(n));
576}
577inline void MaglevAssembler::Move(DoubleRegister dst, Float64 n) {
578 vmov(dst, base::Double(n.get_bits()));
579}
580inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
581 MacroAssembler::Move(dst, obj);
582}
583inline void MaglevAssembler::MoveTagged(Register dst, Handle<HeapObject> obj) {
584 Move(dst, obj);
585}
586
587inline void MaglevAssembler::LoadInt32(Register dst, MemOperand src) {
588 ldr(dst, src);
589}
590
591inline void MaglevAssembler::StoreInt32(MemOperand dst, Register src) {
592 str(src, dst);
593}
594
595inline void MaglevAssembler::LoadFloat32(DoubleRegister dst, MemOperand src) {
596 UseScratchRegisterScope temps(this);
598 if (dst.code() < 16) {
599 temp_vfps = LowDwVfpRegister::from_code(dst.code()).low();
600 } else {
601 temp_vfps = temps.AcquireS();
602 }
603 vldr(temp_vfps, src);
604 vcvt_f64_f32(dst, temp_vfps);
605}
606inline void MaglevAssembler::StoreFloat32(MemOperand dst, DoubleRegister src) {
607 UseScratchRegisterScope temps(this);
608 SwVfpRegister temp_vfps = temps.AcquireS();
609 vcvt_f32_f64(temp_vfps, src);
610 vstr(temp_vfps, dst);
611}
612inline void MaglevAssembler::LoadFloat64(DoubleRegister dst, MemOperand src) {
613 vldr(dst, src);
614}
615inline void MaglevAssembler::StoreFloat64(MemOperand dst, DoubleRegister src) {
616 vstr(src, dst);
617}
618
619inline void MaglevAssembler::LoadUnalignedFloat64(DoubleRegister dst,
620 Register base,
621 Register index) {
622 // vldr only works on 4 bytes aligned access.
623 TemporaryRegisterScope temps(this);
624 Register scratch = temps.AcquireScratch();
625 ldr(scratch, MemOperand(base, index));
626 VmovLow(dst, scratch);
627 add(scratch, index, Operand(4));
628 ldr(scratch, MemOperand(base, scratch));
629 VmovHigh(dst, scratch);
630}
631inline void MaglevAssembler::LoadUnalignedFloat64AndReverseByteOrder(
632 DoubleRegister dst, Register base, Register index) {
633 // vldr only works on 4 bytes aligned access.
634 TemporaryRegisterScope temps(this);
635 Register scratch = temps.AcquireScratch();
636 ldr(scratch, MemOperand(base, index));
637 rev(scratch, scratch);
638 VmovHigh(dst, scratch);
639 add(scratch, index, Operand(4));
640 ldr(scratch, MemOperand(base, scratch));
641 rev(scratch, scratch);
642 VmovLow(dst, scratch);
643}
644inline void MaglevAssembler::StoreUnalignedFloat64(Register base,
645 Register index,
646 DoubleRegister src) {
647 // vstr only works on 4 bytes aligned access.
648 TemporaryRegisterScope temps(this);
649 Register scratch = temps.AcquireScratch();
650 Register index_scratch = temps.AcquireScratch();
651 VmovLow(scratch, src);
652 str(scratch, MemOperand(base, index));
653 add(index_scratch, index, Operand(4));
654 VmovHigh(scratch, src);
655 str(scratch, MemOperand(base, index_scratch));
656}
657inline void MaglevAssembler::ReverseByteOrderAndStoreUnalignedFloat64(
658 Register base, Register index, DoubleRegister src) {
659 // vstr only works on 4 bytes aligned access.
660 TemporaryRegisterScope temps(this);
661 Register scratch = temps.AcquireScratch();
662 Register index_scratch = temps.AcquireScratch();
663 VmovHigh(scratch, src);
664 rev(scratch, scratch);
665 str(scratch, MemOperand(base, index));
666 add(index_scratch, index, Operand(4));
667 VmovLow(scratch, src);
668 rev(scratch, scratch);
669 str(scratch, MemOperand(base, index_scratch));
670}
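// vldr/vstr require 4-byte aligned addresses, so the unaligned float64
// accessors above go through a core register with two 32-bit ldr/str
// operations; the byte-order-reversing variants additionally rev each word and
// swap the high and low halves of the double.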
671
672inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
673 // No 64-bit registers.
674}
675inline void MaglevAssembler::NegateInt32(Register val) {
676 rsb(val, val, Operand(0));
677}
678
679inline void MaglevAssembler::ToUint8Clamped(Register result,
680 DoubleRegister value, Label* min,
681 Label* max, Label* done) {
682 CpuFeatureScope scope(this, ARMv8);
683 TemporaryRegisterScope temps(this);
684 DoubleRegister scratch = temps.AcquireScratchDouble();
685 Move(scratch, 0.0);
686 VFPCompareAndSetFlags(scratch, value);
687 // Set to 0 if NaN.
688 JumpIf(kOverflow, min);
689 JumpIf(ge, min);
690 Move(scratch, 255.0);
691 VFPCompareAndSetFlags(value, scratch);
692 JumpIf(ge, max);
693 // if value in [0, 255], then round up to the nearest.
694 vrintn(scratch, value);
695 TruncateDoubleToInt32(result, scratch);
696 Jump(done);
697}
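// ToUint8Clamped compares the input against 0.0 and 255.0, routing NaN and
// out-of-range values to the min/max labels, and rounds in-range values to the
// nearest integer with vrintn before jumping to done.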
698
699template <typename NodeT>
700inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
701 Register scratch,
702 NodeT* node) {
703 // A detached buffer leads to megamorphic feedback, so we won't have a deopt
704 // loop if we deopt here.
705 LoadTaggedField(scratch,
706 FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
707 LoadTaggedField(scratch,
708 FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
709 tst(scratch, Operand(JSArrayBuffer::WasDetachedBit::kMask));
710 EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node);
711}
712
713inline void MaglevAssembler::LoadByte(Register dst, MemOperand src) {
714 ldrb(dst, src);
715}
716
717inline Condition MaglevAssembler::IsCallableAndNotUndetectable(
718 Register map, Register scratch) {
719 ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
720 and_(scratch, scratch,
721 Operand(Map::Bits1::IsUndetectableBit::kMask |
722 Map::Bits1::IsCallableBit::kMask));
723 cmp(scratch, Operand(Map::Bits1::IsCallableBit::kMask));
724 return kEqual;
725}
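// The two map predicates here mask out the IsCallable and IsUndetectable bits
// of Map::kBitFieldOffset together: a map is callable-and-not-undetectable
// when the masked value equals the callable bit alone, and
// neither-callable-nor-undetectable when both bits are clear.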
726
727inline Condition MaglevAssembler::IsNotCallableNorUndetactable(
728 Register map, Register scratch) {
729 ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
730 tst(scratch, Operand(Map::Bits1::IsUndetectableBit::kMask |
731 Map::Bits1::IsCallableBit::kMask));
732 return kEqual;
733}
734
735inline void MaglevAssembler::LoadInstanceType(Register instance_type,
736 Register heap_object) {
737 LoadMap(instance_type, heap_object);
738 ldrh(instance_type, FieldMemOperand(instance_type, Map::kInstanceTypeOffset));
739}
740
741inline void MaglevAssembler::JumpIfObjectType(Register heap_object,
742 InstanceType type, Label* target,
743 Label::Distance distance) {
744 TemporaryRegisterScope temps(this);
745 Register scratch = temps.AcquireScratch();
746 CompareObjectType(heap_object, scratch, scratch, type);
747 JumpIf(kEqual, target, distance);
748}
749
750inline void MaglevAssembler::JumpIfNotObjectType(Register heap_object,
751 InstanceType type,
752 Label* target,
753 Label::Distance distance) {
754 TemporaryRegisterScope temps(this);
755 Register scratch = temps.AcquireScratch();
756 CompareObjectType(heap_object, scratch, scratch, type);
757 JumpIf(kNotEqual, target, distance);
758}
759
760inline void MaglevAssembler::AssertObjectType(Register heap_object,
761 InstanceType type,
762 AbortReason reason) {
763 TemporaryRegisterScope temps(this);
764 Register scratch = temps.AcquireScratch();
765 AssertNotSmi(heap_object);
766 CompareObjectType(heap_object, scratch, scratch, type);
767 Assert(kEqual, reason);
768}
769
770inline void MaglevAssembler::BranchOnObjectType(
771 Register heap_object, InstanceType type, Label* if_true,
772 Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
773 Label::Distance false_distance, bool fallthrough_when_false) {
774 TemporaryRegisterScope temps(this);
775 Register scratch = temps.AcquireScratch();
776 CompareObjectType(heap_object, scratch, scratch, type);
777 Branch(kEqual, if_true, true_distance, fallthrough_when_true, if_false,
778 false_distance, fallthrough_when_false);
779}
780
781inline void MaglevAssembler::JumpIfObjectTypeInRange(Register heap_object,
782 InstanceType lower_limit,
783 InstanceType higher_limit,
784 Label* target,
785 Label::Distance distance) {
786 TemporaryRegisterScope temps(this);
787 Register scratch = temps.AcquireScratch();
788 CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
789 higher_limit);
790 JumpIf(kUnsignedLessThanEqual, target, distance);
791}
792
793inline void MaglevAssembler::JumpIfObjectTypeNotInRange(
794 Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
795 Label* target, Label::Distance distance) {
796 TemporaryRegisterScope temps(this);
797 Register scratch = temps.AcquireScratch();
798 CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
799 higher_limit);
800 JumpIf(kUnsignedGreaterThan, target, distance);
801}
802
803inline void MaglevAssembler::AssertObjectTypeInRange(Register heap_object,
804 InstanceType lower_limit,
805 InstanceType higher_limit,
806 AbortReason reason) {
807 TemporaryRegisterScope temps(this);
808 Register scratch = temps.AcquireScratch();
809 AssertNotSmi(heap_object);
810 CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
811 higher_limit);
812 Assert(kUnsignedLessThanEqual, reason);
813}
814
815inline void MaglevAssembler::BranchOnObjectTypeInRange(
816 Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
817 Label* if_true, Label::Distance true_distance, bool fallthrough_when_true,
818 Label* if_false, Label::Distance false_distance,
819 bool fallthrough_when_false) {
820 TemporaryRegisterScope temps(this);
821 Register scratch = temps.AcquireScratch();
822 CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
823 higher_limit);
824 Branch(kUnsignedLessThanEqual, if_true, true_distance, fallthrough_when_true,
825 if_false, false_distance, fallthrough_when_false);
826}
827
828inline void MaglevAssembler::JumpIfJSAnyIsNotPrimitive(
829 Register heap_object, Label* target, Label::Distance distance) {
830 // If the type of the result (stored in its map) is less than
831 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
832 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
833 TemporaryRegisterScope temps(this);
834 Register scratch = temps.AcquireScratch();
835 MacroAssembler::CompareObjectType(heap_object, scratch, scratch,
836 FIRST_JS_RECEIVER_TYPE);
837 JumpIf(kUnsignedGreaterThanEqual, target, distance);
838}
839
840inline void MaglevAssembler::CompareMapWithRoot(Register object,
841 RootIndex index,
842 Register scratch) {
843 LoadMap(scratch, object);
844 CompareRoot(scratch, index);
845}
846
847inline void MaglevAssembler::CompareInstanceType(Register map,
848 InstanceType instance_type) {
849 TemporaryRegisterScope temps(this);
850 Register scratch = temps.AcquireScratch();
851 MacroAssembler::CompareInstanceType(map, scratch, instance_type);
852}
853
854inline Condition MaglevAssembler::CompareInstanceTypeRange(
855 Register map, Register instance_type_out, InstanceType lower_limit,
856 InstanceType higher_limit) {
857 TemporaryRegisterScope temps(this);
858 Register scratch = temps.AcquireScratch();
859 MacroAssembler::CompareInstanceTypeRange(map, instance_type_out, scratch,
860 lower_limit, higher_limit);
861 return kUnsignedLessThanEqual;
862}
863
864inline void MaglevAssembler::CompareFloat64AndJumpIf(
865 DoubleRegister src1, DoubleRegister src2, Condition cond, Label* target,
866 Label* nan_failed, Label::Distance distance) {
867 VFPCompareAndSetFlags(src1, src2);
868 JumpIf(ConditionForNaN(), nan_failed);
869 JumpIf(cond, target, distance);
870}
871
872inline void MaglevAssembler::CompareFloat64AndBranch(
873 DoubleRegister src1, DoubleRegister src2, Condition cond,
874 BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block,
875 BasicBlock* nan_failed) {
876 VFPCompareAndSetFlags(src1, src2);
877 JumpIf(ConditionForNaN(), nan_failed->label());
878 Branch(cond, if_true, if_false, next_block);
879}
880
881inline void MaglevAssembler::PrepareCallCFunction(int num_reg_arguments,
882 int num_double_registers) {
883 MacroAssembler::PrepareCallCFunction(num_reg_arguments, num_double_registers);
884}
885
886inline void MaglevAssembler::CallSelf() {
887 DCHECK(code_gen_state()->entry_label()->is_bound());
888 bl(code_gen_state()->entry_label());
889}
890
891inline void MaglevAssembler::Jump(Label* target, Label::Distance) {
892 // Any eager deopts should go through JumpIf to enable us to support the
893 // `--deopt-every-n-times` stress mode. See EmitEagerDeoptStress.
894 DCHECK(!IsDeoptLabel(target));
895 b(target);
896}
897
898inline void MaglevAssembler::JumpToDeopt(Label* target) {
899 DCHECK(IsDeoptLabel(target));
900 b(target);
901}
902
903inline void MaglevAssembler::EmitEagerDeoptStress(Label* target) {
904 // TODO(olivf): On arm `--deopt-every-n-times` is currently not supported.
905 // Supporting it would require to implement this method, additionally handle
906 // deopt branches in Cbz, and handle all cases where we fall through to the
907 // deopt branch (like Int32Divide).
908}
909
910inline void MaglevAssembler::JumpIf(Condition cond, Label* target,
911 Label::Distance) {
912 b(target, cond);
913}
914
915inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index,
916 Label* if_equal,
917 Label::Distance distance) {
918 MacroAssembler::JumpIfRoot(with, index, if_equal);
919}
920
921inline void MaglevAssembler::JumpIfNotRoot(Register with, RootIndex index,
922 Label* if_not_equal,
923 Label::Distance distance) {
924 MacroAssembler::JumpIfNotRoot(with, index, if_not_equal);
925}
926
927inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
928 Label::Distance distance) {
929 MacroAssembler::JumpIfSmi(src, on_smi);
930}
931
932inline void MaglevAssembler::JumpIfNotSmi(Register src, Label* on_smi,
933 Label::Distance distance) {
934 MacroAssembler::JumpIfNotSmi(src, on_smi);
935}
936
937inline void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
938 Label* target, Label::Distance) {
939 cmp(value, Operand(byte));
940 b(cc, target);
941}
942
943inline void MaglevAssembler::JumpIfHoleNan(DoubleRegister value, Register scratch,
944 Label* target, Label::Distance distance) {
945 // TODO(leszeks): Right now this only accepts Zone-allocated target labels.
946 // This works because all callsites are jumping to either a deopt, deferred
947 // code, or a basic block. If we ever need to jump to an on-stack label, we
948 // have to add support for it here or change the caller to pass a ZoneLabelRef.
949 DCHECK(compilation_info()->zone()->Contains(target));
950 ZoneLabelRef is_hole = ZoneLabelRef::UnsafeFromLabelPointer(target);
951 ZoneLabelRef is_not_hole(this);
952 VFPCompareAndSetFlags(value, value);
953 JumpIf(ConditionForNaN(),
954 MakeDeferredCode(
955 [](MaglevAssembler* masm, DoubleRegister value, Register scratch,
956 ZoneLabelRef is_hole, ZoneLabelRef is_not_hole) {
957 masm->VmovHigh(scratch, value);
958 masm->CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kEqual,
959 *is_hole);
960 masm->Jump(*is_not_hole);
961 },
962 value, scratch, is_hole, is_not_hole));
963 bind(*is_not_hole);
964}
965
966inline void MaglevAssembler::JumpIfNotHoleNan(DoubleRegister value, Register scratch,
967 Label* target,
968 Label::Distance distance) {
969 JumpIfNotNan(value, target, distance);
970 VmovHigh(scratch, value);
971 CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kNotEqual, target, distance);
972}
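// The hole is encoded as a NaN with a distinguished upper word, so the hole
// checks above first test for NaN and then compare the high 32 bits of the
// double against kHoleNanUpper32.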
973
974inline void MaglevAssembler::JumpIfNotHoleNan(MemOperand operand, Label* target,
975 Label::Distance distance) {
976 TemporaryRegisterScope temps(this);
977 Register upper_bits = temps.AcquireScratch();
978 DCHECK(operand.IsImmediateOffset());
979 ldr(upper_bits, MemOperand(operand.rn(), operand.offset() + (kDoubleSize / 2),
980 operand.am()));
981 CompareInt32AndJumpIf(upper_bits, kHoleNanUpper32, kNotEqual, target,
982 distance);
983}
984
985inline void MaglevAssembler::JumpIfNan(DoubleRegister value, Label* target,
986 Label::Distance distance) {
987 VFPCompareAndSetFlags(value, value);
988 JumpIf(ConditionForNaN(), target, distance);
989}
990
991inline void MaglevAssembler::JumpIfNotNan(DoubleRegister value, Label* target,
992 Label::Distance distance) {
993 VFPCompareAndSetFlags(value, value);
994 JumpIf(NegateCondition(ConditionForNaN()), target, distance);
995}
996
997inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
998 Condition cond,
999 Label* target,
1000 Label::Distance distance) {
1001 cmp(r1, r2);
1002 JumpIf(cond, target);
1003}
1004
1005inline void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, Register r2,
1006 Condition cond,
1007 Label* target,
1008 Label::Distance distance) {
1009 cmp(r1, r2);
1010 JumpIf(cond, target);
1011}
1012
1013inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
1014 Condition cond,
1015 Label* target,
1016 Label::Distance distance) {
1017 cmp(r1, Operand(value));
1018 JumpIf(cond, target);
1019}
1020
1021inline void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, int32_t value,
1022 Condition cond,
1023 Label* target,
1024 Label::Distance distance) {
1025 cmp(r1, Operand(value));
1026 JumpIf(cond, target);
1027}
1028
1029inline void MaglevAssembler::CompareInt32AndAssert(Register r1, Register r2,
1030 Condition cond,
1031 AbortReason reason) {
1032 cmp(r1, r2);
1033 Assert(cond, reason);
1034}
1035inline void MaglevAssembler::CompareInt32AndAssert(Register r1, int32_t value,
1036 Condition cond,
1037 AbortReason reason) {
1038 cmp(r1, Operand(value));
1039 Assert(cond, reason);
1040}
1041
1042inline void MaglevAssembler::CompareInt32AndBranch(
1043 Register r1, int32_t value, Condition cond, Label* if_true,
1044 Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
1045 Label::Distance false_distance, bool fallthrough_when_false) {
1046 cmp(r1, Operand(value));
1047 Branch(cond, if_true, true_distance, fallthrough_when_true, if_false,
1048 false_distance, fallthrough_when_false);
1049}
1050
1051inline void MaglevAssembler::CompareInt32AndBranch(
1052 Register r1, Register r2, Condition cond, Label* if_true,
1053 Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
1054 Label::Distance false_distance, bool fallthrough_when_false) {
1055 cmp(r1, r2);
1056 Branch(cond, if_true, true_distance, fallthrough_when_true, if_false,
1057 false_distance, fallthrough_when_false);
1058}
1059
1060inline void MaglevAssembler::CompareIntPtrAndBranch(
1061 Register r1, int32_t value, Condition cond, Label* if_true,
1062 Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
1063 Label::Distance false_distance, bool fallthrough_when_false) {
1064 cmp(r1, Operand(value));
1065 Branch(cond, if_true, true_distance, fallthrough_when_true, if_false,
1066 false_distance, fallthrough_when_false);
1067}
1068
1069inline void MaglevAssembler::CompareSmiAndJumpIf(Register r1, Tagged<Smi> value,
1070 Condition cond, Label* target,
1071 Label::Distance distance) {
1072 cmp(r1, Operand(value));
1073 JumpIf(cond, target);
1074}
1075
1076inline void MaglevAssembler::CompareSmiAndAssert(Register r1, Tagged<Smi> value,
1077 Condition cond,
1078 AbortReason reason) {
1079 if (!v8_flags.debug_code) return;
1080 AssertSmi(r1);
1081 cmp(r1, Operand(value));
1082 Assert(cond, reason);
1083}
1084
1085inline void MaglevAssembler::CompareByteAndJumpIf(MemOperand left, int8_t right,
1086 Condition cond,
1087 Register scratch,
1088 Label* target,
1089 Label::Distance distance) {
1090 LoadByte(scratch, left);
1091 Cmp(scratch, right);
1092 JumpIf(cond, target, distance);
1093}
1094
1095inline void MaglevAssembler::CompareTaggedAndJumpIf(Register r1,
1096 Tagged<Smi> value,
1097 Condition cond,
1098 Label* target,
1099 Label::Distance distance) {
1100 cmp(r1, Operand(value));
1101 JumpIf(cond, target);
1102}
1103
1104inline void MaglevAssembler::CompareTaggedAndJumpIf(Register reg,
1105 Handle<HeapObject> obj,
1106 Condition cond,
1107 Label* target,
1108 Label::Distance distance) {
1109 cmp(reg, Operand(obj));
1110 b(cond, target);
1111}
1112
1113inline void MaglevAssembler::CompareTaggedAndJumpIf(Register src1,
1114 Register src2,
1115 Condition cond,
1116 Label* target,
1117 Label::Distance distance) {
1118 CmpTagged(src1, src2);
1119 JumpIf(cond, target, distance);
1120}
1121
1122inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
1123 DoubleRegister reg, Label* target, Label::Distance distance) {
1124 VFPCompareAndSetFlags(reg, 0.0);
1125 JumpIf(eq, target);
1126 JumpIf(vs, target); // NaN check
1127}
1128
1129inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
1130 MemOperand operand, Label* target, Label::Distance distance) {
1131 TemporaryRegisterScope temps(this);
1132 DoubleRegister value_double = temps.AcquireScratchDouble();
1133 vldr(value_double, operand);
1134 CompareDoubleAndJumpIfZeroOrNaN(value_double, target, distance);
1135}
1136
1137inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
1138 Register r1, int32_t mask, Label* target, Label::Distance distance) {
1139 tst(r1, Operand(mask));
1140 b(ne, target);
1141}
1142
1143inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
1144 MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
1145 TemporaryRegisterScope temps(this);
1146 Register value = temps.AcquireScratch();
1147 ldr(value, operand);
1148 TestInt32AndJumpIfAnySet(value, mask, target);
1149}
1150
1151inline void MaglevAssembler::TestUint8AndJumpIfAnySet(
1152 MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
1153 TemporaryRegisterScope temps(this);
1154 Register value = temps.AcquireScratch();
1155 ldrb(value, operand);
1156 TestInt32AndJumpIfAnySet(value, mask, target);
1157}
1158
1159inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
1160 Register r1, int32_t mask, Label* target, Label::Distance distance) {
1161 tst(r1, Operand(mask));
1162 b(eq, target);
1163}
1164
1165inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
1166 MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
1167 TemporaryRegisterScope temps(this);
1168 Register value = temps.AcquireScratch();
1169 ldr(value, operand);
1170 TestInt32AndJumpIfAllClear(value, mask, target);
1171}
1172
1173inline void MaglevAssembler::TestUint8AndJumpIfAllClear(
1174 MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
1175 TemporaryRegisterScope temps(this);
1176 Register value = temps.AcquireScratch();
1177 LoadByte(value, operand);
1178 TestInt32AndJumpIfAllClear(value, mask, target);
1179}
1180
1181inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
1182 Register heap_number) {
1183 vldr(result, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
1184}
1185
1186inline void MaglevAssembler::LoadHeapInt32Value(Register result,
1187 Register heap_number) {
1188 ldr(result, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
1189}
1190
1191inline void MaglevAssembler::StoreHeapInt32Value(Register value,
1192 Register heap_number) {
1193 str(value, (FieldMemOperand(heap_number, offsetof(HeapNumber, value_))));
1194}
1195
1196inline void MaglevAssembler::Int32ToDouble(DoubleRegister result,
1197 Register src) {
1198 UseScratchRegisterScope temps(this);
1200 if (result.code() < 16) {
1201 temp_vfps = LowDwVfpRegister::from_code(result.code()).low();
1202 } else {
1203 temp_vfps = temps.AcquireS();
1204 }
1205 vmov(temp_vfps, src);
1206 vcvt_f64_s32(result, temp_vfps);
1207}
1208
1209inline void MaglevAssembler::IntPtrToDouble(DoubleRegister result,
1210 Register src) {
1211 return Int32ToDouble(result, src);
1212}
1213
1214inline void MaglevAssembler::Uint32ToDouble(DoubleRegister result,
1215 Register src) {
1216 UseScratchRegisterScope temps(this);
1218 if (result.code() < 16) {
1219 temp_vfps = LowDwVfpRegister::from_code(result.code()).low();
1220 } else {
1221 temp_vfps = temps.AcquireS();
1222 }
1223 vmov(temp_vfps, src);
1224 vcvt_f64_u32(result, temp_vfps);
1225}
1226
1227inline void MaglevAssembler::Pop(Register dst) { pop(dst); }
1228
1229inline void MaglevAssembler::AssertStackSizeCorrect() {
1230 if (v8_flags.slow_debug_code) {
1231 TemporaryRegisterScope temps(this);
1232 Register scratch = temps.AcquireScratch();
1233 add(scratch, sp,
1236 cmp(scratch, fp);
1237 Assert(eq, AbortReason::kStackAccessBelowStackPointer);
1238 }
1239}
1240
1241inline Condition MaglevAssembler::FunctionEntryStackCheck(
1242 int stack_check_offset) {
1243 TemporaryRegisterScope temps(this);
1244 Register stack_cmp_reg = sp;
1245 if (stack_check_offset >= kStackLimitSlackForDeoptimizationInBytes) {
1246 stack_cmp_reg = temps.AcquireScratch();
1247 sub(stack_cmp_reg, sp, Operand(stack_check_offset));
1248 }
1249 Register interrupt_stack_limit = temps.AcquireScratch();
1250 LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit);
1251 cmp(stack_cmp_reg, interrupt_stack_limit);
1252 return kUnsignedGreaterThanEqual;
1253}
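// FunctionEntryStackCheck compares the stack pointer (offset by
// stack_check_offset when it exceeds the deoptimization slack) against the
// interrupt stack limit; the returned condition holds when enough stack is
// available.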
1254
1255inline void MaglevAssembler::FinishCode() { CheckConstPool(true, false); }
1256
1257template <typename NodeT>
1258inline void MaglevAssembler::EmitEagerDeoptIfNotEqual(DeoptimizeReason reason,
1259 NodeT* node) {
1260 EmitEagerDeoptIf(ne, reason, node);
1261}
1262
1263template <>
1264inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
1265 Register src) {
1266 Move(dst, src);
1267}
1268template <>
1269inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
1270 MemOperand src) {
1271 switch (repr) {
1272 case MachineRepresentation::kWord32:
1273 case MachineRepresentation::kTagged:
1274 case MachineRepresentation::kTaggedPointer:
1275 case MachineRepresentation::kTaggedSigned:
1276 return ldr(dst, src);
1277 default:
1278 UNREACHABLE();
1279 }
1280}
1281template <>
1282inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
1283 MemOperand dst, Register src) {
1284 switch (repr) {
1285 case MachineRepresentation::kWord32:
1286 case MachineRepresentation::kTagged:
1287 case MachineRepresentation::kTaggedPointer:
1288 case MachineRepresentation::kTaggedSigned:
1289 return str(src, dst);
1290 default:
1291 UNREACHABLE();
1292 }
1293}
1294template <>
1295inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
1296 MemOperand dst, MemOperand src) {
1297 TemporaryRegisterScope temps(this);
1298 Register scratch = temps.AcquireScratch();
1299 MoveRepr(repr, scratch, src);
1300 MoveRepr(repr, dst, scratch);
1301}
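// A memory-to-memory MoveRepr has no single-instruction form on arm, so it is
// split into a load into a scratch register followed by a store.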
1302
1303inline void MaglevAssembler::MaybeEmitPlaceHolderForDeopt() {
1304 // Implemented only for x64.
1305}
1306
1312
1313} // namespace maglev
1314} // namespace internal
1315} // namespace v8
1316
1317#endif // V8_MAGLEV_ARM_MAGLEV_ASSEMBLER_ARM_INL_H_