v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-riscv-inl.h
// Copyright 2024 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_RISCV_MAGLEV_ASSEMBLER_RISCV_INL_H_
#define V8_MAGLEV_RISCV_MAGLEV_ASSEMBLER_RISCV_INL_H_

#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/globals.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-gen-state.h"

namespace v8 {
namespace internal {
namespace maglev {

constexpr Condition ConditionForFloat64(Operation operation) {
  return ConditionFor(operation);
}

inline int ShiftFromScale(int n) {
  switch (n) {
    case 1:
      return 0;
    case 2:
      return 1;
    case 4:
      return 2;
    case 8:
      return 3;
    default:
      UNREACHABLE();
  }
}
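
// Example: an 8-byte element size maps to shift 3, so a scaled access
// computes address = base + (index << 3); a 1-byte element size maps to
// shift 0 and a plain add.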

inline FPUCondition ConditionToConditionCmpFPU(Condition condition) {
  switch (condition) {
    case kEqual:
      return EQ;
    case kNotEqual:
      return NE;
    case kUnsignedLessThan:
    case kLessThan:
      return LT;
    case kUnsignedGreaterThanEqual:
    case kGreaterThanEqual:
      return GE;
    case kUnsignedLessThanEqual:
    case kLessThanEqual:
      return LE;
    case kUnsignedGreaterThan:
    case kGreaterThan:
      return GT;
    default:
      break;
  }
  UNREACHABLE();
}

class MaglevAssembler::TemporaryRegisterScope
    : public TemporaryRegisterScopeBase<TemporaryRegisterScope> {
  using Base = TemporaryRegisterScopeBase<TemporaryRegisterScope>;

 public:
  struct SavedData : public Base::SavedData {
    RegList available_scratch_;
    DoubleRegList available_fp_scratch_;
  };

  explicit TemporaryRegisterScope(MaglevAssembler* masm)
      : Base(masm), scratch_scope_(masm) {
    if (prev_scope_ == nullptr) {
      // Add extra scratch register if no previous scope.
      scratch_scope_.Include(kMaglevExtraScratchRegister);
    }
  }
  explicit TemporaryRegisterScope(MaglevAssembler* masm,
                                  const SavedData& saved_data)
      : Base(masm, saved_data), scratch_scope_(masm) {
    scratch_scope_.SetAvailable(saved_data.available_scratch_);
    scratch_scope_.SetAvailableDouble(saved_data.available_fp_scratch_);
  }

  Register AcquireScratch() {
    Register reg = scratch_scope_.Acquire();
    CHECK(!available_.has(reg));
    return reg;
  }
  DoubleRegister AcquireScratchDouble() {
    DoubleRegister reg = scratch_scope_.AcquireDouble();
    CHECK(!available_double_.has(reg));
    return reg;
  }
  void IncludeScratch(Register reg) { scratch_scope_.Include(reg); }

  SavedData CopyForDefer() {
    return SavedData{
        CopyForDeferBase(),
        scratch_scope_.Available(),
        scratch_scope_.AvailableDouble(),
    };
  }

  void ResetToDefaultImpl() {
    scratch_scope_.SetAvailable(Assembler::DefaultTmpList() |
                                kMaglevExtraScratchRegister);
    scratch_scope_.SetAvailableDouble(Assembler::DefaultFPTmpList());
  }

 private:
  UseScratchRegisterScope scratch_scope_;
};

inline MapCompare::MapCompare(MaglevAssembler* masm, Register object,
                              size_t map_count)
    : masm_(masm), object_(object), map_count_(map_count) {
  map_ = masm_->scratch_register_scope()->AcquireScratch();
  if (PointerCompressionIsEnabled()) {
    masm_->LoadCompressedMap(map_, object_);
  } else {
    masm_->LoadMap(map_, object_);
  }
  USE(map_count_);
}

void MapCompare::Generate(Handle<Map> map, Condition cond, Label* if_true,
                          Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(masm_);
  Register temp = temps.AcquireScratch();
  masm_->Move(temp, map);
  // FIXME: reimplement with CmpTagged/JumpIf
  if (COMPRESS_POINTERS_BOOL) {
    masm_->Sub32(temp, map_, temp);
  } else {
    masm_->SubWord(temp, map_, temp);
  }
  masm_->MacroAssembler::Branch(if_true, cond, temp, Operand(zero_reg),
                                distance);
}

inline Register MapCompare::GetMap() {
  if (PointerCompressionIsEnabled()) {
    masm_->DecompressTagged(map_, map_);
  }
  return map_;
}

int MapCompare::TemporaryCount(size_t map_count) { return 1; }

namespace detail {

// Check if the argument is already in a register and doesn't need any
// scratches to reload. This should be in sync with `ToRegister` function below.
template <typename Arg>
inline bool AlreadyInARegister(Arg arg) {
  return false;
}

inline bool AlreadyInARegister(Register reg) { return true; }

inline bool AlreadyInARegister(const Input& input) {
  if (input.operand().IsConstant()) {
    return false;
  }
  const compiler::AllocatedOperand& operand =
      compiler::AllocatedOperand::cast(input.operand());
  if (operand.IsRegister()) {
    return true;
  }
  DCHECK(operand.IsStackSlot());
  return false;
}

template <typename Arg>
inline Register ToRegister(MaglevAssembler* masm,
                           MaglevAssembler::TemporaryRegisterScope* scratch,
                           Arg arg) {
  Register reg = scratch->AcquireScratch();
  masm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(MaglevAssembler* masm,
                           MaglevAssembler::TemporaryRegisterScope* scratch,
                           Register reg) {
  return reg;
}
inline Register ToRegister(MaglevAssembler* masm,
                           MaglevAssembler::TemporaryRegisterScope* scratch,
                           const Input& input) {
  if (input.operand().IsConstant()) {
    Register reg = scratch->AcquireScratch();
    input.node()->LoadToRegister(masm, reg);
    return reg;
  }
  const compiler::AllocatedOperand& operand =
      compiler::AllocatedOperand::cast(input.operand());
  if (operand.IsRegister()) {
    return ToRegister(input);
  } else {
    DCHECK(operand.IsStackSlot());
    Register reg = scratch->AcquireScratch();
    masm->Move(reg, masm->ToMemOperand(input));
    return reg;
  }
}

template <typename... Args>
struct PushAllHelper;

template <>
struct PushAllHelper<> {
  static void Push(MaglevAssembler* masm) {}
  static void PushReverse(MaglevAssembler* masm) {}
};

inline void PushInput(MaglevAssembler* masm, const Input& input) {
  if (input.operand().IsConstant()) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register scratch = temps.AcquireScratch();
    input.node()->LoadToRegister(masm, scratch);
    masm->Push(scratch);
  } else {
    // TODO(leszeks): Consider special casing the value. (Toon: could possibly
    // be done through Input directly?)
    const compiler::AllocatedOperand& operand =
        compiler::AllocatedOperand::cast(input.operand());
    if (operand.IsRegister()) {
      masm->Push(operand.GetRegister());
    } else {
      DCHECK(operand.IsStackSlot());
      MaglevAssembler::TemporaryRegisterScope temps(masm);
      Register scratch = temps.AcquireScratch();
      masm->LoadWord(scratch, masm->GetStackSlot(operand));
      masm->Push(scratch);
    }
  }
}

template <typename T, typename... Args>
inline void PushIterator(MaglevAssembler* masm, base::iterator_range<T> range,
                         Args... args) {
  for (auto iter = range.begin(), end = range.end(); iter != end; ++iter) {
    masm->Push(*iter);
  }
  PushAllHelper<Args...>::Push(masm, args...);
}

template <typename T, typename... Args>
inline void PushIteratorReverse(MaglevAssembler* masm,
                                base::iterator_range<T> range, Args... args) {
  PushAllHelper<Args...>::PushReverse(masm, args...);
  for (auto iter = range.rbegin(), end = range.rend(); iter != end; ++iter) {
    masm->Push(*iter);
  }
}

template <typename... Args>
struct PushAllHelper<Input, Args...> {
  static void Push(MaglevAssembler* masm, const Input& arg, Args... args) {
    PushInput(masm, arg);
    PushAllHelper<Args...>::Push(masm, args...);
  }
  static void PushReverse(MaglevAssembler* masm, const Input& arg,
                          Args... args) {
    PushAllHelper<Args...>::PushReverse(masm, args...);
    PushInput(masm, arg);
  }
};
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static void Push(MaglevAssembler* masm, Arg arg, Args... args) {
    if constexpr (is_iterator_range<Arg>::value) {
      PushIterator(masm, arg, args...);
    } else {
      masm->MacroAssembler::Push(arg);
      PushAllHelper<Args...>::Push(masm, args...);
    }
  }
  static void PushReverse(MaglevAssembler* masm, Arg arg, Args... args) {
    if constexpr (is_iterator_range<Arg>::value) {
      PushIteratorReverse(masm, arg, args...);
    } else {
      PushAllHelper<Args...>::PushReverse(masm, args...);
      masm->Push(arg);
    }
  }
};

}  // namespace detail

template <typename... T>
void MaglevAssembler::Push(T... vals) {
  detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void MaglevAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

inline void MaglevAssembler::BindJumpTarget(Label* label) {
  MacroAssembler::BindJumpTarget(label);
}

inline void MaglevAssembler::BindBlock(BasicBlock* block) {
  if (block->is_start_block_of_switch_case()) {
    BindJumpTarget(block->label());
  } else {
    bind(block->label());
  }
}

inline Condition MaglevAssembler::CheckSmi(Register src) {
  Register cmp_flag = MaglevAssembler::GetFlagsRegister();
  // Pointers to heap objects have the bottom bit set to 1, so cmp_flag is
  // set to 0 if src is a Smi.
  MacroAssembler::SmiTst(src, cmp_flag);
  return eq;
}

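// Illustrative note (assuming kSmiTag == 0 with a one-bit tag): the Smi 5 is
// encoded as (5 << 1) == 0b1010 with bottom bit 0, while any heap object
// pointer carries bottom bit 1, so (src & 1) == 0 identifies a Smi.
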
#ifdef V8_ENABLE_DEBUG_CODE
inline void MaglevAssembler::AssertMap(Register object) {
  if (!v8_flags.debug_code) return;
  ASM_CODE_COMMENT(this);
  AssertNotSmi(object, AbortReason::kOperandIsNotAMap);

  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register temp = temps.AcquireScratch();
  Label ConditionMet, Done;
  MacroAssembler::JumpIfObjectType(&Done, Condition::kEqual, object, MAP_TYPE,
                                   temp);
  Abort(AbortReason::kOperandIsNotAMap);
  bind(&Done);
}
#endif

inline void MaglevAssembler::SmiTagInt32AndSetFlags(Register dst,
                                                    Register src) {
  // FIXME: check callsites and subsequent calls to Assert!
  ASM_CODE_COMMENT(this);
  static_assert(kSmiTag == 0);
  // NB: JumpIf expects the result in the dedicated "flag" register.
  Register overflow_flag = MaglevAssembler::GetFlagsRegister();
  if (SmiValuesAre31Bits()) {
    // A Smi is shifted left by 1, so double the incoming integer using 64-bit
    // and 32-bit addition operations and then compare the results to detect
    // overflow. The order matters: dst != src is not guaranteed in general,
    // so the 64-bit sum must be computed before dst is written.
    Add64(overflow_flag, src, src);
    Add32(dst, src, src);
    Sne(overflow_flag, overflow_flag, Operand(dst));
  } else {
    // The Smi goes into the upper 32 bits.
    slli(dst, src, 32);
    // No overflow can happen for a 32-bit value shifted into the upper word.
    Move(overflow_flag, zero_reg);
  }
}

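// Worked example of the 31-bit overflow check above (illustrative only): for
// src = 0x4000'0000, Add64 produces 0x8000'0000 while Add32 produces the
// sign-extended 0xFFFF'FFFF'8000'0000, so Sne sets the flag to 1 (overflow).
// For src = 1 both sums are 2 and the flag stays 0.
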
inline void MaglevAssembler::CheckInt32IsSmi(Register maybeSmi, Label* fail,
                                             Register scratch) {
  // A Smi is shifted left by 1.
  MaglevAssembler::TemporaryRegisterScope temps(this);
  if (scratch == Register::no_reg()) {
    scratch = temps.AcquireScratch();
  }
  Register sum32 = scratch;
  Register sum64 = temps.AcquireScratch();
  Add32(sum32, maybeSmi, Operand(maybeSmi));
  Add64(sum64, maybeSmi, Operand(maybeSmi));
  // Overflow happened if sum64 != sum32.
  MacroAssembler::Branch(fail, ne, sum64, Operand(sum32));
}

inline void MaglevAssembler::SmiAddConstant(Register dst, Register src,
                                            int value, Label* fail,
                                            Label::Distance distance) {
  AssertSmi(src);
  if (value != 0) {
    MaglevAssembler::TemporaryRegisterScope temps(this);
    Register overflow = temps.AcquireScratch();
    Operand addend = Operand(Smi::FromInt(value));
    if (SmiValuesAre31Bits()) {
      Add64(overflow, src, addend);
      Add32(dst, src, addend);
      Sub64(overflow, dst, overflow);
      MacroAssembler::Branch(fail, ne, overflow, Operand(zero_reg), distance);
    } else {
      AddOverflow64(dst, src, addend, overflow);
      MacroAssembler::Branch(fail, lt, overflow, Operand(zero_reg), distance);
    }
  } else {
    Move(dst, src);
  }
}

inline void MaglevAssembler::SmiSubConstant(Register dst, Register src,
                                            int value, Label* fail,
                                            Label::Distance distance) {
  AssertSmi(src);
  if (value != 0) {
    MaglevAssembler::TemporaryRegisterScope temps(this);
    Register overflow = temps.AcquireScratch();
    Operand subtrahend = Operand(Smi::FromInt(value));
    if (SmiValuesAre31Bits()) {
      Sub64(overflow, src, subtrahend);
      Sub32(dst, src, subtrahend);
      Sub64(overflow, dst, overflow);
      MacroAssembler::Branch(fail, ne, overflow, Operand(zero_reg), distance);
    } else {
      SubOverflow64(dst, src, subtrahend, overflow);
      MacroAssembler::Branch(fail, lt, overflow, Operand(zero_reg), distance);
    }
  } else {
    Move(dst, src);
  }
}

inline void MaglevAssembler::MoveHeapNumber(Register dst, double value) {
  li(dst, Operand::EmbeddedNumber(value));
}

// Compare the object in a register to a value from the root list.
inline void MaglevAssembler::CompareRoot(const Register& obj, RootIndex index,
                                         ComparisonMode mode) {
  constexpr Register aflag = MaglevAssembler::GetFlagsRegister();
  MacroAssembler::CompareRoot(obj, index, aflag, mode);
}

inline void MaglevAssembler::CompareTaggedRoot(const Register& obj,
                                               RootIndex index) {
  constexpr Register cmp_result = MaglevAssembler::GetFlagsRegister();
  MacroAssembler::CompareTaggedRoot(obj, index, cmp_result);
}

inline void MaglevAssembler::CmpTagged(const Register& rs1,
                                       const Register& rs2) {
  constexpr Register aflag = MaglevAssembler::GetFlagsRegister();
  MacroAssembler::CmpTagged(aflag, rs1, rs2);
}

// Cmp and Assert are only used in Maglev unittests; these helpers exist to
// keep those tests happy. Cmp is only used with a subsequent Assert(kEqual),
// so the pseudo flag register should be 0 iff rn equals imm.
inline void MaglevAssembler::Cmp(const Register& rn, int imm) {
  constexpr Register aflag = MaglevAssembler::GetFlagsRegister();
  SubWord(aflag, rn, Operand(imm));
}

inline void MaglevAssembler::Assert(Condition cond, AbortReason reason) {
  constexpr Register aflag = MaglevAssembler::GetFlagsRegister();
  MacroAssembler::Assert(cond, reason, aflag, Operand(zero_reg));
}

inline Condition MaglevAssembler::IsRootConstant(Input input,
                                                 RootIndex root_index) {
  constexpr Register aflag = MaglevAssembler::GetFlagsRegister();

  if (input.operand().IsRegister()) {
    MacroAssembler::CompareRoot(ToRegister(input), root_index, aflag);
  } else {
    DCHECK(input.operand().IsStackSlot());
    MaglevAssembler::TemporaryRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    LoadWord(scratch, ToMemOperand(input));
    MacroAssembler::CompareRoot(scratch, root_index, aflag);
  }
  return eq;
}

inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot slot) {
  return MemOperand(fp, slot.index);
}

inline Register MaglevAssembler::GetFramePointer() { return fp; }

// TODO(Victorgomes): Unify this to use StackSlot struct.
inline MemOperand MaglevAssembler::GetStackSlot(
    const compiler::AllocatedOperand& operand) {
  return MemOperand(fp, GetFramePointerOffsetForStackSlot(operand));
}

inline MemOperand MaglevAssembler::ToMemOperand(
    const compiler::InstructionOperand& operand) {
  return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}

inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
  return ToMemOperand(location.operand());
}

inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
                                                        Register object) {
  DCHECK_NE(data_pointer, object);
  LoadExternalPointerField(
      data_pointer,
      FieldMemOperand(object, JSTypedArray::kExternalPointerOffset));
  if (JSTypedArray::kMaxSizeInHeap == 0) return;
  MaglevAssembler::TemporaryRegisterScope scope(this);
  Register base = scope.AcquireScratch();
  if (COMPRESS_POINTERS_BOOL) {
    Load32U(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
  } else {
    LoadWord(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
  }
  Add64(data_pointer, data_pointer, base);
}

inline MemOperand MaglevAssembler::TypedArrayElementOperand(
    Register data_pointer, Register index, int element_size) {
  const int shift = ShiftFromScale(element_size);
  if (shift == 0) {
    AddWord(data_pointer, data_pointer, index);
  } else {
    CalcScaledAddress(data_pointer, data_pointer, index, shift);
  }
  return MemOperand(data_pointer);
}

inline MemOperand MaglevAssembler::DataViewElementOperand(Register data_pointer,
                                                          Register index) {
  Add64(data_pointer, data_pointer,
        index);  // FIXME: should we check for COMPRESSED PTRS enabled here?
  return MemOperand(data_pointer);
}

inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
                                                    Register object,
                                                    Register index, int scale,
                                                    int offset) {
  const int shift = ShiftFromScale(scale);
  if (shift == 0) {
    AddWord(result, object, index);
  } else {
    CalcScaledAddress(result, object, index, shift);
  }
  LoadTaggedField(result, FieldMemOperand(result, offset));
}

inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
                                                       Register object,
                                                       int offset) {
  Move(result, FieldMemOperand(object, offset));
#ifdef V8_ENABLE_SANDBOX
  SrlWord(result, result, Operand(kBoundedSizeShift));
#endif  // V8_ENABLE_SANDBOX
}

inline void MaglevAssembler::LoadExternalPointerField(Register result,
                                                      MemOperand operand) {
#ifdef V8_ENABLE_SANDBOX
  LoadSandboxedPointerField(result, operand);
#else
  Move(result, operand);
#endif
}

void MaglevAssembler::LoadFixedArrayElement(Register result, Register array,
                                            Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  LoadTaggedFieldByIndex(result, array, index, kTaggedSize,
                         OFFSET_OF_DATA_START(FixedArray));
}

inline void MaglevAssembler::LoadTaggedFieldWithoutDecompressing(
    Register result, Register object, int offset) {
  MacroAssembler::LoadTaggedFieldWithoutDecompressing(
      result, FieldMemOperand(object, offset));
}

void MaglevAssembler::LoadFixedArrayElementWithoutDecompressing(
    Register result, Register array, Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  CalcScaledAddress(result, array, index, kTaggedSizeLog2);
  MacroAssembler::LoadTaggedFieldWithoutDecompressing(
      result, FieldMemOperand(result, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::LoadFixedDoubleArrayElement(DoubleRegister result,
                                                         Register array,
                                                         Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_DOUBLE_ARRAY_TYPE,
                     AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CalcScaledAddress(scratch, array, index, kDoubleSizeLog2);
  LoadDouble(result,
             FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::StoreFixedDoubleArrayElement(
    Register array, Register index, DoubleRegister value) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CalcScaledAddress(scratch, array, index, kDoubleSizeLog2);
  StoreDouble(value,
              FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::LoadSignedField(Register result,
                                             MemOperand operand, int size) {
  if (size == 1) {
    Lb(result, operand);
  } else if (size == 2) {
    Lh(result, operand);
  } else {
    DCHECK_EQ(size, 4);
    Lw(result, operand);
  }
}

inline void MaglevAssembler::LoadUnsignedField(Register result,
                                               MemOperand operand, int size) {
  if (size == 1) {
    Lbu(result, operand);
  } else if (size == 2) {
    Lhu(result, operand);
  } else {
    DCHECK_EQ(size, 4);
    Lwu(result, operand);
  }
}

inline void MaglevAssembler::SetSlotAddressForTaggedField(Register slot_reg,
                                                          Register object,
                                                          int offset) {
  Add64(slot_reg, object, offset - kHeapObjectTag);
}

inline void MaglevAssembler::SetSlotAddressForFixedArrayElement(
    Register slot_reg, Register object, Register index) {
  Add64(slot_reg, object, OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag);
  CalcScaledAddress(slot_reg, slot_reg, index, kTaggedSizeLog2);
}

inline void MaglevAssembler::StoreTaggedFieldNoWriteBarrier(Register object,
                                                            int offset,
                                                            Register value) {
  MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreFixedArrayElementNoWriteBarrier(
    Register array, Register index, Register value) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CalcScaledAddress(scratch, array, index, kTaggedSizeLog2);
  MacroAssembler::StoreTaggedField(
      value, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
                                                    Register value) {
  AssertSmi(value);
  MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
                                                    Tagged<Smi> value) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Move(scratch, value);
  MacroAssembler::StoreTaggedField(scratch, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreInt32Field(Register object, int offset,
                                             int32_t value) {
  if (value == 0) {
    Sw(zero_reg, FieldMemOperand(object, offset));
    return;
  }
  MaglevAssembler::TemporaryRegisterScope scope(this);
  Register scratch = scope.AcquireScratch();
  Move(scratch, value);
  Sw(scratch, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreField(MemOperand operand, Register value,
                                        int size) {
  DCHECK(size == 1 || size == 2 || size == 4);
  if (size == 1) {
    Sb(value, operand);
  } else if (size == 2) {
    Sh(value, operand);
  } else {
    DCHECK_EQ(size, 4);
    Sw(value, operand);
  }
}

#ifdef V8_ENABLE_SANDBOX
inline void MaglevAssembler::StoreTrustedPointerFieldNoWriteBarrier(
    Register object, int offset, Register value) {
  MacroAssembler::StoreTrustedPointerField(value,
                                           FieldMemOperand(object, offset));
}
#endif  // V8_ENABLE_SANDBOX

inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  if (size == 2) {
    ByteSwap(value, value, 4, scratch);
    srai(value, value, 16);
  } else if (size == 4) {
    ByteSwap(value, value, 4, scratch);
  } else {
    DCHECK_EQ(size, 1);
  }
}

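// Illustrative example for the 16-bit case above: for value = 0x0000AABB the
// 32-bit ByteSwap yields 0xBBAA0000, and the arithmetic shift right by 16
// produces the sign-extended half-word 0xBBAA.
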
inline void MaglevAssembler::IncrementInt32(Register reg) {
  Add32(reg, reg, Operand(1));
}

inline void MaglevAssembler::DecrementInt32(Register reg) {
  Sub32(reg, reg, Operand(1));
}

inline void MaglevAssembler::AddInt32(Register reg, int amount) {
  Add32(reg, reg, Operand(amount));
}

inline void MaglevAssembler::AndInt32(Register reg, int mask) {
  // Check whether the size of the immediate exceeds 32 bits.
  if constexpr (sizeof(intptr_t) > sizeof(mask)) {
    // Set the upper bits of the immediate so that the AND operation
    // won't touch the upper part of the target register.
    static constexpr intptr_t lsb_mask = 0xFFFFFFFF;
    And(reg, reg, Operand(~lsb_mask | mask));
  } else {
    And(reg, reg, Operand(mask));
  }
}

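// Worked example for the 64-bit path above: for mask = 0x0000FF00 the operand
// becomes 0xFFFFFFFF0000FF00, so the AND clears bits only in the lower 32-bit
// half and leaves the upper half of reg (e.g. a sign extension) untouched.
// A negative mask sign-extends to the same all-ones upper half on its own.
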
inline void MaglevAssembler::OrInt32(Register reg, int mask) {
  // OR won't touch the upper part of the target register.
  Or(reg, reg, Operand(mask));
}

inline void MaglevAssembler::AndInt32(Register reg, Register other) {
  And(reg, reg, other);
}
inline void MaglevAssembler::OrInt32(Register reg, Register other) {
  Or(reg, reg, other);
}

inline void MaglevAssembler::ShiftLeft(Register reg, int amount) {
  Sll32(reg, reg, Operand(amount));
}

inline void MaglevAssembler::IncrementAddress(Register reg, int32_t delta) {
  Add64(reg, reg, Operand(delta));
}

inline void MaglevAssembler::LoadAddress(Register dst, MemOperand location) {
  DCHECK(location.is_reg());
  Add64(dst, location.rm(), location.offset());
}

inline void MaglevAssembler::Call(Label* target) {
  MacroAssembler::Call(target);
}

inline void MaglevAssembler::EmitEnterExitFrame(int extra_slots,
                                                StackFrame::Type frame_type,
                                                Register c_function,
                                                Register scratch) {
  EnterExitFrame(scratch, extra_slots, frame_type);
}

inline void MaglevAssembler::Move(StackSlot dst, Register src) {
  StoreWord(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
  StoreDouble(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(Register dst, StackSlot src) {
  LoadWord(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
  LoadDouble(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(MemOperand dst, Register src) {
  StoreWord(src, dst);
}
inline void MaglevAssembler::Move(Register dst, MemOperand src) {
  LoadWord(dst, src);
}
inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  MoveDouble(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Tagged<Smi> src) {
  MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, ExternalReference src) {
  li(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Register src) {
  MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Tagged<TaggedIndex> i) {
  li(dst, Operand(i.ptr()));
}
inline void MaglevAssembler::Move(Register dst, int32_t i) {
  li(dst, Operand(i));
}
inline void MaglevAssembler::Move(Register dst, uint32_t i) {
  li(dst, Operand(i));
}
inline void MaglevAssembler::Move(Register dst, IndirectPointerTag i) {
  li(dst, Operand(i));
}
inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
  LoadFPRImmediate(dst, n);
}
inline void MaglevAssembler::Move(DoubleRegister dst, Float64 n) {
  LoadFPRImmediate(dst, n.get_scalar());
}
inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
  li(dst, obj);
}
void MaglevAssembler::MoveTagged(Register dst, Handle<HeapObject> obj) {
#ifdef V8_COMPRESS_POINTERS
  li(dst, obj, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
#else
  ASM_CODE_COMMENT_STRING(this, "MaglevAsm::MoveTagged");
  Move(dst, obj);
#endif
}

inline void MaglevAssembler::LoadInt32(Register dst, MemOperand src) {
  Load32U(dst, src);
}
inline void MaglevAssembler::StoreInt32(MemOperand dst, Register src) {
  Sw(src, dst);
}

inline void MaglevAssembler::LoadFloat32(DoubleRegister dst, MemOperand src) {
  LoadFloat(dst, src);
  // Convert Float32 to double (Float64).
  fcvt_d_s(dst, dst);
}
inline void MaglevAssembler::StoreFloat32(MemOperand dst, DoubleRegister src) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  DoubleRegister scratch = temps.AcquireScratchDouble();
  // Convert double (Float64) to Float32.
  fcvt_s_d(scratch, src);
  StoreFloat(scratch, dst);
}
inline void MaglevAssembler::LoadFloat64(DoubleRegister dst, MemOperand src) {
  LoadDouble(dst, src);
}
inline void MaglevAssembler::StoreFloat64(MemOperand dst, DoubleRegister src) {
  StoreDouble(src, dst);
}

inline void MaglevAssembler::LoadUnalignedFloat64(DoubleRegister dst,
                                                  Register base,
                                                  Register index) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register address = temps.AcquireScratch();
  Add64(address, base, index);
  ULoadDouble(dst, MemOperand(address));
}
inline void MaglevAssembler::LoadUnalignedFloat64AndReverseByteOrder(
    DoubleRegister dst, Register base, Register index) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register address = temps.AcquireScratch();
  Add64(address, base, index);
  Register scratch = base;  // Reuse base as a scratch register.
  Uld(scratch, MemOperand(address));
  ByteSwap(scratch, scratch, 8, address);
  MacroAssembler::Move(dst, scratch);
}
inline void MaglevAssembler::StoreUnalignedFloat64(Register base,
                                                   Register index,
                                                   DoubleRegister src) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register address = temps.AcquireScratch();
  Add64(address, base, index);
  UStoreDouble(src, MemOperand(address));
}
inline void MaglevAssembler::ReverseByteOrderAndStoreUnalignedFloat64(
    Register base, Register index, DoubleRegister src) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Register address = temps.AcquireScratch();
  MacroAssembler::Move(scratch, src);
  ByteSwap(scratch, scratch, 8, address);  // Reuse address as scratch register.
  Add64(address, base, index);
  Usd(scratch, MemOperand(address));
}

inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
  SignExtendWord(dst, src);
}

inline void MaglevAssembler::NegateInt32(Register val) {
  SignExtendWord(val, val);
  Neg(val, val);
}

inline void MaglevAssembler::ToUint8Clamped(Register result,
                                            DoubleRegister value, Label* min,
                                            Label* max, Label* done) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Register scratch2 = temps.AcquireScratch();
  DoubleRegister ftmp1 = temps.AcquireScratchDouble();
  DCHECK(ftmp1 != value);

  // If value is NOT in (0.0, 255.0), then fall back to min or max.
  fclass_d(scratch, value);
  constexpr int32_t nan_neg_mask =
      kNegativeInfinity | kNegativeNormalNumber | kNegativeSubnormalNumber |
      kNegativeZero | kPositiveZero | kSignalingNaN | kQuietNaN;
  constexpr int32_t pos_inf_mask = kPositiveInfinity;
  And(scratch2, scratch, Operand(nan_neg_mask));
  MacroAssembler::Branch(min,  // value is NaN or value <= 0.0
                         not_equal, scratch2, Operand(zero_reg));
  And(scratch2, scratch, Operand(pos_inf_mask));
  MacroAssembler::Branch(max,  // value is +Infinity
                         not_equal, scratch2, Operand(zero_reg));
  // 255.0 is 0x406F_E000_0000_0000 in IEEE-754 floating point format.
  Add32(scratch, zero_reg, Operand(0x406FE));
  Sll64(scratch, scratch, Operand(44));
  fmv_d_x(ftmp1, scratch);
  MacroAssembler::CompareF64(scratch, GE, value, ftmp1);
  MacroAssembler::Branch(max,  // value >= 255.0
                         not_equal, scratch, Operand(zero_reg));

  // value is in (0.0, 255.0).
  fmv_x_d(result, value);
  // Check whether the fractional part in result is absent.
  Label has_fraction;
  Mv(scratch, result);
  SllWord(scratch, scratch, Operand(64 - kFloat64MantissaBits));
  MacroAssembler::Branch(&has_fraction, not_equal, scratch, Operand(zero_reg));
  // No fractional part: compute the exponent part, taking the bias into
  // account.
  SrlWord(result, result, Operand(kFloat64MantissaBits));
  SubWord(result, result, Operand(kFloat64ExponentBias));
  MacroAssembler::Branch(done);

  bind(&has_fraction);
  // Actual rounding is here. Notice that ToUint8Clamp does "round half to
  // even" tie-breaking, which differs from Math.round's "round half up"
  // tie-breaking.
  fcvt_l_d(scratch, value, RNE);
  fcvt_d_l(ftmp1, scratch, RNE);
  // Special handling is needed if the result is a very small positive number
  // that rounds to zero. JS semantics requires that the rounded result retains
  // the sign of the input, so a very small positive floating-point number must
  // be rounded to positive 0.
  fsgnj_d(ftmp1, ftmp1, value);
  fmv_x_d(result, ftmp1);
  MacroAssembler::Branch(done);
}

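// Illustrative examples of the tie-breaking above (assuming RNE is the
// rounding mode used by fcvt_l_d): ToUint8Clamp(2.5) == 2 and
// ToUint8Clamp(3.5) == 4 ("round half to even"), whereas Math.round(2.5) == 3
// ("round half up").
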
template <typename NodeT>
inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
                                                   Register scratch,
                                                   NodeT* node) {
  // A detached buffer leads to megamorphic feedback, so we won't have a deopt
  // loop if we deopt here.
  LoadTaggedField(scratch,
                  FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
  LoadTaggedField(scratch,
                  FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  ZeroExtendWord(scratch, scratch);
  And(scratch, scratch, Operand(JSArrayBuffer::WasDetachedBit::kMask));
  Label* deopt_label =
      GetDeoptLabel(node, DeoptimizeReason::kArrayBufferWasDetached);
  RecordComment("-- Jump to eager deopt");
  MacroAssembler::Branch(deopt_label, not_equal, scratch, Operand(zero_reg));
}

inline void MaglevAssembler::LoadByte(Register dst, MemOperand src) {
  Lbu(dst, src);
}

inline Condition MaglevAssembler::IsCallableAndNotUndetectable(
    Register map, Register scratch) {
  Load32U(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  And(scratch, scratch,
      Operand(Map::Bits1::IsUndetectableBit::kMask |
              Map::Bits1::IsCallableBit::kMask));
  // NB: TestTypeOf=>Branch=>JumpIf expects the result of a comparison
  // in the dedicated "flag" register.
  constexpr Register bit_set_flag = MaglevAssembler::GetFlagsRegister();
  Sub32(bit_set_flag, scratch, Operand(Map::Bits1::IsCallableBit::kMask));
  return kEqual;
}

inline Condition MaglevAssembler::IsNotCallableNorUndetactable(
    Register map, Register scratch) {
  Load32U(scratch, FieldMemOperand(map, Map::kBitFieldOffset));

  // NB: TestTypeOf=>Branch=>JumpIf expects the result of a comparison
  // in the dedicated "flag" register.
  constexpr Register bits_unset_flag = MaglevAssembler::GetFlagsRegister();
  And(bits_unset_flag, scratch,
      Operand(Map::Bits1::IsUndetectableBit::kMask |
              Map::Bits1::IsCallableBit::kMask));
  return kEqual;
}

inline void MaglevAssembler::LoadInstanceType(Register instance_type,
                                              Register heap_object) {
  LoadMap(instance_type, heap_object);
  Lhu(instance_type, FieldMemOperand(instance_type, Map::kInstanceTypeOffset));
}

inline void MaglevAssembler::CompareInstanceTypeAndJumpIf(
    Register map, InstanceType type, Condition cond, Label* target,
    Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Lhu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  // We could already be out of scratch registers at this point, and Branch
  // with an immediate operand requires one, so subtract here and compare
  // against x0 later instead; this saves a register. This can be done only
  // for signed comparisons.
  bool can_sub = false;
  switch (cond) {
    case Condition::kEqual:
    case Condition::kNotEqual:
    case Condition::kLessThan:
    case Condition::kLessThanEqual:
    case Condition::kGreaterThan:
    case Condition::kGreaterThanEqual:
      can_sub = true;
      break;
    default:
      break;
  }
  if (can_sub) {
    SubWord(scratch, scratch, Operand(type));
    type = static_cast<InstanceType>(0);
  }
  MacroAssembler::Branch(target, cond, scratch, Operand(type), distance);
}

inline Condition MaglevAssembler::CompareInstanceTypeRange(
    Register map, Register instance_type_out, InstanceType lower_limit,
    InstanceType higher_limit) {
  DCHECK_LT(lower_limit, higher_limit);
  Lhu(instance_type_out, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Sub32(instance_type_out, instance_type_out, Operand(lower_limit));
  Register aflag = MaglevAssembler::GetFlagsRegister();
  // NB: JumpIf expects the result in the dedicated "flag" register.
  Sleu(aflag, instance_type_out, Operand(higher_limit - lower_limit));
  return kNotZero;
}

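// The Sub32/Sleu pair above implements the standard unsigned range check:
// (x - lower) <= (higher - lower) holds in unsigned arithmetic exactly when
// lower <= x <= higher. For example, with lower = 10 and higher = 20, x = 15
// gives 5 <= 10 (in range), while x = 5 wraps to a huge value and fails.
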
inline void MaglevAssembler::JumpIfNotObjectType(Register heap_object,
                                                 InstanceType type,
                                                 Label* target,
                                                 Label::Distance distance) {
  constexpr Register flag = MaglevAssembler::GetFlagsRegister();
  IsObjectType(heap_object, flag, flag, type);
  // NB: JumpIf expects the result in the dedicated "flag" register.
  JumpIf(kNotEqual, target, distance);
}

inline void MaglevAssembler::AssertObjectType(Register heap_object,
                                              InstanceType type,
                                              AbortReason reason) {
  AssertNotSmi(heap_object);
  constexpr Register flag = MaglevAssembler::GetFlagsRegister();
  IsObjectType(heap_object, flag, flag, type);
  // NB: Assert expects the result in the dedicated "flag" register.
  Assert(Condition::kEqual, reason);
}

inline void MaglevAssembler::BranchOnObjectType(
    Register heap_object, InstanceType type, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  IsObjectType(heap_object, scratch, scratch, type);
  Branch(kEqual, if_true, true_distance, fallthrough_when_true, if_false,
         false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::JumpIfObjectTypeInRange(Register heap_object,
                                                     InstanceType lower_limit,
                                                     InstanceType higher_limit,
                                                     Label* target,
                                                     Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  LoadMap(scratch, heap_object);
  Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  Sub32(scratch, scratch, Operand(lower_limit));
  MacroAssembler::Branch(target, kUnsignedLessThanEqual, scratch,
                         Operand(higher_limit - lower_limit));
}

inline void MaglevAssembler::JumpIfObjectTypeNotInRange(
    Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
    Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  LoadMap(scratch, heap_object);
  Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  Sub32(scratch, scratch, Operand(lower_limit));
  MacroAssembler::Branch(target, kUnsignedGreaterThan, scratch,
                         Operand(higher_limit - lower_limit));
}

inline void MaglevAssembler::AssertObjectTypeInRange(Register heap_object,
                                                     InstanceType lower_limit,
                                                     InstanceType higher_limit,
                                                     AbortReason reason) {
  AssertNotSmi(heap_object);
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  LoadMap(scratch, heap_object);
  Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  Sub32(scratch, scratch, Operand(lower_limit));
  MacroAssembler::Assert(kUnsignedLessThanEqual, reason, scratch,
                         Operand(higher_limit - lower_limit));
}

inline void MaglevAssembler::BranchOnObjectTypeInRange(
    Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
    Label* if_true, Label::Distance true_distance, bool fallthrough_when_true,
    Label* if_false, Label::Distance false_distance,
    bool fallthrough_when_false) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  LoadMap(scratch, heap_object);
  Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  Sub32(scratch, scratch, Operand(lower_limit));
  constexpr Register flags_reg = MaglevAssembler::GetFlagsRegister();
  // If scratch <= (higher_limit - lower_limit) then flags_reg = 0, else
  // flags_reg = 1.
  CompareI(flags_reg, scratch, Operand(higher_limit - lower_limit),
           Condition::kUnsignedGreaterThan);
  // Now compare against 0 with kEqual.
  Branch(Condition::kEqual, if_true, true_distance, fallthrough_when_true,
         if_false, false_distance, fallthrough_when_false);
}

#if V8_STATIC_ROOTS_BOOL
// FIXME: not tested
inline void MaglevAssembler::JumpIfObjectInRange(Register heap_object,
                                                 Tagged_t lower_limit,
                                                 Tagged_t higher_limit,
                                                 Label* target,
                                                 Label::Distance distance) {
  // Only allowed for comparisons against RORoots.
  DCHECK_LE(lower_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  DCHECK_LE(higher_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  TemporaryRegisterScope temps(this);
  AssertNotSmi(heap_object);
  Register scratch = temps.AcquireScratch();
  BranchRange(target, kUnsignedLessThanEqual, heap_object, scratch, lower_limit,
              higher_limit, distance);
}

inline void MaglevAssembler::JumpIfObjectNotInRange(Register heap_object,
                                                    Tagged_t lower_limit,
                                                    Tagged_t higher_limit,
                                                    Label* target,
                                                    Label::Distance distance) {
  // Only allowed for comparisons against RORoots.
  DCHECK_LE(lower_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  DCHECK_LE(higher_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  TemporaryRegisterScope temps(this);
  AssertNotSmi(heap_object);
  Register scratch = temps.AcquireScratch();
  BranchRange(target, kUnsignedGreaterThan, heap_object, scratch, lower_limit,
              higher_limit, distance);
}

inline void MaglevAssembler::AssertObjectInRange(Register heap_object,
                                                 Tagged_t lower_limit,
                                                 Tagged_t higher_limit,
                                                 AbortReason reason) {
  // Only allowed for comparisons against RORoots.
  DCHECK_LE(lower_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  DCHECK_LE(higher_limit, StaticReadOnlyRoot::kLastAllocatedRoot);
  TemporaryRegisterScope temps(this);
  AssertNotSmi(heap_object);
  Register scratch = temps.AcquireScratch();
  AssertRange(kUnsignedLessThanEqual, reason, heap_object, scratch, lower_limit,
              higher_limit);
}
#endif  // V8_STATIC_ROOTS_BOOL

inline void MaglevAssembler::JumpIfJSAnyIsNotPrimitive(
    Register heap_object, Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::JumpIfJSAnyIsNotPrimitive(heap_object, scratch, target,
                                            distance);
}

template <typename NodeT>
inline void MaglevAssembler::CompareRootAndEmitEagerDeoptIf(
    Register reg, RootIndex index, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  Label Deopt, Done;

  CompareRootAndBranch(reg, index, cond, &Deopt);
  Jump(&Done, Label::kNear);
  bind(&Deopt);
  EmitEagerDeopt(node, reason);
  bind(&Done);
}

template <typename NodeT>
inline void MaglevAssembler::CompareMapWithRootAndEmitEagerDeoptIf(
    Register reg, RootIndex index, Register scratch, Condition cond,
    DeoptimizeReason reason, NodeT* node) {
  CompareMapWithRoot(reg, index, scratch);
  DCHECK_EQ(cond, kNotEqual);  // So far we only support kNotEqual; the flag
                               // register is 0 for equal, 1 for not equal.
  EmitEagerDeoptIf(cond, reason,
                   node);  // Jump to deopt only if the flag register is not 0.
}

template <typename NodeT>
inline void MaglevAssembler::CompareTaggedRootAndEmitEagerDeoptIf(
    Register reg, RootIndex index, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  DCHECK_EQ(cond, kNotEqual);
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register cmp_result = temps.AcquireScratch();
  MacroAssembler::CompareTaggedRoot(reg, index, cmp_result);
  Label* deopt_label = GetDeoptLabel(node, reason);
  RecordComment("-- Jump to eager deopt");
  MacroAssembler::Branch(deopt_label, cond, cmp_result, Operand(zero_reg));
}

template <typename NodeT>
inline void MaglevAssembler::CompareUInt32AndEmitEagerDeoptIf(
    Register reg, int imm, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  Label Deopt, Done;

  MacroAssembler::Branch(&Deopt, cond, reg, Operand(imm), Label::kNear);
  Jump(&Done, Label::kNear);
  bind(&Deopt);
  EmitEagerDeopt(node, reason);
  bind(&Done);
}

inline void MaglevAssembler::CompareMapWithRoot(Register object,
                                                RootIndex index,
                                                Register scratch) {
  constexpr Register Jump_flag = MaglevAssembler::GetFlagsRegister();

  if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) {
    LoadCompressedMap(scratch, object);
    MaglevAssembler::TemporaryRegisterScope temps(this);
    Register index_reg = temps.AcquireScratch();
    Li(index_reg, ReadOnlyRootPtr(index));
    MacroAssembler::CmpTagged(Jump_flag, scratch, index_reg);
    return;
  }
  LoadMap(scratch, object);
  MacroAssembler::CompareRoot(scratch, index,
                              Jump_flag);  // 0 if equal, 1 if not.
}

template <typename NodeT>
inline void MaglevAssembler::CompareInstanceTypeRangeAndEagerDeoptIf(
    Register map, Register instance_type_out, InstanceType lower_limit,
    InstanceType higher_limit, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  DCHECK_LT(lower_limit, higher_limit);
  Lhu(instance_type_out, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Sub32(instance_type_out, instance_type_out, Operand(lower_limit));
  DCHECK_EQ(cond, kUnsignedGreaterThan);
  Label* deopt_label = GetDeoptLabel(node, reason);
  RecordComment("-- Jump to eager deopt");
  MacroAssembler::Branch(deopt_label, Ugreater, instance_type_out,
                         Operand(higher_limit - lower_limit));
}

inline void MaglevAssembler::CompareFloat64AndJumpIf(
    DoubleRegister src1, DoubleRegister src2, Condition cond, Label* target,
    Label* nan_failed, Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Register scratch2 = temps.AcquireScratch();
  Register cmp = temps.AcquireScratch();
  // FIXME: check which conditions can occur on input.
  // If cond is an overflow condition, skip the check for NaN; such a check
  // is implemented below using the fclass results.
  if (cond != kOverflow && cond != kNoOverflow) {
    feq_d(scratch, src1, src1);
    feq_d(scratch2, src2, src2);
    And(scratch2, scratch, scratch2);
    MacroAssembler::Branch(nan_failed, equal, scratch2, Operand(zero_reg));
    // Actual comparison.
    FPUCondition fcond = ConditionToConditionCmpFPU(cond);
    MacroAssembler::CompareF64(cmp, fcond, src1, src2);
    MacroAssembler::Branch(target, not_equal, cmp, Operand(zero_reg), distance);
  } else {
    // The cases for conditions connected with overflow should be checked,
    // and maybe removed in the future (FPUCondition does not implement
    // overflow cases).
    fclass_d(scratch, src1);
    fclass_d(scratch2, src2);
    Or(scratch2, scratch, scratch2);
    And(cmp, scratch2, Operand(kSignalingNaN | kQuietNaN));
    MacroAssembler::Branch(nan_failed, not_equal, cmp, Operand(zero_reg));
    And(cmp, scratch2,
        Operand(kNegativeInfinity | kPositiveInfinity));
    MacroAssembler::Branch(target, not_equal, cmp, Operand(zero_reg), distance);
  }
}

inline void MaglevAssembler::CompareFloat64AndBranch(
    DoubleRegister src1, DoubleRegister src2, Condition cond,
    BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block,
    BasicBlock* nan_failed) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch1 = temps.AcquireScratch();
  Register scratch2 = temps.AcquireScratch();
  // Emit the check for NaN.
  feq_d(scratch1, src1, src1);
  feq_d(scratch2, src2, src2);
  Register any_nan = scratch2;
  And(any_nan, scratch1, scratch2);
  MacroAssembler::Branch(nan_failed->label(), equal, any_nan,
                         Operand(zero_reg));
  // Actual comparison.
  Register cmp = temps.AcquireScratch();
  bool fallthrough_when_true = (if_true == next_block);
  bool fallthrough_when_false = (if_false == next_block);
  Label* if_true_label = if_true->label();
  Label* if_false_label = if_false->label();
  if (fallthrough_when_false) {
    if (fallthrough_when_true) {
      // If both paths are a fallthrough, do nothing.
      DCHECK_EQ(if_true_label, if_false_label);
      return;
    }
    FPUCondition fcond = ConditionToConditionCmpFPU(cond);
    MacroAssembler::CompareF64(cmp, fcond, src1, src2);
    // Jump over the false block if true, otherwise fall through into it.
    MacroAssembler::Branch(if_true_label, ne, cmp, Operand(zero_reg),
                           Label::kFar);
  } else {
    FPUCondition neg_fcond = ConditionToConditionCmpFPU(NegateCondition(cond));
    MacroAssembler::CompareF64(cmp, neg_fcond, src1, src2);
    // Jump to the false block if true.
    MacroAssembler::Branch(if_false_label, ne, cmp, Operand(zero_reg),
                           Label::kFar);
    // Jump to the true block if it's not the next block.
    if (!fallthrough_when_true) {
      MacroAssembler::Branch(if_true_label, Label::kFar);
    }
  }
}

inline void MaglevAssembler::PrepareCallCFunction(int num_reg_arguments,
                                                  int num_double_registers) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::PrepareCallCFunction(num_reg_arguments, num_double_registers,
                                       scratch);
}

inline void MaglevAssembler::CallSelf() {
  DCHECK(allow_call());
  DCHECK(code_gen_state()->entry_label()->is_bound());
  MacroAssembler::Call(code_gen_state()->entry_label());
}

inline void MaglevAssembler::Jump(Label* target, Label::Distance distance) {
  DCHECK(!IsDeoptLabel(target));
  MacroAssembler::Branch(target, distance);
}

inline void MaglevAssembler::JumpToDeopt(Label* target) {
  DCHECK(IsDeoptLabel(target));
  MacroAssembler::Branch(target);
}

inline void MaglevAssembler::EmitEagerDeoptStress(Label* target) {
  // TODO(olivf): On arm `--deopt-every-n-times` is currently not supported.
  // Supporting it would require to implement this method, additionally handle
  // deopt branches in Cbz, and handle all cases where we fall through to the
  // deopt branch (like Int32Divide).
}

inline void MaglevAssembler::JumpIf(Condition cond, Label* target,
                                    Label::Distance distance) {
  // NOTE: for now keep in mind that we always put the result of a comparison
  // into the dedicated register ("set flag"), and then compare it with x0.
  constexpr Register aflag = MaglevAssembler::GetFlagsRegister();
  MacroAssembler::Branch(target, cond, aflag, Operand(zero_reg), distance);
}

inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index,
                                        Label* if_equal,
                                        Label::Distance distance) {
  MacroAssembler::JumpIfRoot(with, index, if_equal, distance);
}

inline void MaglevAssembler::JumpIfNotRoot(Register with, RootIndex index,
                                           Label* if_not_equal,
                                           Label::Distance distance) {
  MacroAssembler::JumpIfNotRoot(with, index, if_not_equal, distance);
}

inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
                                       Label::Distance distance) {
  MacroAssembler::JumpIfSmi(src, on_smi, distance);
}

inline void MaglevAssembler::JumpIfNotSmi(Register src, Label* on_smi,
                                          Label::Distance distance) {
  MacroAssembler::JumpIfNotSmi(src, on_smi, distance);
}

void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                 Label* target, Label::Distance distance) {
  MacroAssembler::Branch(target, cc, value, Operand(byte), distance);
}

void MaglevAssembler::JumpIfHoleNan(DoubleRegister value, Register scratch,
                                    Label* target, Label::Distance distance) {
  // TODO(leszeks): Right now this only accepts Zone-allocated target labels.
  // This works because all callsites are jumping to either a deopt, deferred
  // code, or a basic block. If we ever need to jump to an on-stack label, we
  // have to add support for it here and change the caller to pass a
  // ZoneLabelRef.
  DCHECK(compilation_info()->zone()->Contains(target));
  ZoneLabelRef is_hole = ZoneLabelRef::UnsafeFromLabelPointer(target);
  ZoneLabelRef is_not_hole(this);
  MaglevAssembler::TemporaryRegisterScope temps(this);

  Label* deferred_code = MakeDeferredCode(
      [](MaglevAssembler* masm, DoubleRegister value, Register scratch,
         ZoneLabelRef is_hole, ZoneLabelRef is_not_hole) {
        masm->ExtractHighWordFromF64(scratch, value);
        masm->CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kEqual, *is_hole);
        masm->MacroAssembler::Branch(*is_not_hole);
      },
      value, scratch, is_hole, is_not_hole);
  Register scratch2 = temps.AcquireScratch();
  feq_d(scratch2, value, value);  // 0 if value is NaN.
  MacroAssembler::Branch(deferred_code, equal, scratch2, Operand(zero_reg));
  bind(*is_not_hole);
}

void MaglevAssembler::JumpIfNotHoleNan(DoubleRegister value, Register scratch,
                                       Label* target,
                                       Label::Distance distance) {
  JumpIfNotNan(value, target, distance);
  ExtractHighWordFromF64(scratch, value);
  CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kNotEqual, target, distance);
}

void MaglevAssembler::JumpIfNotHoleNan(MemOperand operand, Label* target,
                                       Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register upper_bits = temps.AcquireScratch();
  Load32U(upper_bits,
          MemOperand(operand.rm(), operand.offset() + (kDoubleSize / 2)));
  CompareInt32AndJumpIf(upper_bits, kHoleNanUpper32, kNotEqual, target,
                        distance);
}

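// Note on the MemOperand variant above: RISC-V is little-endian, so the upper
// 32 bits of the double are stored at offset + kDoubleSize / 2, and comparing
// only that word against kHoleNanUpper32 suffices to recognize the hole NaN
// bit pattern without loading the full 64-bit value.
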
void MaglevAssembler::JumpIfNan(DoubleRegister value, Label* target,
                                Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  feq_d(scratch, value, value);  // 0 if value is NaN.
  MacroAssembler::Branch(target, equal, scratch, Operand(zero_reg), distance);
}

void MaglevAssembler::JumpIfNotNan(DoubleRegister value, Label* target,
                                   Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  feq_d(scratch, value, value);  // 1 if value is not NaN.
  MacroAssembler::Branch(target, not_equal, scratch, Operand(zero_reg),
                         distance);
}

inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
                                                   Condition cond,
                                                   Label* target,
                                                   Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register r1w = temps.AcquireScratch();
  Register r2w = temps.AcquireScratch();
  // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
  switch (cond) {
    case ult:
    case uge:
    case ule:
    case ugt:
      ZeroExtendWord(r1w, r1);
      ZeroExtendWord(r2w, r2);
      break;
    default:
      SignExtend32To64Bits(r1w, r1);
      SignExtend32To64Bits(r2w, r2);
  }
  MacroAssembler::Branch(target, cond, r1w, Operand(r2w), distance);
}

inline void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, int32_t value,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  MacroAssembler::Branch(target, cond, r1, Operand(value), distance);
}

void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, Register r2,
                                             Condition cond, Label* target,
                                             Label::Distance distance) {
  MacroAssembler::Branch(target, cond, r1, Operand(r2), distance);
}

inline void MaglevAssembler::CompareIntPtrAndBranch(
    Register r1, int32_t value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  // Expect only specific conditions.
  switch (cond) {
    case eq:
    case ne:
    case greater:
    case greater_equal:
    case less:
    case less_equal:
    case Ugreater:
    case Ugreater_equal:
    case Uless:
    case Uless_equal:
      break;  // Expected.
    case cc_always:
    default:
      UNREACHABLE();  // Not expected.
  }

  // A word-width comparison needs no extension, so compare r1 directly.
  if (fallthrough_when_false) {
    if (fallthrough_when_true) {
      // If both paths are a fallthrough, do nothing.
      DCHECK_EQ(if_true, if_false);
      return;
    }
    // Jump over the false block if true, otherwise fall through into it.
    MacroAssembler::Branch(if_true, cond, r1, Operand(value), true_distance);
  } else {
    // Jump to the false block if true.
    MacroAssembler::Branch(if_false, NegateCondition(cond), r1, Operand(value),
                           false_distance);
    // Jump to the true block if it's not the next block.
    if (!fallthrough_when_true) {
      MacroAssembler::Branch(if_true, true_distance);
    }
  }
}

inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
                                                   Condition cond,
                                                   Label* target,
                                                   Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register r1w = temps.AcquireScratch();
  // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
  switch (cond) {
    case ult:
    case uge:
    case ule:
    case ugt:
      ZeroExtendWord(r1w, r1);
      break;
    default:
      SignExtend32To64Bits(r1w, r1);
  }
  MacroAssembler::Branch(target, cond, r1w, Operand(value), distance);
}

inline void MaglevAssembler::CompareInt32AndAssert(Register r1, Register r2,
                                                   Condition cond,
                                                   AbortReason reason) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register r1w = temps.AcquireScratch();
  Register r2w = temps.AcquireScratch();
  // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
  switch (cond) {
    case ult:
    case uge:
    case ule:
    case ugt:
      ZeroExtendWord(r1w, r1);
      ZeroExtendWord(r2w, r2);
      break;
    default:
      SignExtend32To64Bits(r1w, r1);
      SignExtend32To64Bits(r2w, r2);
  }
  MacroAssembler::Assert(cond, reason, r1w, Operand(r2w));
}

inline void MaglevAssembler::CompareInt32AndAssert(Register r1, int32_t value,
                                                   Condition cond,
                                                   AbortReason reason) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register r1w = temps.AcquireScratch();
  // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
  switch (cond) {
    case ult:
    case uge:
    case ule:
    case ugt:
      ZeroExtendWord(r1w, r1);
      break;
    default:
      SignExtend32To64Bits(r1w, r1);
  }
  MacroAssembler::Assert(cond, reason, r1w, Operand(value));
}

inline void MaglevAssembler::CompareInt32AndBranch(
    Register r1, int32_t value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  // Expect only specific conditions.
  switch (cond) {
    case eq:
    case ne:
    case greater:
    case greater_equal:
    case less:
    case less_equal:
    case Ugreater:
    case Ugreater_equal:
    case Uless:
    case Uless_equal:
      break;  // Expected.
    case cc_always:
    default:
      UNREACHABLE();  // Not expected.
  }

  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register lhs = temps.AcquireScratch();
  if (fallthrough_when_false) {
    if (fallthrough_when_true) {
      // If both paths are a fallthrough, do nothing.
      DCHECK_EQ(if_true, if_false);
      return;
    }
    // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
    switch (cond) {
      case ult:
      case uge:
      case ule:
      case ugt:
        ZeroExtendWord(lhs, r1);
        break;
      default:
        SignExtend32To64Bits(lhs, r1);
    }
    // Jump over the false block if true, otherwise fall through into it.
    MacroAssembler::Branch(if_true, cond, lhs, Operand(value), true_distance);
  } else {
    // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
    switch (cond) {
      case ult:
      case uge:
      case ule:
      case ugt:
        ZeroExtendWord(lhs, r1);
        break;
      default:
        SignExtend32To64Bits(lhs, r1);
    }
    // Jump to the false block if true.
    MacroAssembler::Branch(if_false, NegateCondition(cond), lhs, Operand(value),
                           false_distance);
    // Jump to the true block if it's not the next block.
    if (!fallthrough_when_true) {
      MacroAssembler::Branch(if_true, true_distance);
    }
  }
}

1710 Register r1, Register value, Condition cond, Label* if_true,
1711 Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
1712 Label::Distance false_distance, bool fallthrough_when_false) {
1713 // expect only specific conditions
1714 switch (cond) {
1715 case eq:
1716 case ne:
1717 case greater:
1718 case greater_equal:
1719 case less:
1720 case less_equal:
1721 case Ugreater:
1722 case Ugreater_equal:
1723 case Uless:
1724 case Uless_equal:
1725 break; // expected
1726 case cc_always:
1727 default:
1728 UNREACHABLE(); // not expected
1729 }
1730
1731 MaglevAssembler::TemporaryRegisterScope temps(this);
1732 Register lhs = temps.AcquireScratch();
1733 Register rhs = temps.AcquireScratch();
1734 if (fallthrough_when_false) {
1735 if (fallthrough_when_true) {
1736 // If both paths are a fallthrough, do nothing.
1737 DCHECK_EQ(if_true, if_false);
1738 return;
1739 }
1740 // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
1741 switch (cond) {
1742 case ult:
1743 case uge:
1744 case ule:
1745 case ugt:
1746 ZeroExtendWord(lhs, r1);
1747 ZeroExtendWord(rhs, value);
1748 break;
1749 default:
1750 SignExtend32To64Bits(lhs, r1);
1751 SignExtend32To64Bits(rhs, value);
1752 }
1753 // Jump over the false block if true, otherwise fall through into it.
1754 MacroAssembler::Branch(if_true, cond, lhs, Operand(rhs), true_distance);
1755 } else {
1756 switch (cond) {
1757 // TODO(Yuri Gaevsky): is zero/sign extension really needed here?
1758 case ult:
1759 case uge:
1760 case ule:
1761 case ugt:
1762 ZeroExtendWord(lhs, r1);
1763 ZeroExtendWord(rhs, value);
1764 break;
1765 default:
1766 SignExtend32To64Bits(lhs, r1);
1767 SignExtend32To64Bits(rhs, value);
1768 }
1769 // Jump to the false block if true.
1770 MacroAssembler::Branch(if_false, NegateCondition(cond), lhs, Operand(rhs),
1771 false_distance);
1772 // Jump to the true block if it's not the next block.
1773 if (!fallthrough_when_true) {
1774 MacroAssembler::Branch(if_true, true_distance);
1775 }
1776 }
1777}
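// Note: these CompareInt32AndBranch overloads take both targets plus
// fallthrough flags so the code generator emits at most one branch per
// comparison. A minimal usage sketch, assuming `masm` is a
// MaglevAssembler* and a0 holds an int32 value (names illustrative):
//
//   Label is_equal, not_equal;
//   // Branch to is_equal when a0 == 42, falling through into the
//   // not-equal block, which is emitted next.
//   masm->CompareInt32AndBranch(a0, 42, kEqual,
//                               &is_equal, Label::kNear, false,
//                               &not_equal, Label::kNear, true);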
1778
1779inline void MaglevAssembler::CompareSmiAndJumpIf(Register r1, Tagged<Smi> value,
1780 Condition cond, Label* target,
1781 Label::Distance distance) {
1782 AssertSmi(r1);
1783 CompareTaggedAndBranch(target, cond, r1, Operand(value), distance);
1784}
1785
1786inline void MaglevAssembler::CompareByteAndJumpIf(MemOperand left, int8_t right,
1787 Condition cond,
1788 Register scratch,
1789 Label* target,
1790 Label::Distance distance) {
1791 LoadByte(scratch, left);
1792 MacroAssembler::Branch(target, cond, scratch, Operand(right), distance);
1793}
1794
1795inline void MaglevAssembler::CompareTaggedAndJumpIf(Register r1,
1796 Tagged<Smi> value,
1797 Condition cond,
1798 Label* target,
1799 Label::Distance distance) {
1800 CompareTaggedAndBranch(target, cond, r1, Operand(value), distance);
1801}
1802
1803inline void MaglevAssembler::CompareTaggedAndJumpIf(Register r1,
1804 Handle<HeapObject> obj,
1805 Condition cond,
1806 Label* target,
1807 Label::Distance distance) {
1808 CompareTaggedAndBranch(target, cond, r1, Operand(obj), distance);
1809}
1810
1811inline void MaglevAssembler::CompareTaggedAndJumpIf(Register src1,
1812 Register src2,
1813 Condition cond,
1814 Label* target,
1815 Label::Distance distance) {
1816 CompareTaggedAndBranch(target, cond, src1, Operand(src2), distance);
1817}
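// Note: the CompareTaggedAndJumpIf overloads compare values in their
// tagged representation, so with pointer compression enabled only the
// low 32 bits participate in the comparison; callers do not need to
// decompress first.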
1818
1819inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
1820 DoubleRegister reg, Label* target, Label::Distance distance) {
1821 MaglevAssembler::TemporaryRegisterScope temps(this);
1822 Register scratch = temps.AcquireScratch();
1823 fclass_d(scratch, reg);
1824 And(scratch, scratch,
1825 Operand(kNegativeZero | kPositiveZero | kSignalingNaN | kQuietNaN));
1826 MacroAssembler::Branch(target, not_equal, scratch, Operand(zero_reg),
1827 distance);
1828}
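// Note: fclass.d writes a one-hot classification mask for the input
// double (separate bits for -inf, negative normal/subnormal, -0, +0,
// positive subnormal/normal, +inf, signaling NaN and quiet NaN), so a
// single AND against the zero and NaN class bits plus one branch on a
// non-zero result covers all four cases at once.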
1829
1830inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
1831 MemOperand operand, Label* target, Label::Distance distance) {
1832 MaglevAssembler::TemporaryRegisterScope temps(this);
1833 DoubleRegister value_double = temps.AcquireScratchDouble();
1834 LoadDouble(value_double, operand);
1835 CompareDoubleAndJumpIfZeroOrNaN(value_double, target, distance);
1836}
1837
1838inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
1839 Register r1, int32_t mask, Label* target, Label::Distance distance) {
1840 MaglevAssembler::TemporaryRegisterScope temps(this);
1841 Register scratch = temps.AcquireScratch();
1842 // TODO(Yuri Gaevsky): is zero extension really needed here?
1843 if (mask < 0) { // high-bits are all 1s due to
1844 And(scratch, r1, Operand(mask)); // sign-promotion, so we need
1845 ZeroExtendWord(scratch, scratch); // to clear them all
1846 } else {
1847 And(scratch, r1, Operand(mask));
1848 }
1849 MacroAssembler::Branch(target, kNotZero, scratch, Operand(zero_reg),
1850 distance);
1851}
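// Note: the ZeroExtendWord above matters for negative masks because an
// int32_t immediate is sign-extended when materialized as a 64-bit
// Operand: e.g. mask 0x80000000 becomes 0xFFFFFFFF80000000, so the AND
// would keep the high 32 bits of r1 and the kNotZero test could fire on
// stale upper bits. Zero-extending the result confines the test to the
// low 32 bits.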
1852
1853inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
1854 MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
1855 MaglevAssembler::TemporaryRegisterScope temps(this);
1856 Register scratch = temps.AcquireScratch();
1857 Lwu(scratch, operand);
1858 And(scratch, scratch, Operand(mask));
1859 MacroAssembler::Branch(target, kNotZero, scratch, Operand(zero_reg),
1860 distance);
1861}
1862
1863inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
1864 Register r1, int32_t mask, Label* target, Label::Distance distance) {
1865 MaglevAssembler::TemporaryRegisterScope temps(this);
1866 Register scratch = temps.AcquireScratch();
1867 // TODO(Yuri Gaevsky): is zero extension really needed here?
1868 if (mask < 0) { // high-bits are all 1s due to
1869 And(scratch, r1, Operand(mask)); // sign-promotion, so we need
1870 ZeroExtendWord(scratch, scratch); // to clear them all
1871 } else {
1872 And(scratch, r1, Operand(mask));
1873 }
1874 MacroAssembler::Branch(target, kZero, scratch, Operand(zero_reg), distance);
1875}
1876
1877inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
1878 MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
1879 MaglevAssembler::TemporaryRegisterScope temps(this);
1880 Register scratch = temps.AcquireScratch();
1881 Lwu(scratch, operand);
1882 And(scratch, scratch, Operand(mask));
1883 MacroAssembler::Branch(target, kZero, scratch, Operand(zero_reg), distance);
1884}
1885
1886inline void MaglevAssembler::TestUint8AndJumpIfAnySet(
1887 MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
1888 MaglevAssembler::TemporaryRegisterScope temps(this);
1889 Register scratch = temps.AcquireScratch();
1890 Lbu(scratch, operand);
1891 And(scratch, scratch, Operand(mask));
1892 MacroAssembler::Branch(target, kNotZero, scratch, Operand(zero_reg),
1893 distance);
1894}
1895
1896inline void MaglevAssembler::TestUint8AndJumpIfAllClear(
1897 MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
1898 MaglevAssembler::TemporaryRegisterScope temps(this);
1899 Register scratch = temps.AcquireScratch();
1900 Lbu(scratch, operand);
1901 And(scratch, scratch, Operand(mask));
1902 MacroAssembler::Branch(target, kZero, scratch, Operand(zero_reg), distance);
1903}
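// Note: unlike the register variants, the MemOperand variants need no
// explicit extension step: Lwu and Lbu are zero-extending loads, so
// ANDing the loaded value with the immediate mask already leaves the
// upper bits clear.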
1904
1905inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
1906 Register heap_number) {
1907 LoadDouble(result,
1908 FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
1909}
1910
1911inline void MaglevAssembler::LoadHeapInt32Value(Register result,
1912 Register heap_number) {
1913 Load32U(result, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
1914}
1915
1916inline void MaglevAssembler::StoreHeapInt32Value(Register value,
1917 Register heap_number) {
1918 Sw(value, (FieldMemOperand(heap_number, offsetof(HeapNumber, value_))));
1919}
1920
1921inline void MaglevAssembler::Int32ToDouble(DoubleRegister result,
1922 Register src) {
1923 Cvt_d_w(result, src);
1924}
1925
1926inline void MaglevAssembler::IntPtrToDouble(DoubleRegister result,
1927 Register src) {
1928 fcvt_d_l(result, src);
1929}
1930
1931inline void MaglevAssembler::Uint32ToDouble(DoubleRegister result,
1932 Register src) {
1933 Cvt_d_uw(result, src);
1934}
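// Note: the three conversions above map onto distinct RISC-V FP
// conversion instructions: fcvt.d.w for signed 32-bit (Cvt_d_w),
// fcvt.d.l for signed 64-bit/intptr values, and fcvt.d.wu for unsigned
// 32-bit (Cvt_d_uw).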
1935
1936inline void MaglevAssembler::Pop(Register dst) { MacroAssembler::Pop(dst); }
1937
1938inline void MaglevAssembler::AssertStackSizeCorrect() {
1939 if (v8_flags.slow_debug_code) {
1940 MaglevAssembler::TemporaryRegisterScope temps(this);
1941 Register scratch = temps.AcquireScratch();
1942 Add64(scratch, sp,
1943 Operand(code_gen_state()->stack_slots() * kSystemPointerSize +
1944 StandardFrameConstants::kFixedFrameSizeFromFp));
1945 MacroAssembler::Assert(eq, AbortReason::kStackAccessBelowStackPointer,
1946 scratch, Operand(fp));
1947 }
1948}
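// Note: the assertion above re-derives the frame pointer as sp plus the
// static frame size; a mismatch with fp indicates an unbalanced
// push/pop somewhere in the generated code.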
1949
1950inline Condition MaglevAssembler::FunctionEntryStackCheck(
1951 int stack_check_offset) {
1952 MaglevAssembler::TemporaryRegisterScope temps(this);
1953 Register stack_cmp_reg = sp;
1954 if (stack_check_offset >= kStackLimitSlackForDeoptimizationInBytes) {
1955 stack_cmp_reg = temps.AcquireScratch();
1956 Sub64(stack_cmp_reg, sp, stack_check_offset);
1957 }
1958 Register interrupt_stack_limit = temps.AcquireScratch();
1959 LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit);
1960 // Flags register is used in subsequent JumpIfs
1961 constexpr Register flags_reg = MaglevAssembler::GetFlagsRegister();
1962 // FLAGS = ( predicted stack pointer < stack limit ) ? 1 : 0
1963 // 0 - we're OK
1964 // 1 - the stack would overflow
1965 CompareI(flags_reg, stack_cmp_reg, Operand(interrupt_stack_limit),
1966 Condition::kUnsignedLessThan);
1967 return kZero;
1968}
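// Note: RISC-V has no architectural flags register, so Maglev reserves a
// GPR (GetFlagsRegister()) and materializes the comparison into it with
// CompareI; the returned kZero names the condition on that register that
// means "enough stack". A hedged pairing sketch (label names
// illustrative):
//
//   Label done;
//   Condition cond = masm->FunctionEntryStackCheck(stack_check_offset);
//   masm->JumpIf(cond, &done);  // Flags register == 0: no overflow.
//   // ... enter the stack-guard slow path here ...
//   masm->bind(&done);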
1969
1970inline void MaglevAssembler::FinishCode() {
1971 ForceConstantPoolEmissionWithoutJump();
1972}
1973
1974template <>
1975inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
1976 Register src) {
1977 Move(dst, src);
1978}
1979template <>
1980inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
1981 MemOperand src) {
1982 switch (repr) {
1983 case MachineRepresentation::kWord32:
1984 return Lw(dst, src);
1985 case MachineRepresentation::kTagged:
1986 case MachineRepresentation::kTaggedPointer:
1987 case MachineRepresentation::kTaggedSigned:
1988 case MachineRepresentation::kWord64:
1989 return LoadWord(dst, src);
1990 default:
1991 UNREACHABLE();
1992 }
1993}
1994template <>
1995inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
1996 MemOperand dst, Register src) {
1997 switch (repr) {
1998 case MachineRepresentation::kWord32:
1999 return Sw(src, dst);
2000 case MachineRepresentation::kTagged:
2001 case MachineRepresentation::kTaggedPointer:
2002 case MachineRepresentation::kTaggedSigned:
2003 case MachineRepresentation::kWord64:
2004 return StoreWord(src, dst);
2005 default:
2006 UNREACHABLE();
2007 }
2008}
2009template <>
2010inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
2011 MemOperand dst, MemOperand src) {
2012 MaglevAssembler::TemporaryRegisterScope temps(this);
2013 Register scratch = temps.AcquireScratch();
2014 MoveRepr(repr, scratch, src);
2015 MoveRepr(repr, dst, scratch);
2016}
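// Note: RISC-V has no memory-to-memory move, so this last specialization
// bounces the value through a scratch register using the two
// register<->memory specializations above.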
2017
2018inline void MaglevAssembler::MaybeEmitPlaceHolderForDeopt() {
2019 // Implemented only for x64.
2020}
2021
2022} // namespace maglev
2023} // namespace internal
2024} // namespace v8
2025
2026#endif // V8_MAGLEV_RISCV_MAGLEV_ASSEMBLER_RISCV_INL_H_