v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

maglev-assembler-s390-inl.h

// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_S390_MAGLEV_ASSEMBLER_S390_INL_H_
#define V8_MAGLEV_S390_MAGLEV_ASSEMBLER_S390_INL_H_

#include "src/common/globals.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-gen-state.h"

namespace v8 {
namespace internal {
namespace maglev {

constexpr Condition ConditionForFloat64(Operation operation) {
  return ConditionFor(operation);
}

// constexpr Condition ConditionForNaN() { return vs; }
inline int ShiftFromScale(int n) {
  switch (n) {
    case 1:
      return 0;
    case 2:
      return 1;
    case 4:
      return 2;
    case 8:
      return 3;
    default:
      UNREACHABLE();
  }
}
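
// Example: ShiftFromScale(8) == 3, so an index is scaled by an 8-byte element
// size with a single ShiftLeftU64(scratch, index, Operand(3)), as the typed
// array and tagged field helpers below do.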

class MaglevAssembler::TemporaryRegisterScope
    : public TemporaryRegisterScopeBase<TemporaryRegisterScope> {
  using Base = TemporaryRegisterScopeBase<TemporaryRegisterScope>;

 public:
  struct SavedData : public Base::SavedData {
    RegList available_scratch_;
    DoubleRegList available_fp_scratch_;
  };

  explicit TemporaryRegisterScope(MaglevAssembler* masm)
      : Base(masm), scratch_scope_(masm) {
    if (prev_scope_ == nullptr) {
      // Add extra scratch register if no previous scope.
      // scratch_scope_.Include(kMaglevExtraScratchRegister);
    }
  }
  explicit TemporaryRegisterScope(MaglevAssembler* masm,
                                  const SavedData& saved_data)
      : Base(masm, saved_data), scratch_scope_(masm) {
    scratch_scope_.SetAvailable(saved_data.available_scratch_);
    scratch_scope_.SetAvailableDoubleRegList(saved_data.available_fp_scratch_);
  }

  Register AcquireScratch() {
    Register reg = scratch_scope_.Acquire();
    CHECK(!available_.has(reg));
    return reg;
  }
  DoubleRegister AcquireScratchDouble() {
    DoubleRegister reg = scratch_scope_.AcquireDouble();
    CHECK(!available_double_.has(reg));
    return reg;
  }
  void IncludeScratch(Register reg) { scratch_scope_.Include(reg); }

  SavedData CopyForDefer() {
    return SavedData{
        CopyForDeferBase(),
        scratch_scope_.Available(),
        scratch_scope_.AvailableDoubleRegList(),
    };
  }

  void ResetToDefaultImpl() {
    scratch_scope_.SetAvailable(Assembler::DefaultTmpList());
    scratch_scope_.SetAvailableDoubleRegList(Assembler::DefaultFPTmpList());
  }

 private:
  UseScratchRegisterScope scratch_scope_;
};

inline MapCompare::MapCompare(MaglevAssembler* masm, Register object,
                              size_t map_count)
    : masm_(masm), object_(object), map_count_(map_count) {
  map_ = masm_->scratch_register_scope()->AcquireScratch();
  masm_->LoadMap(map_, object_);
  USE(map_count_);
}

void MapCompare::Generate(Handle<Map> map, Condition cond, Label* if_true,
                          Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(masm_);
  Register temp = temps.AcquireScratch();
  masm_->Move(temp, map);
  masm_->CmpS64(map_, temp);
  CHECK(is_signed(cond));
  masm_->JumpIf(cond, if_true, distance);
}

Register MapCompare::GetMap() { return map_; }

int MapCompare::TemporaryCount(size_t map_count) { return 1; }

namespace detail {

template <typename... Args>
struct PushAllHelper;

template <>
struct PushAllHelper<> {
  static void Push(MaglevAssembler* masm) {}
  static void PushReverse(MaglevAssembler* masm) {}
};

inline void PushInput(MaglevAssembler* masm, const Input& input) {
  if (input.operand().IsConstant()) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register scratch = temps.AcquireScratch();
    input.node()->LoadToRegister(masm, scratch);
    masm->Push(scratch);
  } else {
    // TODO(leszeks): Consider special casing the value. (Toon: could possibly
    // be done through Input directly?)
    const compiler::AllocatedOperand& operand =
        compiler::AllocatedOperand::cast(input.operand());
    if (operand.IsRegister()) {
      masm->Push(operand.GetRegister());
    } else {
      DCHECK(operand.IsStackSlot());
      masm->LoadU64(r0, masm->GetStackSlot(operand));
      masm->Push(r0);
    }
  }
}

template <typename T, typename... Args>
inline void PushIterator(MaglevAssembler* masm, base::iterator_range<T> range,
                         Args... args) {
  for (auto iter = range.begin(), end = range.end(); iter != end; ++iter) {
    masm->Push(*iter);
  }
  PushAllHelper<Args...>::Push(masm, args...);
}

template <typename T, typename... Args>
inline void PushIteratorReverse(MaglevAssembler* masm,
                                base::iterator_range<T> range, Args... args) {
  PushAllHelper<Args...>::PushReverse(masm, args...);
  for (auto iter = range.rbegin(), end = range.rend(); iter != end; ++iter) {
    masm->Push(*iter);
  }
}

template <typename... Args>
struct PushAllHelper<Input, Args...> {
  static void Push(MaglevAssembler* masm, const Input& arg, Args... args) {
    PushInput(masm, arg);
    PushAllHelper<Args...>::Push(masm, args...);
  }
  static void PushReverse(MaglevAssembler* masm, const Input& arg,
                          Args... args) {
    PushAllHelper<Args...>::PushReverse(masm, args...);
    PushInput(masm, arg);
  }
};
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
  static void Push(MaglevAssembler* masm, Arg arg, Args... args) {
    if constexpr (is_iterator_range<Arg>::value) {
      PushIterator(masm, arg, args...);
    } else {
      masm->MacroAssembler::Push(arg);
      PushAllHelper<Args...>::Push(masm, args...);
    }
  }
  static void PushReverse(MaglevAssembler* masm, Arg arg, Args... args) {
    if constexpr (is_iterator_range<Arg>::value) {
      PushIteratorReverse(masm, arg, args...);
    } else {
      PushAllHelper<Args...>::PushReverse(masm, args...);
      masm->Push(arg);
    }
  }
};

}  // namespace detail

template <typename... T>
void MaglevAssembler::Push(T... vals) {
  detail::PushAllHelper<T...>::Push(this, vals...);
}

template <typename... T>
void MaglevAssembler::PushReverse(T... vals) {
  detail::PushAllHelper<T...>::PushReverse(this, vals...);
}

inline void MaglevAssembler::BindJumpTarget(Label* label) { bind(label); }

inline void MaglevAssembler::BindBlock(BasicBlock* block) {
  bind(block->label());
}

inline void MaglevAssembler::SmiTagInt32AndSetFlags(Register dst,
                                                    Register src) {
  if (SmiValuesAre31Bits()) {
    AddS32(dst, src, src);
  } else {
    SmiTag(dst, src);
  }
}
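
// On 31-bit smi builds, adding src to itself is the 1-bit left shift that
// forms the smi tag, and the add also sets the overflow flag when the int32
// value does not fit in a smi (cf. CheckInt32IsSmi below).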

inline void MaglevAssembler::CheckInt32IsSmi(Register obj, Label* fail,
                                             Register scratch) {
  DCHECK(!SmiValuesAre32Bits());
  if (scratch == Register::no_reg()) {
    scratch = r0;
  }
  mov(scratch, obj);
  AddS32(scratch, scratch);
  JumpIf(kOverflow, fail);
}

inline void MaglevAssembler::SmiAddConstant(Register dst, Register src,
                                            int value, Label* fail,
                                            Label::Distance distance) {
  AssertSmi(src);
  Move(dst, src);
  if (value != 0) {
    Register scratch = r0;
    Move(scratch, Smi::FromInt(value));
    if (SmiValuesAre31Bits()) {
      AddS32(dst, scratch);
    } else {
      AddS64(dst, scratch);
    }
    JumpIf(kOverflow, fail, distance);
  }
}

inline void MaglevAssembler::SmiSubConstant(Register dst, Register src,
                                            int value, Label* fail,
                                            Label::Distance distance) {
  AssertSmi(src);
  Move(dst, src);
  if (value != 0) {
    Register scratch = r0;
    Move(scratch, Smi::FromInt(value));
    if (SmiValuesAre31Bits()) {
      SubS32(dst, scratch);
    } else {
      SubS64(dst, scratch);
    }
    JumpIf(kOverflow, fail, distance);
  }
}

inline void MaglevAssembler::MoveHeapNumber(Register dst, double value) {
  mov(dst, Operand::EmbeddedNumber(value));
}

inline Condition MaglevAssembler::IsRootConstant(Input input,
                                                 RootIndex root_index) {
  if (input.operand().IsRegister()) {
    CompareRoot(ToRegister(input), root_index);
  } else {
    DCHECK(input.operand().IsStackSlot());
    TemporaryRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    LoadU64(scratch, ToMemOperand(input), scratch);
    CompareRoot(scratch, root_index);
  }
  return eq;
}

inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot slot) {
  return MemOperand(fp, slot.index);
}

inline Register MaglevAssembler::GetFramePointer() { return fp; }

// TODO(Victorgomes): Unify this to use StackSlot struct.
inline MemOperand MaglevAssembler::GetStackSlot(
    const compiler::AllocatedOperand& operand) {
  return MemOperand(fp, GetFramePointerOffsetForStackSlot(operand));
}

inline MemOperand MaglevAssembler::ToMemOperand(
    const compiler::InstructionOperand& operand) {
  return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}

inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
  return ToMemOperand(location.operand());
}

inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
                                                        Register object) {
  DCHECK_NE(data_pointer, object);
  LoadExternalPointerField(
      data_pointer,
      FieldMemOperand(object, JSTypedArray::kExternalPointerOffset));
  if (JSTypedArray::kMaxSizeInHeap == 0) return;
  // TemporaryRegisterScope temps(this);
  Register base = r0;
  if (COMPRESS_POINTERS_BOOL) {
    LoadU32(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
  } else {
    LoadU64(base, FieldMemOperand(object, JSTypedArray::kBasePointerOffset));
  }
  AddU64(data_pointer, data_pointer, base);
}
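
// The external pointer holds the distance to the data; for off-heap typed
// arrays the base pointer slot is Smi zero, so the add above is a no-op there.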

inline MemOperand MaglevAssembler::TypedArrayElementOperand(
    Register data_pointer, Register index, int element_size) {
  // TemporaryRegisterScope temps(this);
  Register temp = r0;
  ShiftLeftU64(temp, index, Operand(ShiftFromScale(element_size)));
  AddU64(data_pointer, data_pointer, temp);
  return MemOperand(data_pointer);
}

inline MemOperand MaglevAssembler::DataViewElementOperand(Register data_pointer,
                                                          Register index) {
  return MemOperand(data_pointer, index);
}

inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
                                                    Register object,
                                                    Register index, int scale,
                                                    int offset) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  ShiftLeftU64(scratch, index, Operand(ShiftFromScale(scale)));
  AddU64(scratch, scratch, object);
  LoadTaggedField(result, FieldMemOperand(scratch, offset));
}

inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
                                                       Register object,
                                                       int offset) {
  Move(result, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::LoadExternalPointerField(Register result,
                                                      MemOperand operand) {
  Move(result, operand);
}

void MaglevAssembler::LoadFixedArrayElement(Register result, Register array,
                                            Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  LoadTaggedFieldByIndex(result, array, index, kTaggedSize,
                         OFFSET_OF_DATA_START(FixedArray));
}

inline void MaglevAssembler::LoadTaggedFieldWithoutDecompressing(
    Register result, Register object, int offset) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::LoadTaggedFieldWithoutDecompressing(
      result, FieldMemOperand(object, offset), scratch);
}

inline void MaglevAssembler::LoadFixedArrayElementWithoutDecompressing(
    Register result, Register array, Register index) {
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_ARRAY_TYPE, AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  int times_tagged_size = (kTaggedSize == 8) ? 3 : 2;
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  Register scratch2 = temps.AcquireScratch();
  ShiftLeftU64(scratch, index, Operand(times_tagged_size));
  MacroAssembler::LoadTaggedFieldWithoutDecompressing(
      result, FieldMemOperand(array, scratch, OFFSET_OF_DATA_START(FixedArray)),
      scratch2);
}

inline void MaglevAssembler::LoadFixedDoubleArrayElement(DoubleRegister result,
                                                         Register array,
                                                         Register index) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  if (v8_flags.debug_code) {
    AssertObjectType(array, FIXED_DOUBLE_ARRAY_TYPE,
                     AbortReason::kUnexpectedValue);
    CompareInt32AndAssert(index, 0, kUnsignedGreaterThanEqual,
                          AbortReason::kUnexpectedNegativeValue);
  }
  ShiftLeftU64(scratch, index, Operand(kDoubleSizeLog2));
  LoadF64(result, FieldMemOperand(array, scratch,
                                  OFFSET_OF_DATA_START(FixedDoubleArray)));
}

inline void MaglevAssembler::StoreFixedDoubleArrayElement(
    Register array, Register index, DoubleRegister value) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  ShiftLeftU64(scratch, index, Operand(kDoubleSizeLog2));
  StoreF64(value, FieldMemOperand(array, scratch,
                                  OFFSET_OF_DATA_START(FixedDoubleArray)));
}

inline void MaglevAssembler::LoadSignedField(Register result,
                                             MemOperand operand, int size) {
  if (size == 1) {
    LoadS8(result, operand);
  } else if (size == 2) {
    LoadS16(result, operand);
  } else {
    DCHECK_EQ(size, 4);
    LoadS32(result, operand);
  }
}

inline void MaglevAssembler::LoadUnsignedField(Register result,
                                               MemOperand operand, int size) {
  if (size == 1) {
    LoadU8(result, operand);
  } else if (size == 2) {
    LoadU16(result, operand);
  } else {
    DCHECK_EQ(size, 4);
    LoadU32(result, operand);
  }
}

inline void MaglevAssembler::SetSlotAddressForTaggedField(Register slot_reg,
                                                          Register object,
                                                          int offset) {
  mov(slot_reg, object);
  AddS64(slot_reg, Operand(offset - kHeapObjectTag));
}

inline void MaglevAssembler::SetSlotAddressForFixedArrayElement(
    Register slot_reg, Register object, Register index) {
  // TemporaryRegisterScope temps(this);
  Register scratch = r0;
  mov(slot_reg, object);
  AddU64(slot_reg, Operand(OFFSET_OF_DATA_START(FixedArray) - kHeapObjectTag));
  ShiftLeftU64(scratch, index, Operand(kTaggedSizeLog2));
  AddU64(slot_reg, slot_reg, scratch);
}

inline void MaglevAssembler::StoreTaggedFieldNoWriteBarrier(Register object,
                                                            int offset,
                                                            Register value) {
  MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreFixedArrayElementNoWriteBarrier(
    Register array, Register index, Register value) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  ShiftLeftU64(scratch, index, Operand(kTaggedSizeLog2));
  AddU64(scratch, scratch, array);
  MacroAssembler::StoreTaggedField(
      value, FieldMemOperand(scratch, OFFSET_OF_DATA_START(FixedArray)));
}

inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
                                                    Register value) {
  AssertSmi(value);
  MacroAssembler::StoreTaggedField(value, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreTaggedSignedField(Register object, int offset,
                                                    Tagged<Smi> value) {
  TemporaryRegisterScope scope(this);
  Register scratch = r0;
  Move(scratch, value);
  MacroAssembler::StoreTaggedField(scratch, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreInt32Field(Register object, int offset,
                                             int32_t value) {
  TemporaryRegisterScope scope(this);
  Register scratch = r0;
  Move(scratch, value);
  StoreU32(scratch, FieldMemOperand(object, offset));
}

inline void MaglevAssembler::StoreField(MemOperand operand, Register value,
                                        int size) {
  DCHECK(size == 1 || size == 2 || size == 4);
  if (size == 1) {
    StoreU8(value, operand);
  } else if (size == 2) {
    StoreU16(value, operand);
  } else {
    DCHECK_EQ(size, 4);
    StoreU32(value, operand);
  }
}

inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
  if (size == 2) {
    lay(sp, MemOperand(sp, -kSystemPointerSize));
    StoreU16(value, MemOperand(sp));
    lrvh(value, MemOperand(sp));
    LoadS16(value, value);
    lay(sp, MemOperand(sp, kSystemPointerSize));
  } else if (size == 4) {
    lrvr(value, value);
    LoadS32(value, value);
  } else {
    DCHECK_EQ(size, 1);
  }
}
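
// lrvh only has a storage-to-register form on z/Architecture, so the 16-bit
// case bounces through a stack slot; lrvr reverses a 32-bit register directly.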

inline void MaglevAssembler::IncrementInt32(Register reg) {
  AddS32(reg, Operand(1));
}

inline void MaglevAssembler::DecrementInt32(Register reg) {
  SubS32(reg, Operand(1));
}

inline void MaglevAssembler::AddInt32(Register reg, int amount) {
  AddS32(reg, Operand(amount));
}

inline void MaglevAssembler::AndInt32(Register reg, int mask) {
  And(reg, Operand(mask));
  LoadU32(reg, reg);
}

inline void MaglevAssembler::OrInt32(Register reg, int mask) {
  Or(reg, Operand(mask));
  LoadU32(reg, reg);
}

inline void MaglevAssembler::AndInt32(Register reg, Register other) {
  And(reg, other);
  LoadU32(reg, reg);
}

inline void MaglevAssembler::OrInt32(Register reg, Register other) {
  Or(reg, other);
  LoadU32(reg, reg);
}

inline void MaglevAssembler::ShiftLeft(Register reg, int amount) {
  ShiftLeftU32(reg, reg, Operand(amount));
}

inline void MaglevAssembler::IncrementAddress(Register reg, int32_t delta) {
  CHECK(is_int20(delta));
  lay(reg, MemOperand(reg, delta));
}

inline void MaglevAssembler::LoadAddress(Register dst, MemOperand location) {
  lay(dst, location);
}

inline void MaglevAssembler::Call(Label* target) {
  MacroAssembler::Call(target);
}

inline void MaglevAssembler::EmitEnterExitFrame(int extra_slots,
                                                StackFrame::Type frame_type,
                                                Register c_function,
                                                Register scratch) {
  EnterExitFrame(scratch, extra_slots, frame_type);
}

inline void MaglevAssembler::Move(StackSlot dst, Register src) {
  StoreU64(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
  StoreF64(src, StackSlotOperand(dst));
}
inline void MaglevAssembler::Move(Register dst, StackSlot src) {
  LoadU64(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
  LoadF64(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(MemOperand dst, Register src) {
  StoreU64(src, dst);
}
inline void MaglevAssembler::Move(Register dst, MemOperand src) {
  LoadU64(dst, src);
}
inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (dst != src) {
    MacroAssembler::Move(dst, src);
  }
}
inline void MaglevAssembler::Move(Register dst, Tagged<Smi> src) {
  MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, ExternalReference src) {
  MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Register src) {
  if (dst != src) {
    mov(dst, src);
  }
}
inline void MaglevAssembler::Move(Register dst, Tagged<TaggedIndex> i) {
  mov(dst, Operand(i.ptr()));
}
inline void MaglevAssembler::Move(Register dst, int32_t i) {
  mov(dst, Operand(i));
}
inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
  TemporaryRegisterScope scope(this);
  Register scratch = scope.AcquireScratch();
  MacroAssembler::LoadF64(dst, n, scratch);
}
inline void MaglevAssembler::Move(DoubleRegister dst, Float64 n) {
  TemporaryRegisterScope scope(this);
  Register scratch = scope.AcquireScratch();
  MacroAssembler::LoadF64(dst, n, scratch);
}
inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
  MacroAssembler::Move(dst, obj);
}

inline void MaglevAssembler::Move(Register dst, uint32_t i) {
  // Move as a uint32 to avoid sign extension.
  mov(dst, Operand(i));
  LoadU32(dst, dst);
}
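
// mov sign-extends a 32-bit immediate, so the LoadU32(dst, dst) above re-reads
// the low word zero-extended and keeps the upper half clear for uint32 values
// with the top bit set.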

void MaglevAssembler::MoveTagged(Register dst, Handle<HeapObject> obj) {
#ifdef V8_COMPRESS_POINTERS
  MacroAssembler::Move(dst, obj, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
#else
  MacroAssembler::Move(dst, obj);
#endif
}

inline void MaglevAssembler::LoadInt32(Register dst, MemOperand src) {
  LoadU32(dst, src);
}

inline void MaglevAssembler::StoreInt32(MemOperand dst, Register src) {
  StoreU32(src, dst);
}

inline void MaglevAssembler::LoadFloat32(DoubleRegister dst, MemOperand src) {
  MacroAssembler::LoadF32AsF64(dst, src);
}

inline void MaglevAssembler::StoreFloat32(MemOperand dst, DoubleRegister src) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  DoubleRegister double_scratch = temps.AcquireScratchDouble();
  ledbr(double_scratch, src);
  MacroAssembler::StoreF32(double_scratch, dst);
}

inline void MaglevAssembler::LoadFloat64(DoubleRegister dst, MemOperand src) {
  MacroAssembler::LoadF64(dst, src);
}

inline void MaglevAssembler::StoreFloat64(MemOperand dst, DoubleRegister src) {
  MacroAssembler::StoreF64(src, dst);
}

inline void MaglevAssembler::LoadUnalignedFloat64(DoubleRegister dst,
                                                  Register base,
                                                  Register index) {
  LoadF64(dst, MemOperand(base, index));
}
inline void MaglevAssembler::LoadUnalignedFloat64AndReverseByteOrder(
    DoubleRegister dst, Register base, Register index) {
  TemporaryRegisterScope scope(this);
  Register scratch = r0;
  LoadU64(scratch, MemOperand(base, index));
  lrvgr(scratch, scratch);
  ldgr(dst, scratch);
}
inline void MaglevAssembler::StoreUnalignedFloat64(Register base,
                                                   Register index,
                                                   DoubleRegister src) {
  StoreF64(src, MemOperand(base, index));
}
inline void MaglevAssembler::ReverseByteOrderAndStoreUnalignedFloat64(
    Register base, Register index, DoubleRegister src) {
  TemporaryRegisterScope scope(this);
  Register scratch = r0;
  lgdr(scratch, src);
  lrvgr(scratch, scratch);
  StoreU64(scratch, MemOperand(base, index));
}

inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
  // No 64-bit registers.
  LoadS32(dst, src);
}
inline void MaglevAssembler::NegateInt32(Register val) {
  LoadS32(val, val);
  lcgr(val, val);
}

inline void MaglevAssembler::ToUint8Clamped(Register result,
                                            DoubleRegister value, Label* min,
                                            Label* max, Label* done) {
  TemporaryRegisterScope temps(this);
  DoubleRegister scratch = temps.AcquireScratchDouble();
  lzdr(kDoubleRegZero);
  CmpF64(kDoubleRegZero, value);
  // Set to 0 if NaN.
  JumpIf(Condition(CC_OF | ge), min);
  LoadF64(scratch, 255.0, r0);
  CmpF64(value, scratch);
  JumpIf(ge, max);
  // if value in [0, 255], then round up to the nearest.
  ConvertDoubleToInt32(result, value, kRoundToNearest);
  Jump(done);
}
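
// Order matters above: the first CmpF64 routes value <= 0 and NaN (CC_OF,
// i.e. unordered) to min, the second routes value >= 255 to max, and whatever
// remains is rounded to the nearest integer.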

template <typename NodeT>
inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
                                                   Register scratch,
                                                   NodeT* node) {
  // A detached buffer leads to megamorphic feedback, so we won't have a deopt
  // loop if we deopt here.
  LoadTaggedField(scratch,
                  FieldMemOperand(array, JSArrayBufferView::kBufferOffset));
  LoadU32(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  tmll(scratch, Operand(JSArrayBuffer::WasDetachedBit::kMask));
  EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node);
}

inline void MaglevAssembler::LoadByte(Register dst, MemOperand src) {
  LoadU8(dst, src);
}

inline Condition MaglevAssembler::IsCallableAndNotUndetectable(
    Register map, Register scratch) {
  LoadU8(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  And(scratch, Operand(Map::Bits1::IsUndetectableBit::kMask |
                       Map::Bits1::IsCallableBit::kMask));
  CmpU32(scratch, Operand(Map::Bits1::IsCallableBit::kMask));
  return eq;
}

inline Condition MaglevAssembler::IsNotCallableNorUndetactable(
    Register map, Register scratch) {
  tmy(FieldMemOperand(map, Map::kBitFieldOffset),
      Operand(Map::Bits1::IsUndetectableBit::kMask |
              Map::Bits1::IsCallableBit::kMask));
  return eq;
}

inline void MaglevAssembler::LoadInstanceType(Register instance_type,
                                              Register heap_object) {
  LoadMap(instance_type, heap_object);
  LoadU16(instance_type,
          FieldMemOperand(instance_type, Map::kInstanceTypeOffset));
}

inline void MaglevAssembler::JumpIfObjectType(Register heap_object,
                                              InstanceType type, Label* target,
                                              Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CompareObjectType(heap_object, scratch, scratch, type);
  JumpIf(kEqual, target, distance);
}

inline void MaglevAssembler::JumpIfNotObjectType(Register heap_object,
                                                 InstanceType type,
                                                 Label* target,
                                                 Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CompareObjectType(heap_object, scratch, scratch, type);
  JumpIf(kNotEqual, target, distance);
}

inline void MaglevAssembler::AssertObjectType(Register heap_object,
                                              InstanceType type,
                                              AbortReason reason) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  CompareObjectType(heap_object, scratch, scratch, type);
  Assert(kEqual, reason);
}

inline void MaglevAssembler::BranchOnObjectType(
    Register heap_object, InstanceType type, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CompareObjectType(heap_object, scratch, scratch, type);
  Branch(kEqual, if_true, true_distance, fallthrough_when_true, if_false,
         false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::JumpIfObjectTypeInRange(Register heap_object,
                                                     InstanceType lower_limit,
                                                     InstanceType higher_limit,
                                                     Label* target,
                                                     Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
                         higher_limit);
  JumpIf(kUnsignedLessThanEqual, target, distance);
}

inline void MaglevAssembler::JumpIfObjectTypeNotInRange(
    Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
    Label* target, Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
                         higher_limit);
  JumpIf(kUnsignedGreaterThan, target, distance);
}

inline void MaglevAssembler::AssertObjectTypeInRange(Register heap_object,
                                                     InstanceType lower_limit,
                                                     InstanceType higher_limit,
                                                     AbortReason reason) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  AssertNotSmi(heap_object);
  CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
                         higher_limit);
  Assert(kUnsignedLessThanEqual, reason);
}

inline void MaglevAssembler::BranchOnObjectTypeInRange(
    Register heap_object, InstanceType lower_limit, InstanceType higher_limit,
    Label* if_true, Label::Distance true_distance, bool fallthrough_when_true,
    Label* if_false, Label::Distance false_distance,
    bool fallthrough_when_false) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  CompareObjectTypeRange(heap_object, scratch, scratch, scratch, lower_limit,
                         higher_limit);
  Branch(kUnsignedLessThanEqual, if_true, true_distance, fallthrough_when_true,
         if_false, false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::JumpIfJSAnyIsNotPrimitive(
    Register heap_object, Label* target, Label::Distance distance) {
  // If the type of the result (stored in its map) is less than
  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
  static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::CompareObjectType<true>(heap_object, scratch, scratch,
                                          FIRST_JS_RECEIVER_TYPE);
  JumpIf(ge, target, distance);
}

inline void MaglevAssembler::CompareMapWithRoot(Register object,
                                                RootIndex index,
                                                Register scratch) {
  LoadMap(scratch, object);
  CompareRoot(scratch, index);
}

inline void MaglevAssembler::CompareInstanceType(Register map,
                                                 InstanceType instance_type) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::CompareInstanceType(map, scratch, instance_type);
}

inline void MaglevAssembler::CompareInstanceTypeRange(
    Register map, Register instance_type_out, InstanceType lower_limit,
    InstanceType higher_limit) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MacroAssembler::CompareInstanceTypeRange(map, instance_type_out, scratch,
                                           lower_limit, higher_limit);
}

inline void MaglevAssembler::CompareFloat64AndJumpIf(
    DoubleRegister src1, DoubleRegister src2, Condition cond, Label* target,
    Label* nan_failed, Label::Distance distance) {
  CmpF64(src1, src2);
  JumpIf(CC_OF, nan_failed);
  JumpIf(cond, target, distance);
}

inline void MaglevAssembler::CompareFloat64AndBranch(
    DoubleRegister src1, DoubleRegister src2, Condition cond,
    BasicBlock* if_true, BasicBlock* if_false, BasicBlock* next_block,
    BasicBlock* nan_failed) {
  CmpF64(src1, src2);
  JumpIf(CC_OF, nan_failed->label());
  Branch(cond, if_true, if_false, next_block);
}
913
914inline void MaglevAssembler::PrepareCallCFunction(int num_reg_arguments,
915 int num_double_registers) {
916 TemporaryRegisterScope temps(this);
917 Register scratch = temps.AcquireScratch();
918 MacroAssembler::PrepareCallCFunction(num_reg_arguments, num_double_registers,
919 scratch);
920}
921
922inline void MaglevAssembler::CallSelf() {
923 DCHECK(code_gen_state()->entry_label()->is_bound());
924 Call(code_gen_state()->entry_label());
925}
926
927inline void MaglevAssembler::Jump(Label* target, Label::Distance) {
928 // Any eager deopts should go through JumpIf to enable us to support the
929 // `--deopt-every-n-times` stress mode. See EmitEagerDeoptStress.
930 DCHECK(!IsDeoptLabel(target));
931 b(target);
932}
933
934inline void MaglevAssembler::JumpToDeopt(Label* target) {
935 DCHECK(IsDeoptLabel(target));
936 b(target);
937}
938
939inline void MaglevAssembler::EmitEagerDeoptStress(Label* target) {
940 // TODO(olivf): On arm `--deopt-every-n-times` is currently not supported.
941 // Supporting it would require to implement this method, additionally handle
942 // deopt branches in Cbz, and handle all cases where we fall through to the
943 // deopt branch (like Int32Divide).
944}

inline void MaglevAssembler::JumpIf(Condition cond, Label* target,
                                    Label::Distance) {
  b(to_condition(cond), target);
}

inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index,
                                        Label* if_equal,
                                        Label::Distance distance) {
  MacroAssembler::JumpIfRoot(with, index, if_equal);
}

inline void MaglevAssembler::JumpIfNotRoot(Register with, RootIndex index,
                                           Label* if_not_equal,
                                           Label::Distance distance) {
  MacroAssembler::JumpIfNotRoot(with, index, if_not_equal);
}

inline void MaglevAssembler::JumpIfSmi(Register src, Label* on_smi,
                                       Label::Distance distance) {
  MacroAssembler::JumpIfSmi(src, on_smi);
}

inline void MaglevAssembler::JumpIfNotSmi(Register src, Label* on_smi,
                                          Label::Distance distance) {
  MacroAssembler::JumpIfNotSmi(src, on_smi);
}

void MaglevAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                 Label* target, Label::Distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = r0;
  mov(scratch, Operand(byte));
  LoadS8(scratch, scratch);
  if (is_signed(cc)) {
    CmpS32(value, scratch);
  } else {
    CmpU32(value, scratch);
  }
  b(to_condition(cc), target);
}

void MaglevAssembler::JumpIfHoleNan(DoubleRegister value, Register scratch,
                                    Label* target, Label::Distance distance) {
  // TODO(leszeks): Right now this only accepts Zone-allocated target labels.
  // This works because all callsites are jumping to either a deopt, deferred
  // code, or a basic block. If we ever need to jump to an on-stack label, we
  // have to add support for it here and change the caller to pass a
  // ZoneLabelRef.
  DCHECK(compilation_info()->zone()->Contains(target));
  ZoneLabelRef is_hole = ZoneLabelRef::UnsafeFromLabelPointer(target);
  ZoneLabelRef is_not_hole(this);
  CmpF64(value, value);
  JumpIf(unordered,
         MakeDeferredCode(
             [](MaglevAssembler* masm, DoubleRegister value, Register scratch,
                ZoneLabelRef is_hole, ZoneLabelRef is_not_hole) {
               masm->lgdr(scratch, value);
               masm->ShiftRightU64(scratch, scratch, Operand(32));
               masm->CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kEqual,
                                           *is_hole);
               masm->Jump(*is_not_hole);
             },
             value, scratch, is_hole, is_not_hole));
  bind(*is_not_hole);
}
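
// The hole is a NaN with a distinguished upper 32 bits (kHoleNanUpper32), so
// the bit-pattern check (lgdr + shift) only runs in deferred code once CmpF64
// has reported unordered; the common non-NaN path stays short.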

void MaglevAssembler::JumpIfNotHoleNan(DoubleRegister value, Register scratch,
                                       Label* target,
                                       Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  CmpF64(value, value);
  JumpIf(ordered, target, distance);

  lgdr(scratch, value);
  ShiftRightU64(scratch, scratch, Operand(32));
  CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kNotEqual, target, distance);
}

void MaglevAssembler::JumpIfNotHoleNan(MemOperand operand, Label* target,
                                       Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = r0;
  mov(scratch, Operand(kHoleNanInt64));
  CmpU32(scratch, operand);
  JumpIf(ne, target, distance);

  LoadU64(scratch, operand);
  ShiftRightU64(scratch, scratch, Operand(32));
  CompareInt32AndJumpIf(scratch, kHoleNanUpper32, kNotEqual, target, distance);
}

void MaglevAssembler::JumpIfNan(DoubleRegister value, Label* target,
                                Label::Distance distance) {
  CmpF64(value, value);
  JumpIf(unordered, target, distance);
}

void MaglevAssembler::JumpIfNotNan(DoubleRegister value, Label* target,
                                   Label::Distance distance) {
  CmpF64(value, value);
  JumpIf(ordered, target, distance);
}

void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, Register r2,
                                             Condition cond, Label* target,
                                             Label::Distance distance) {
  if (is_signed(cond)) {
    CmpS64(r1, r2);
  } else {
    CmpU64(r1, r2);
  }
  b(to_condition(cond), target);
}

inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
                                                   Condition cond,
                                                   Label* target,
                                                   Label::Distance distance) {
  if (is_signed(cond)) {
    CmpS32(r1, r2);
  } else {
    CmpU32(r1, r2);
  }
  b(to_condition(cond), target);
}

inline void MaglevAssembler::CompareIntPtrAndJumpIf(Register r1, int32_t value,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  if (is_signed(cond)) {
    CmpS64(r1, Operand(value));
  } else {
    CmpU64(r1, Operand(value));
  }
  b(to_condition(cond), target);
}

inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
                                                   Condition cond,
                                                   Label* target,
                                                   Label::Distance distance) {
  if (is_signed(cond)) {
    CmpS32(r1, Operand(value));
  } else {
    CmpU32(r1, Operand(value));
  }
  JumpIf(cond, target);
}

inline void MaglevAssembler::CompareInt32AndAssert(Register r1, Register r2,
                                                   Condition cond,
                                                   AbortReason reason) {
  if (is_signed(cond)) {
    CmpS32(r1, r2);
  } else {
    CmpU32(r1, r2);
  }
  Assert(to_condition(cond), reason);
}

inline void MaglevAssembler::CompareInt32AndAssert(Register r1, int32_t value,
                                                   Condition cond,
                                                   AbortReason reason) {
  if (is_signed(cond)) {
    CmpS32(r1, Operand(value));
  } else {
    CmpU32(r1, Operand(value));
  }
  Assert(to_condition(cond), reason);
}

inline void MaglevAssembler::CompareInt32AndBranch(
    Register r1, int32_t value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  if (is_signed(cond)) {
    CmpS32(r1, Operand(value));
  } else {
    CmpU32(r1, Operand(value));
  }
  Branch(to_condition(cond), if_true, true_distance, fallthrough_when_true,
         if_false, false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::CompareInt32AndBranch(
    Register r1, Register r2, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  if (is_signed(cond)) {
    CmpS32(r1, r2);
  } else {
    CmpU32(r1, r2);
  }
  Branch(to_condition(cond), if_true, true_distance, fallthrough_when_true,
         if_false, false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::CompareIntPtrAndBranch(
    Register r1, int32_t value, Condition cond, Label* if_true,
    Label::Distance true_distance, bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  if (is_signed(cond)) {
    CmpS64(r1, Operand(value));
  } else {
    CmpU64(r1, Operand(value));
  }
  Branch(to_condition(cond), if_true, true_distance, fallthrough_when_true,
         if_false, false_distance, fallthrough_when_false);
}

inline void MaglevAssembler::CompareSmiAndJumpIf(Register r1, Tagged<Smi> value,
                                                 Condition cond, Label* target,
                                                 Label::Distance distance) {
  CmpSmiLiteral(r1, value, r0);
  JumpIf(cond, target);
}

inline void MaglevAssembler::CompareSmiAndAssert(Register r1, Tagged<Smi> value,
                                                 Condition cond,
                                                 AbortReason reason) {
  if (!v8_flags.debug_code) return;
  AssertSmi(r1);
  CompareTagged(r1, Operand(value));
  Assert(cond, reason);
}

inline void MaglevAssembler::CompareByteAndJumpIf(MemOperand left, int8_t right,
                                                  Condition cond,
                                                  Register scratch,
                                                  Label* target,
                                                  Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch2 = r0;
  LoadS8(scratch, left);
  mov(scratch2, Operand(right));
  LoadS8(scratch2, scratch2);
  CmpS32(scratch, scratch2);
  CHECK(is_signed(cond));
  JumpIf(cond, target, distance);
}

inline void MaglevAssembler::CompareTaggedAndJumpIf(Register reg,
                                                    Tagged<Smi> value,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  if (COMPRESS_POINTERS_BOOL) {
    CmpSmiLiteral(reg, value, r0);
  } else {
    Move(r0, value);
    CmpS64(reg, r0);
  }
  JumpIf(cond, target);
}

inline void MaglevAssembler::CompareTaggedAndJumpIf(Register reg,
                                                    Handle<HeapObject> obj,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register scratch = r0;
  MacroAssembler::Move(scratch, obj,
                       COMPRESS_POINTERS_BOOL
                           ? RelocInfo::COMPRESSED_EMBEDDED_OBJECT
                           : RelocInfo::FULL_EMBEDDED_OBJECT);
  CmpTagged(reg, scratch);
  b(to_condition(cond), target);
}

inline void MaglevAssembler::CompareTaggedAndJumpIf(Register src1,
                                                    Register src2,
                                                    Condition cond,
                                                    Label* target,
                                                    Label::Distance distance) {
  CmpTagged(src1, src2);
  JumpIf(cond, target, distance);
}

inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
    DoubleRegister reg, Label* target, Label::Distance distance) {
  lzdr(kDoubleRegZero);
  CmpF64(kDoubleRegZero, reg);
  JumpIf(eq, target);
  JumpIf(CC_OF, target);  // NaN check
}

inline void MaglevAssembler::CompareDoubleAndJumpIfZeroOrNaN(
    MemOperand operand, Label* target, Label::Distance distance) {
  lzdr(kDoubleRegZero);
  CmpF64(kDoubleRegZero, operand);
  JumpIf(eq, target);
  JumpIf(CC_OF, target);  // NaN check
}

inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
    Register value, int32_t mask, Label* target, Label::Distance distance) {
  And(r0, value, Operand(mask));
  bne(target);
}

inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
    MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
  LoadU32(r0, operand);
  And(r0, Operand(mask));
  bne(target);
}

inline void MaglevAssembler::TestUint8AndJumpIfAnySet(
    MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
  tmy(operand, Operand(mask));
  bne(target, distance);
}

inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
    Register value, int32_t mask, Label* target, Label::Distance distance) {
  And(r0, value, Operand(mask));
  beq(target);
}

inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
    MemOperand operand, int32_t mask, Label* target, Label::Distance distance) {
  LoadU32(r0, operand);
  And(r0, Operand(mask));
  beq(target);
}

inline void MaglevAssembler::TestUint8AndJumpIfAllClear(
    MemOperand operand, uint8_t mask, Label* target, Label::Distance distance) {
  tmy(operand, Operand(mask));
  beq(target, distance);
}

inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
                                                 Register heap_number) {
  LoadF64(result, FieldMemOperand(heap_number, offsetof(HeapNumber, value_)));
}

inline void MaglevAssembler::LoadHeapInt32Value(Register result,
                                                Register heap_number) {
  LoadU32(result,
          FieldMemOperand(heap_number, offsetof(HeapNumber, value_) +
                                           kIeeeDoubleMantissaWordOffset));
}

inline void MaglevAssembler::StoreHeapInt32Value(Register value,
                                                 Register heap_number) {
  StoreU32(value,
           FieldMemOperand(heap_number, offsetof(HeapNumber, value_) +
                                            kIeeeDoubleMantissaWordOffset));
}

inline void MaglevAssembler::Int32ToDouble(DoubleRegister result,
                                           Register src) {
  ConvertIntToDouble(src, result);
}

inline void MaglevAssembler::Uint32ToDouble(DoubleRegister result,
                                            Register src) {
  ConvertUnsignedIntToDouble(src, result);
}

inline void MaglevAssembler::IntPtrToDouble(DoubleRegister result,
                                            Register src) {
  ConvertInt64ToDouble(src, result);
}

inline void MaglevAssembler::Pop(Register dst) { pop(dst); }
1314
1316 if (v8_flags.slow_debug_code) {
1317 mov(r0, sp);
1320 CmpU64(r0, fp);
1321 Assert(eq, AbortReason::kStackAccessBelowStackPointer);
1322 }
1323}
1324
1326 int stack_check_offset) {
1327 TemporaryRegisterScope temps(this);
1328 Register interrupt_stack_limit = temps.AcquireScratch();
1330
1331 Register stack_cmp_reg = sp;
1332 if (stack_check_offset >= kStackLimitSlackForDeoptimizationInBytes) {
1333 stack_cmp_reg = r0;
1334 mov(stack_cmp_reg, sp);
1335 lay(stack_cmp_reg, MemOperand(stack_cmp_reg, -stack_check_offset));
1336 }
1337 CmpU64(stack_cmp_reg, interrupt_stack_limit);
1338 return ge;
1339}
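
// Frames larger than the deoptimization slack compare sp - stack_check_offset
// against the interrupt limit; smaller frames are covered by the slack and
// compare sp directly. Returning ge means the stack check passed.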

inline void MaglevAssembler::FinishCode() {}

template <typename NodeT>
inline void MaglevAssembler::EmitEagerDeoptIfNotEqual(DeoptimizeReason reason,
                                                      NodeT* node) {
  EmitEagerDeoptIf(ne, reason, node);
}

template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
                                      Register src) {
  Move(dst, src);
}
template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr, Register dst,
                                      MemOperand src) {
  switch (repr) {
    case MachineRepresentation::kWord32:
      return LoadU32(dst, src);
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kWord64:
      return LoadU64(dst, src);
    default:
      UNREACHABLE();
  }
}
template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
                                      MemOperand dst, Register src) {
  switch (repr) {
    case MachineRepresentation::kWord32:
      return StoreU32(src, dst);
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTaggedSigned:
      return StoreU64(src, dst);
    default:
      UNREACHABLE();
  }
}
template <>
inline void MaglevAssembler::MoveRepr(MachineRepresentation repr,
                                      MemOperand dst, MemOperand src) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  MoveRepr(repr, scratch, src);
  MoveRepr(repr, dst, scratch);
}

inline void MaglevAssembler::MaybeEmitPlaceHolderForDeopt() {
  // Implemented only for x64.
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8

#endif  // V8_MAGLEV_S390_MAGLEV_ASSEMBLER_S390_INL_H_