v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
macro-assembler-s390.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <assert.h> // For assert
6#include <limits.h> // For LONG_MIN, LONG_MAX.
7
8#if V8_TARGET_ARCH_S390X
9
10#include "src/base/bits.h"
20#include "src/debug/debug.h"
26#include "src/objects/smi.h"
27#include "src/runtime/runtime.h"
29
30// Satisfy cpplint check, but don't include platform-specific header. It is
31// included recursively via macro-assembler.h.
32#if 0
34#endif
35
36#define __ ACCESS_MASM(masm)
37
38namespace v8 {
39namespace internal {
40
41namespace {
42
43// For WebAssembly we care about the full floating point (Simd) registers. If we
44// are not running Wasm, we can get away with saving half of those (F64)
45// registers.
46#if V8_ENABLE_WEBASSEMBLY
47constexpr int kStackSavedSavedFPSizeInBytes =
49#else
50constexpr int kStackSavedSavedFPSizeInBytes =
52#endif // V8_ENABLE_WEBASSEMBLY
53
54} // namespace
55
56void MacroAssembler::DoubleMax(DoubleRegister result_reg,
57 DoubleRegister left_reg,
58 DoubleRegister right_reg) {
59 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
60 vfmax(result_reg, left_reg, right_reg, Condition(1), Condition(8),
61 Condition(3));
62 return;
63 }
64
65 Label check_zero, return_left, return_right, return_nan, done;
66 cdbr(left_reg, right_reg);
67 bunordered(&return_nan, Label::kNear);
68 beq(&check_zero);
69 bge(&return_left, Label::kNear);
70 b(&return_right, Label::kNear);
71
72 bind(&check_zero);
74 cdbr(left_reg, kDoubleRegZero);
75 /* left == right != 0. */
76 bne(&return_left, Label::kNear);
77 /* At this point, both left and right are either 0 or -0. */
78 /* N.B. The following works because +0 + -0 == +0 */
79 /* For max we want logical-and of sign bit: (L + R) */
80 ldr(result_reg, left_reg);
81 adbr(result_reg, right_reg);
82 b(&done, Label::kNear);
83
84 bind(&return_nan);
85 /* If left or right are NaN, adbr propagates the appropriate one.*/
86 adbr(left_reg, right_reg);
87 b(&return_left, Label::kNear);
88
89 bind(&return_right);
90 if (right_reg != result_reg) {
91 ldr(result_reg, right_reg);
92 }
93 b(&done, Label::kNear);
94
95 bind(&return_left);
96 if (left_reg != result_reg) {
97 ldr(result_reg, left_reg);
98 }
99 bind(&done);
100}
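// Illustration, not part of the original file: a rough host-side sketch of the
// scalar semantics the non-vector DoubleMax path above implements, assuming
// ordinary IEEE-754 doubles. Adding the operands when they compare equal merges
// the sign bits (+0 + -0 == +0, so -0 wins only when both inputs are -0), and
// an arithmetic add also propagates a NaN operand.
//
//   // static double DoubleMaxReference(double l, double r) {  // needs <cmath>
//   //   if (std::isnan(l) || std::isnan(r)) return l + r;  // NaN propagates.
//   //   if (l == r) return l == 0.0 ? l + r : l;           // merge +/-0 signs.
//   //   return l > r ? l : r;
//   // }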
101
102void MacroAssembler::DoubleMin(DoubleRegister result_reg,
103 DoubleRegister left_reg,
104 DoubleRegister right_reg) {
105 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
106 vfmin(result_reg, left_reg, right_reg, Condition(1), Condition(8),
107 Condition(3));
108 return;
109 }
110 Label check_zero, return_left, return_right, return_nan, done;
111 cdbr(left_reg, right_reg);
112 bunordered(&return_nan, Label::kNear);
113 beq(&check_zero);
114 ble(&return_left, Label::kNear);
115 b(&return_right, Label::kNear);
116
117 bind(&check_zero);
119 cdbr(left_reg, kDoubleRegZero);
120 /* left == right != 0. */
121 bne(&return_left, Label::kNear);
122 /* At this point, both left and right are either 0 or -0. */
123 /* N.B. The following works because +0 + -0 == +0 */
124 /* For min we want logical-or of sign bit: -(-L + -R) */
125 lcdbr(left_reg, left_reg);
126 ldr(result_reg, left_reg);
127 if (left_reg == right_reg) {
128 adbr(result_reg, right_reg);
129 } else {
130 sdbr(result_reg, right_reg);
131 }
132 lcdbr(result_reg, result_reg);
133 b(&done, Label::kNear);
134
135 bind(&return_nan);
136 /* If left or right are NaN, adbr propagates the appropriate one.*/
137 adbr(left_reg, right_reg);
138 b(&return_left, Label::kNear);
139
140 bind(&return_right);
141 if (right_reg != result_reg) {
142 ldr(result_reg, right_reg);
143 }
144 b(&done, Label::kNear);
145
146 bind(&return_left);
147 if (left_reg != result_reg) {
148 ldr(result_reg, left_reg);
149 }
150 bind(&done);
151}
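// Illustration, not part of the original file: the min fallback uses the dual
// trick, -(-L + -R), so that -0 wins whenever either operand is -0 (logical OR
// of the sign bits). Because only left_reg has been negated by lcdbr at that
// point, the distinct-register case subtracts right_reg (which adds its
// negation), while the aliased case adds, since the shared register already
// holds -L.
//
//   // static double DoubleMinReference(double l, double r) {  // needs <cmath>
//   //   if (std::isnan(l) || std::isnan(r)) return l + r;   // NaN propagates.
//   //   if (l == r) return l == 0.0 ? -(-l + -r) : l;       // -0 beats +0.
//   //   return l < r ? l : r;
//   // }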
152
153void MacroAssembler::FloatMax(DoubleRegister result_reg,
154 DoubleRegister left_reg,
155 DoubleRegister right_reg) {
156 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
157 vfmax(result_reg, left_reg, right_reg, Condition(1), Condition(8),
158 Condition(2));
159 return;
160 }
161 Label check_zero, return_left, return_right, return_nan, done;
162 cebr(left_reg, right_reg);
163 bunordered(&return_nan, Label::kNear);
164 beq(&check_zero);
165 bge(&return_left, Label::kNear);
166 b(&return_right, Label::kNear);
167
168 bind(&check_zero);
170 cebr(left_reg, kDoubleRegZero);
171 /* left == right != 0. */
172 bne(&return_left, Label::kNear);
173 /* At this point, both left and right are either 0 or -0. */
174 /* N.B. The following works because +0 + -0 == +0 */
175 /* For max we want logical-and of sign bit: (L + R) */
176 ldr(result_reg, left_reg);
177 aebr(result_reg, right_reg);
178 b(&done, Label::kNear);
179
180 bind(&return_nan);
181 /* If left or right are NaN, aebr propagates the appropriate one.*/
182 aebr(left_reg, right_reg);
183 b(&return_left, Label::kNear);
184
185 bind(&return_right);
186 if (right_reg != result_reg) {
187 ldr(result_reg, right_reg);
188 }
189 b(&done, Label::kNear);
190
191 bind(&return_left);
192 if (left_reg != result_reg) {
193 ldr(result_reg, left_reg);
194 }
195 bind(&done);
196}
197
198void MacroAssembler::FloatMin(DoubleRegister result_reg,
199 DoubleRegister left_reg,
200 DoubleRegister right_reg) {
201 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
202 vfmin(result_reg, left_reg, right_reg, Condition(1), Condition(8),
203 Condition(2));
204 return;
205 }
206
207 Label check_zero, return_left, return_right, return_nan, done;
208 cebr(left_reg, right_reg);
209 bunordered(&return_nan, Label::kNear);
210 beq(&check_zero);
211 ble(&return_left, Label::kNear);
212 b(&return_right, Label::kNear);
213
214 bind(&check_zero);
216 cebr(left_reg, kDoubleRegZero);
217 /* left == right != 0. */
218 bne(&return_left, Label::kNear);
219 /* At this point, both left and right are either 0 or -0. */
220 /* N.B. The following works because +0 + -0 == +0 */
221 /* For min we want logical-or of sign bit: -(-L + -R) */
222 lcebr(left_reg, left_reg);
223 ldr(result_reg, left_reg);
224 if (left_reg == right_reg) {
225 aebr(result_reg, right_reg);
226 } else {
227 sebr(result_reg, right_reg);
228 }
229 lcebr(result_reg, result_reg);
230 b(&done, Label::kNear);
231
232 bind(&return_nan);
233 /* If left or right are NaN, aebr propagates the appropriate one.*/
234 aebr(left_reg, right_reg);
235 b(&return_left, Label::kNear);
236
237 bind(&return_right);
238 if (right_reg != result_reg) {
239 ldr(result_reg, right_reg);
240 }
241 b(&done, Label::kNear);
242
243 bind(&return_left);
244 if (left_reg != result_reg) {
245 ldr(result_reg, left_reg);
246 }
247 bind(&done);
248}
249
250void MacroAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
251 fiebra(ROUND_TOWARD_POS_INF, dst, src);
252}
253
254void MacroAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
255 fidbra(ROUND_TOWARD_POS_INF, dst, src);
256}
257
258void MacroAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
259 fiebra(ROUND_TOWARD_NEG_INF, dst, src);
260}
261
262void MacroAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
263 fidbra(ROUND_TOWARD_NEG_INF, dst, src);
264}
265
266void MacroAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
267 fiebra(ROUND_TOWARD_0, dst, src);
268}
269
270void MacroAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
271 fidbra(ROUND_TOWARD_0, dst, src);
272}
273
274void MacroAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
275 fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
276}
277
278void MacroAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
279 fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
280}
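// Note (editorial summary, call-site names assumed): fiebra and fidbra are the
// z/Architecture LOAD FP INTEGER instructions for 32-bit and 64-bit BFP values,
// so the eight helpers above differ only in the rounding-mode operand: Ceil*
// rounds toward +infinity, Floor* toward -infinity, Trunc* toward zero, and
// NearestInt* to nearest-even. Hypothetical code-generator use:
//
//   // masm->FloorF64(d0, d1);       // floor(-1.5) == -2.0
//   // masm->TruncF64(d0, d1);       // trunc(-1.5) == -1.0
//   // masm->NearestIntF64(d0, d1);  // nearest-even(2.5) == 2.0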
281
282int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
283 Register exclusion1,
284 Register exclusion2,
285 Register exclusion3) const {
286 int bytes = 0;
287
288 RegList exclusions = {exclusion1, exclusion2, exclusion3};
289 RegList list = kJSCallerSaved - exclusions;
290 bytes += list.Count() * kSystemPointerSize;
291
292 if (fp_mode == SaveFPRegsMode::kSave) {
293 bytes += kStackSavedSavedFPSizeInBytes;
294 }
295
296 return bytes;
297}
298
299int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
300 Register exclusion1, Register exclusion2,
301 Register exclusion3) {
302 int bytes = 0;
303
304 RegList exclusions = {exclusion1, exclusion2, exclusion3};
305 RegList list = kJSCallerSaved - exclusions;
306 MultiPush(list);
307 bytes += list.Count() * kSystemPointerSize;
308
309 if (fp_mode == SaveFPRegsMode::kSave) {
311 bytes += kStackSavedSavedFPSizeInBytes;
312 }
313
314 return bytes;
315}
316
317int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
318 Register exclusion1, Register exclusion2,
319 Register exclusion3) {
320 int bytes = 0;
321 if (fp_mode == SaveFPRegsMode::kSave) {
323 bytes += kStackSavedSavedFPSizeInBytes;
324 }
325
326 RegList exclusions = {exclusion1, exclusion2, exclusion3};
327 RegList list = kJSCallerSaved - exclusions;
328 MultiPop(list);
329 bytes += list.Count() * kSystemPointerSize;
330
331 return bytes;
332}
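// The three caller-saved helpers above are kept in lockstep: the GPR portion is
// always (kJSCallerSaved - exclusions).Count() * kSystemPointerSize, and
// SaveFPRegsMode::kSave adds the fixed kStackSavedSavedFPSizeInBytes on top, so
// RequiredStackSizeForCallerSaved predicts exactly what PushCallerSaved
// allocates and PopCallerSaved releases. Worked example (counts assumed): with
// three excluded registers out of, say, eight caller-saved GPRs, the GPR part
// is 5 * 8 == 40 bytes, plus the FP block only when fp_mode == kSave.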
333
334void MacroAssembler::LoadFromConstantsTable(Register destination,
335 int constant_index) {
336 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
337
338 const uint32_t offset = OFFSET_OF_DATA_START(FixedArray) +
339 constant_index * kSystemPointerSize - kHeapObjectTag;
340
341 CHECK(is_uint19(offset));
343 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
346 constant_index)),
347 r1);
348}
349
352}
353
354void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
356}
357
359 intptr_t offset) {
360 if (offset == 0) {
362 } else if (is_uint12(offset)) {
364 } else {
365 DCHECK(is_int20(offset));
367 }
368}
369
370MemOperand MacroAssembler::ExternalReferenceAsOperand(
371 ExternalReference reference, Register scratch) {
372 if (root_array_available()) {
373 if (reference.IsIsolateFieldId()) {
374 return MemOperand(kRootRegister, reference.offset_from_root_register());
375 }
376 if (options().enable_root_relative_access) {
377 intptr_t offset =
379 if (is_int32(offset)) {
380 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
381 }
382 }
383 if (options().isolate_independent_code) {
384 if (IsAddressableThroughRootRegister(isolate(), reference)) {
385 // Some external references can be efficiently loaded as an offset from
386 // kRootRegister.
387 intptr_t offset =
389 CHECK(is_int32(offset));
390 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
391 } else {
392 // Otherwise, do a memory load from the external reference table.
393 LoadU64(scratch,
396 isolate(), reference)));
397 return MemOperand(scratch, 0);
398 }
399 }
400 }
401 Move(scratch, reference);
402 return MemOperand(scratch, 0);
403}
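// ExternalReferenceAsOperand tries progressively cheaper encodings: an
// isolate-field id becomes a plain offset from kRootRegister, root-relative
// access uses a direct 32-bit displacement when the delta fits, isolate
// independent code either reuses a root-register offset or loads the address
// out of the external reference table, and only as a last resort is the raw
// address materialized into the scratch register. Hedged usage sketch, reusing
// a reference that appears later in this file (registers assumed):
//
//   // ExternalReference ref =
//   //     ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
//   //                               isolate());
//   // LoadU64(r2, ExternalReferenceAsOperand(ref, ip));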
404
405void MacroAssembler::GetLabelAddress(Register dest, Label* target) {
406 larl(dest, target);
407}
408
409void MacroAssembler::Jump(Register target, Condition cond) { b(cond, target); }
410
411void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
412 Condition cond) {
413 Label skip;
414
415 if (cond != al) b(NegateCondition(cond), &skip);
416
417 mov(ip, Operand(target, rmode));
418 b(ip);
419
420 bind(&skip);
421}
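// A conditional far jump has no single encoding here, so the sequence above
// synthesizes one: when a condition is supplied, a short branch with the
// negated condition skips over the mov/branch pair. Roughly the emitted shape
// (mnemonic sketch only, not literal output):
//
//   //   bne skip              ; e.g. NegateCondition(eq)
//   //   mov ip, target        ; IIHF + IILF pair
//   //   b   ip
//   // skip: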
422
424 Condition cond) {
426 Jump(static_cast<intptr_t>(target), rmode, cond);
427}
428
430 Condition cond) {
432 DCHECK_IMPLIES(options().isolate_independent_code,
434
436 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
437 TailCallBuiltin(builtin, cond);
438 return;
439 }
442}
443
444void MacroAssembler::Jump(const ExternalReference& reference) {
445#if V8_OS_ZOS
446 // Place reference into scratch r12 ip register
447 Move(ip, reference);
448 // z/OS uses function descriptors, extract code entry into r6
449 LoadMultipleP(r5, r6, MemOperand(ip));
450 // Preserve return address into r14
451 mov(r14, r7);
452 // Call C Function
454 // Branch to return address in r14
455 b(r14);
456#else
457 UseScratchRegisterScope temps(this);
458 Register scratch = temps.Acquire();
459 Move(scratch, reference);
460 Jump(scratch);
461#endif
462}
463
464void MacroAssembler::Call(Register target) {
465 // Branch to target via indirect branch
466 basr(r14, target);
467}
468
469void MacroAssembler::CallJSEntry(Register target) {
470 DCHECK(target == r4);
471 Call(target);
472}
473
475 RelocInfo::Mode rmode,
476 Condition cond) {
477 // S390 Assembler::move sequence is IILF / IIHF
478 int size;
479 size = 14; // IILF + IIHF + BASR
480 return size;
481}
482
484 Condition cond) {
485 DCHECK(cond == al);
486
487 mov(ip, Operand(target, rmode));
488 basr(r14, ip);
489}
490
492 Condition cond) {
493 DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
494
495 DCHECK_IMPLIES(options().isolate_independent_code,
497
499 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
500 CallBuiltin(builtin);
501 return;
502 }
504 call(code, rmode);
505}
506
509 // Use ip directly instead of using UseScratchRegisterScope, as we do not
510 // preserve scratch registers across calls.
511 switch (options().builtin_call_jump_mode) {
513 mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
514 Call(ip);
515 break;
516 }
518 UNREACHABLE();
521 Call(ip);
522 break;
524 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
526 break;
527 }
528 }
529}
530
533 CommentForOffHeapTrampoline("tail call", builtin));
534 // Use ip directly instead of using UseScratchRegisterScope, as we do not
535 // preserve scratch registers across calls.
536 switch (options().builtin_call_jump_mode) {
538 mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
539 Jump(ip, cond);
540 break;
541 }
543 UNREACHABLE();
546 Jump(ip, cond);
547 break;
549 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
550 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
552 } else {
554 Jump(ip, cond);
555 }
556 break;
557 }
558 }
559}
560
561void MacroAssembler::Drop(int count) {
562 if (count > 0) {
563 int total = count * kSystemPointerSize;
564 if (is_uint12(total)) {
565 la(sp, MemOperand(sp, total));
566 } else if (is_int20(total)) {
567 lay(sp, MemOperand(sp, total));
568 } else {
569 AddS64(sp, Operand(total));
570 }
571 }
572}
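// Drop picks the shortest addressing form that can encode the byte delta:
// `la` takes an unsigned 12-bit displacement (0..4095), `lay` a signed 20-bit
// one, and anything larger falls back to a full 64-bit add. Worked example:
// Drop(3) bumps sp by 3 * kSystemPointerSize == 24 bytes, which fits in 12
// bits, so a single `la sp, 24(sp)` is emitted.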
573
574void MacroAssembler::Drop(Register count, Register scratch) {
575 ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
576 AddS64(sp, sp, scratch);
577}
578
579// Enforce alignment of sp.
581 int frame_alignment = ActivationFrameAlignment();
582 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
583
584 uint64_t frame_alignment_mask = ~(static_cast<uint64_t>(frame_alignment) - 1);
585 AndP(sp, sp, Operand(frame_alignment_mask));
586}
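// The alignment is enforced by masking away the low bits of sp: with an 8-byte
// activation frame alignment the mask is ~(8 - 1) == 0xFFFF'FFFF'FFFF'FFF8, so
// AndP rounds sp down to the next aligned address, e.g. sp == 0x...1F4C becomes
// 0x...1F48 (worked example, address assumed).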
587
589 Register scratch) {
590 LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
592}
593
594Operand MacroAssembler::ClearedValue() const {
595 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
596}
597
598void MacroAssembler::Call(Label* target) { b(r14, target); }
599
600void MacroAssembler::Push(Handle<HeapObject> handle) {
601 mov(r0, Operand(handle));
602 push(r0);
603}
604
606 mov(r0, Operand(smi));
607 push(r0);
608}
609
610void MacroAssembler::Push(Tagged<TaggedIndex> index) {
611 // TaggedIndex is the same as Smi for 32 bit archs.
612 mov(r0, Operand(static_cast<uint32_t>(index.value())));
613 push(r0);
614}
615
616void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
617 RelocInfo::Mode rmode) {
618 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
619 // non-isolate-independent code. In many cases it might be cheaper than
620 // embedding the relocatable value.
621 if (root_array_available_ && options().isolate_independent_code) {
622 IndirectLoadConstant(dst, value);
623 return;
624 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
626 DCHECK(is_uint32(index));
627 mov(dst, Operand(static_cast<int>(index), rmode));
628 } else {
630 mov(dst, Operand(value.address(), rmode));
631 }
632}
633
634void MacroAssembler::Move(Register dst, ExternalReference reference) {
635 if (root_array_available()) {
636 if (reference.IsIsolateFieldId()) {
638 Operand(reference.offset_from_root_register()));
639 return;
640 }
641 if (options().isolate_independent_code) {
642 IndirectLoadExternalReference(dst, reference);
643 return;
644 }
645 }
646
647 // External references should not get created with IDs if
648 // `!root_array_available()`.
649 CHECK(!reference.IsIsolateFieldId());
650 mov(dst, Operand(reference));
651}
652
655}
656
657void MacroAssembler::Move(Register dst, Register src, Condition cond) {
658 if (dst != src) {
659 if (cond == al) {
660 mov(dst, src);
661 } else {
662 LoadOnConditionP(cond, dst, src);
663 }
664 }
665}
666
668 if (dst != src) {
669 ldr(dst, src);
670 }
671}
672
673void MacroAssembler::Move(Register dst, const MemOperand& src) {
674 LoadU64(dst, src);
675}
676
677// Wrapper around Assembler::mvc (SS-a format)
678void MacroAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
679 const Operand& length) {
680 mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
681}
682
683// Wrapper around Assembler::clc (SS-a format)
685 const MemOperand& opnd2,
686 const Operand& length) {
687 clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
688}
689
690// Wrapper around Assembler::xc (SS-a format)
692 const MemOperand& opnd2,
693 const Operand& length) {
694 xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
695}
696
697// Wrapper around Assembler::risbg(n) (RIE-f)
698void MacroAssembler::RotateInsertSelectBits(Register dst, Register src,
699 const Operand& startBit,
700 const Operand& endBit,
701 const Operand& shiftAmt,
702 bool zeroBits) {
703 if (zeroBits)
704 // High tag the top bit of I4/EndBit to zero out any unselected bits
705 risbg(dst, src, startBit,
706 Operand(static_cast<intptr_t>(endBit.immediate() | 0x80)), shiftAmt);
707 else
708 risbg(dst, src, startBit, endBit, shiftAmt);
709}
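// In risbg the end-bit operand doubles as a control byte: the 0x80 ORed in
// above sets its leftmost bit, which tells the instruction to zero every
// destination bit outside the selected range instead of preserving it.
// Illustrative call (operand values assumed): selecting bits 48..63 with a zero
// rotate and zeroBits == true acts as a 16-bit mask.
//
//   // RotateInsertSelectBits(dst, src, Operand(48), Operand(63), Operand(0),
//   //                        true);  // dst = src & 0xFFFF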
710
711void MacroAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
712 Label* L) {
713 brxhg(dst, inc, L);
714}
715
716void MacroAssembler::PushArray(Register array, Register size, Register scratch,
717 Register scratch2, PushArrayOrder order) {
718 Label loop, done;
719
720 if (order == kNormal) {
721 ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
722 lay(scratch, MemOperand(array, scratch));
723 bind(&loop);
724 CmpS64(array, scratch);
725 bge(&done);
726 lay(scratch, MemOperand(scratch, -kSystemPointerSize));
727 lay(sp, MemOperand(sp, -kSystemPointerSize));
728 MoveChar(MemOperand(sp), MemOperand(scratch), Operand(kSystemPointerSize));
729 b(&loop);
730 bind(&done);
731 } else {
732 DCHECK_NE(scratch2, r0);
733 ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
734 lay(scratch, MemOperand(array, scratch));
735 mov(scratch2, array);
736 bind(&loop);
737 CmpS64(scratch2, scratch);
738 bge(&done);
739 lay(sp, MemOperand(sp, -kSystemPointerSize));
740 MoveChar(MemOperand(sp), MemOperand(scratch2), Operand(kSystemPointerSize));
741 lay(scratch2, MemOperand(scratch2, kSystemPointerSize));
742 b(&loop);
743 bind(&done);
744 }
745}
746
747void MacroAssembler::MultiPush(RegList regs, Register location) {
748 int16_t num_to_push = regs.Count();
749 int16_t stack_offset = num_to_push * kSystemPointerSize;
750
751 SubS64(location, location, Operand(stack_offset));
752 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
753 if ((regs.bits() & (1 << i)) != 0) {
754 stack_offset -= kSystemPointerSize;
755 StoreU64(ToRegister(i), MemOperand(location, stack_offset));
756 }
757 }
758}
759
760void MacroAssembler::MultiPop(RegList regs, Register location) {
761 int16_t stack_offset = 0;
762
763 for (int16_t i = 0; i < Register::kNumRegisters; i++) {
764 if ((regs.bits() & (1 << i)) != 0) {
765 LoadU64(ToRegister(i), MemOperand(location, stack_offset));
766 stack_offset += kSystemPointerSize;
767 }
768 }
769 AddS64(location, location, Operand(stack_offset));
770}
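// MultiPush stores from the highest-numbered register in the list downward and
// MultiPop reloads in the opposite order, so the lowest-numbered register ends
// up at the lowest stack address and a matched push/pop pair with the same
// RegList round-trips every register. Hedged usage sketch (registers assumed):
//
//   // RegList saved = {r2, r3, r4};
//   // MultiPush(saved);   // sp -= 3 * kSystemPointerSize
//   // ... clobbering code ...
//   // MultiPop(saved);    // restores r2, r3, r4 and sp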
771
772void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
773 int16_t num_to_push = dregs.Count();
774 int16_t stack_offset = num_to_push * kDoubleSize;
775
776 SubS64(location, location, Operand(stack_offset));
777 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
778 if ((dregs.bits() & (1 << i)) != 0) {
780 stack_offset -= kDoubleSize;
781 StoreF64(dreg, MemOperand(location, stack_offset));
782 }
783 }
784}
785
786void MacroAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
787 Register location) {
788 int16_t num_to_push = dregs.Count();
789 int16_t stack_offset = num_to_push * kSimd128Size;
790
791 SubS64(location, location, Operand(stack_offset));
792 for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
793 if ((dregs.bits() & (1 << i)) != 0) {
795 stack_offset -= kSimd128Size;
796 StoreV128(dreg, MemOperand(location, stack_offset), scratch);
797 }
798 }
799}
800
801void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
802 int16_t stack_offset = 0;
803
804 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
805 if ((dregs.bits() & (1 << i)) != 0) {
807 LoadF64(dreg, MemOperand(location, stack_offset));
808 stack_offset += kDoubleSize;
809 }
810 }
811 AddS64(location, location, Operand(stack_offset));
812}
813
814void MacroAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
815 Register location) {
816 int16_t stack_offset = 0;
817
818 for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
819 if ((dregs.bits() & (1 << i)) != 0) {
821 LoadV128(dreg, MemOperand(location, stack_offset), scratch);
822 stack_offset += kSimd128Size;
823 }
824 }
825 AddS64(location, location, Operand(stack_offset));
826}
827
828void MacroAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
829 Register location) {
830#if V8_ENABLE_WEBASSEMBLY
831 bool generating_bultins =
833 if (generating_bultins) {
834 Label push_doubles, simd_pushed;
835 Move(r1, ExternalReference::supports_wasm_simd_128_address());
836 LoadU8(r1, MemOperand(r1));
837 LoadAndTestP(r1, r1); // If > 0 then simd is available.
838 ble(&push_doubles, Label::kNear);
839 // Save vector registers, don't save double registers anymore.
840 MultiPushV128(dregs, scratch);
841 b(&simd_pushed);
842 bind(&push_doubles);
843 // Simd not supported, only save double registers.
844 MultiPushDoubles(dregs);
845 // We still need to allocate empty space on the stack as if
846 // Simd registers were saved (see kFixedFrameSizeFromFp).
847 lay(sp, MemOperand(sp, -(dregs.Count() * kDoubleSize)));
848 bind(&simd_pushed);
849 } else {
851 MultiPushV128(dregs, scratch);
852 } else {
853 MultiPushDoubles(dregs);
854 lay(sp, MemOperand(sp, -(dregs.Count() * kDoubleSize)));
855 }
856 }
857#else
858 MultiPushDoubles(dregs);
859#endif
860}
861
862void MacroAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
863 Register location) {
864#if V8_ENABLE_WEBASSEMBLY
865 bool generating_bultins =
867 if (generating_bultins) {
868 Label pop_doubles, simd_popped;
869 Move(r1, ExternalReference::supports_wasm_simd_128_address());
870 LoadU8(r1, MemOperand(r1));
871 LoadAndTestP(r1, r1); // If > 0 then simd is available.
872 ble(&pop_doubles, Label::kNear);
873 // Pop vector registers, don't pop double registers anymore.
874 MultiPopV128(dregs, scratch);
875 b(&simd_popped);
876 bind(&pop_doubles);
877 // Simd not supported, only pop double registers.
878 lay(sp, MemOperand(sp, dregs.Count() * kDoubleSize));
879 MultiPopDoubles(dregs);
880 bind(&simd_popped);
881 } else {
883 MultiPopV128(dregs, scratch);
884 } else {
885 lay(sp, MemOperand(sp, dregs.Count() * kDoubleSize));
886 MultiPopDoubles(dregs);
887 }
888 }
889#else
890 MultiPopDoubles(dregs);
891#endif
892}
893
895 if (registers.is_empty()) return;
896 ASM_CODE_COMMENT(this);
897 // TODO(victorgomes): {stm/ldm} pushes/pops registers in the opposite order
898 // as expected by Maglev frame. Consider massaging Maglev to accept this
899 // order instead.
900 // Cannot use MultiPush(registers, sp) due to the required order.
901 for (Register reg : registers) {
902 Push(reg);
903 }
904}
905
907 if (registers.is_empty()) return;
908 ASM_CODE_COMMENT(this);
909 // Cannot use MultiPop(registers, sp) for the same reason.
910 for (Register reg : base::Reversed(registers)) {
911 Pop(reg);
912 }
913}
914
915void MacroAssembler::PushAll(DoubleRegList registers, int stack_slot_size) {
916 if (registers.is_empty()) return;
917 ASM_CODE_COMMENT(this);
919}
920
921void MacroAssembler::PopAll(DoubleRegList registers, int stack_slot_size) {
922 if (registers.is_empty()) return;
923 ASM_CODE_COMMENT(this);
925}
926
928 ASM_CODE_COMMENT(this);
929 if (CanBeImmediate(index)) {
931 return;
932 }
933 LoadRoot(destination, index);
934}
935
937 Condition) {
938 if (CanBeImmediate(index)) {
940 return;
941 }
944}
945
947 const MemOperand& field_operand,
948 const Register& scratch) {
950 DecompressTagged(destination, field_operand);
951 } else {
952 LoadU64(destination, field_operand, scratch);
953 }
954}
956 const Register& destination, const MemOperand& field_operand,
957 const Register& scratch) {
959 LoadU32(destination, field_operand, scratch);
960 } else {
961 LoadU64(destination, field_operand, scratch);
962 }
963}
964void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) {
965 if (SmiValuesAre31Bits()) {
966 LoadS32(dst, src);
967 } else {
968 LoadU64(dst, src);
969 }
970 SmiUntag(dst);
971}
972
973void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) {
974 SmiUntag(dst, src);
975}
976
977void MacroAssembler::StoreTaggedField(const Register& value,
978 const MemOperand& dst_field_operand,
979 const Register& scratch) {
981 RecordComment("[ StoreTagged");
982 StoreU32(value, dst_field_operand);
983 RecordComment("]");
984 } else {
985 StoreU64(value, dst_field_operand, scratch);
986 }
987}
988
990 Register src) {
991 RecordComment("[ DecompressTaggedSigned");
992 llgfr(destination, src);
993 RecordComment("]");
994}
995
997 MemOperand field_operand) {
998 RecordComment("[ DecompressTaggedSigned");
999 llgf(destination, field_operand);
1000 RecordComment("]");
1001}
1002
1003void MacroAssembler::DecompressTagged(Register destination, Register source) {
1004 RecordComment("[ DecompressTagged");
1005 llgfr(destination, source);
1007 RecordComment("]");
1008}
1009
1011 MemOperand field_operand) {
1012 RecordComment("[ DecompressTagged");
1013 llgf(destination, field_operand);
1015 RecordComment("]");
1016}
1017
1019 Tagged_t immediate) {
1020 ASM_CODE_COMMENT(this);
1021 mov(destination, Operand(immediate, RelocInfo::NO_INFO));
1023}
1024
1026 MemOperand field_operand) {
1028 DecompressTaggedSigned(destination, field_operand);
1029 } else {
1030 LoadU64(destination, field_operand);
1031 }
1032}
1033
1034void MacroAssembler::RecordWriteField(Register object, int offset,
1035 Register value, Register slot_address,
1036 LinkRegisterStatus lr_status,
1037 SaveFPRegsMode save_fp,
1038 SmiCheck smi_check) {
1039 // First, check if a write barrier is even needed. The tests below
1040 // catch stores of Smis.
1041 Label done;
1042
1043 // Skip barrier if writing a smi.
1044 if (smi_check == SmiCheck::kInline) {
1045 JumpIfSmi(value, &done);
1046 }
1047
1048 // Although the object register is tagged, the offset is relative to the start
1049 // of the object, so so offset must be a multiple of kSystemPointerSize.
1051
1052 lay(slot_address, MemOperand(object, offset - kHeapObjectTag));
1053 if (v8_flags.slow_debug_code) {
1054 Label ok;
1055 AndP(r0, slot_address, Operand(kTaggedSize - 1));
1056 beq(&ok, Label::kNear);
1057 stop();
1058 bind(&ok);
1059 }
1060
1061 RecordWrite(object, slot_address, value, lr_status, save_fp, SmiCheck::kOmit);
1062
1063 bind(&done);
1064
1065 // Clobber clobbered input registers when running with the debug-code flag
1066 // turned on to provoke errors.
1067 if (v8_flags.slow_debug_code) {
1068 mov(value, Operand(base::bit_cast<intptr_t>(kZapValue + 4)));
1069 mov(slot_address, Operand(base::bit_cast<intptr_t>(kZapValue + 8)));
1070 }
1071}
1072
1073void MacroAssembler::Zero(const MemOperand& dest) {
1074 ASM_CODE_COMMENT(this);
1075 Register scratch = r0;
1076
1077 mov(scratch, Operand::Zero());
1078 StoreU64(scratch, dest);
1079}
1080void MacroAssembler::Zero(const MemOperand& dest1, const MemOperand& dest2) {
1081 ASM_CODE_COMMENT(this);
1082 Register scratch = r0;
1083
1084 mov(scratch, Operand::Zero());
1085 StoreU64(scratch, dest1);
1086 StoreU64(scratch, dest2);
1087}
1088
1090 if (registers.is_empty()) return;
1092}
1093
1095 if (registers.is_empty()) return;
1097}
1098
1099void MacroAssembler::CallEphemeronKeyBarrier(Register object,
1100 Register slot_address,
1101 SaveFPRegsMode fp_mode) {
1102 DCHECK(!AreAliased(object, slot_address));
1106
1108 Register slot_address_parameter =
1110
1111 Push(object);
1112 Push(slot_address);
1113 Pop(slot_address_parameter);
1114 Pop(object_parameter);
1115
1118}
1119
1121 Register slot_address,
1122 SaveFPRegsMode fp_mode,
1123 StubCallMode mode) {
1124 DCHECK(!AreAliased(object, slot_address));
1128
1130 Register slot_address_parameter =
1132
1133 Push(object);
1134 Push(slot_address);
1135 Pop(slot_address_parameter);
1136 Pop(object_parameter);
1137
1138 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
1139
1141}
1142
1143void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
1144 SaveFPRegsMode fp_mode,
1145 StubCallMode mode) {
1146 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
1147 // need to be caller saved.
1150#if V8_ENABLE_WEBASSEMBLY
1151 if (mode == StubCallMode::kCallWasmRuntimeStub) {
1152 auto wasm_target =
1153 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
1154 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
1155#else
1156 if (false) {
1157#endif
1158 } else {
1160 }
1161}
1162
1163// Will clobber 4 registers: object, address, scratch, ip. The
1164// register 'object' contains a heap object pointer. The heap object
1165// tag is shifted away.
1166void MacroAssembler::RecordWrite(Register object, Register slot_address,
1167 Register value, LinkRegisterStatus lr_status,
1168 SaveFPRegsMode fp_mode, SmiCheck smi_check) {
1169 DCHECK(!AreAliased(object, slot_address, value));
1170 if (v8_flags.slow_debug_code) {
1171 LoadTaggedField(r0, MemOperand(slot_address));
1172 CmpS64(value, r0);
1173 Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
1174 }
1175
1176 if (v8_flags.disable_write_barriers) {
1177 return;
1178 }
1179 // First, check if a write barrier is even needed. The tests below
1180 // catch stores of smis and stores into the young generation.
1181 Label done;
1182
1183 if (smi_check == SmiCheck::kInline) {
1184 JumpIfSmi(value, &done);
1185 }
1186
1187 CheckPageFlag(value,
1188 value, // Used as scratch.
1190 CheckPageFlag(object,
1191 value, // Used as scratch.
1193
1194 // Record the actual write.
1195 if (lr_status == kLRHasNotBeenSaved) {
1196 push(r14);
1197 }
1198 CallRecordWriteStubSaveRegisters(object, slot_address, fp_mode);
1199 if (lr_status == kLRHasNotBeenSaved) {
1200 pop(r14);
1201 }
1202
1203 if (v8_flags.slow_debug_code) mov(slot_address, Operand(kZapValue));
1204
1205 bind(&done);
1206
1207 // Clobber clobbered registers when running with the debug-code flag
1208 // turned on to provoke errors.
1209 if (v8_flags.slow_debug_code) {
1210 mov(slot_address, Operand(base::bit_cast<intptr_t>(kZapValue + 12)));
1211 mov(value, Operand(base::bit_cast<intptr_t>(kZapValue + 16)));
1212 }
1213}
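// The two CheckPageFlag tests above (their flag operands are elided in this
// listing) form the usual write-barrier filter: the slow path is only taken
// when the stored value lives on a page the GC is tracking and the host object
// lives on a page that records outgoing pointers. A typical call-site shape,
// registers assumed and not taken from this file:
//
//   // StoreTaggedField(value, FieldMemOperand(object, offset), scratch);
//   // RecordWriteField(object, offset, value, slot_address, kLRHasNotBeenSaved,
//   //                  SaveFPRegsMode::kIgnore);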
1214
1215void MacroAssembler::PushCommonFrame(Register marker_reg) {
1216 ASM_CODE_COMMENT(this);
1217 int fp_delta = 0;
1218 if (marker_reg.is_valid()) {
1219 Push(r14, fp, marker_reg);
1220 fp_delta = 1;
1221 } else {
1222 Push(r14, fp);
1223 fp_delta = 0;
1224 }
1225 la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
1226}
1227
1228void MacroAssembler::PopCommonFrame(Register marker_reg) {
1229 if (marker_reg.is_valid()) {
1230 Pop(r14, fp, marker_reg);
1231 } else {
1232 Pop(r14, fp);
1233 }
1234}
1235
1236void MacroAssembler::PushStandardFrame(Register function_reg) {
1237 int fp_delta = 0;
1238 if (function_reg.is_valid()) {
1239 Push(r14, fp, cp, function_reg);
1240 fp_delta = 2;
1241 } else {
1242 Push(r14, fp, cp);
1243 fp_delta = 1;
1244 }
1245 la(fp, MemOperand(sp, fp_delta * kSystemPointerSize));
1247}
1248
1250 // if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
1251 // LoadU64(kConstantPoolRegister,
1252 // MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
1253 // set_constant_pool_available(false);
1254 // }
1258}
1259
1261 const DoubleRegister src) {
1262 // Turn potential sNaN into qNaN
1263 if (dst != src) ldr(dst, src);
1265 sdbr(dst, kDoubleRegZero);
1266}
1267
1268void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
1269 cdfbr(dst, src);
1270}
1271
1273 Register src) {
1274 if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
1275 cdlfbr(Condition(5), Condition(0), dst, src);
1276 } else {
1277 // zero-extend src
1278 llgfr(src, src);
1279 // convert to double
1280 cdgbr(dst, src);
1281 }
1282}
1283
1284void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
1285 cefbra(Condition(4), dst, src);
1286}
1287
1289 Register src) {
1290 celfbr(Condition(4), Condition(0), dst, src);
1291}
1292
1294 Register src) {
1295 cegbr(double_dst, src);
1296}
1297
1299 Register src) {
1300 cdgbr(double_dst, src);
1301}
1302
1304 Register src) {
1305 celgbr(Condition(0), Condition(0), double_dst, src);
1306}
1307
1309 Register src) {
1310 cdlgbr(Condition(0), Condition(0), double_dst, src);
1311}
1312
1313void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
1314 const DoubleRegister double_input,
1316 Condition m = Condition(0);
1317 switch (rounding_mode) {
1318 case kRoundToZero:
1319 m = Condition(5);
1320 break;
1321 case kRoundToNearest:
1322 UNIMPLEMENTED();
1323 case kRoundToPlusInf:
1324 m = Condition(6);
1325 break;
1326 case kRoundToMinusInf:
1327 m = Condition(7);
1328 break;
1329 default:
1330 UNIMPLEMENTED();
1331 }
1332 cgebr(m, dst, double_input);
1333}
1334
1335void MacroAssembler::ConvertDoubleToInt64(const Register dst,
1336 const DoubleRegister double_input,
1338 Condition m = Condition(0);
1339 switch (rounding_mode) {
1340 case kRoundToZero:
1341 m = Condition(5);
1342 break;
1343 case kRoundToNearest:
1344 UNIMPLEMENTED();
1345 case kRoundToPlusInf:
1346 m = Condition(6);
1347 break;
1348 case kRoundToMinusInf:
1349 m = Condition(7);
1350 break;
1351 default:
1352 UNIMPLEMENTED();
1353 }
1354 cgdbr(m, dst, double_input);
1355}
1356
1357void MacroAssembler::ConvertDoubleToInt32(const Register dst,
1358 const DoubleRegister double_input,
1360 Condition m = Condition(0);
1361 switch (rounding_mode) {
1362 case kRoundToZero:
1363 m = Condition(5);
1364 break;
1365 case kRoundToNearest:
1366 m = Condition(4);
1367 break;
1368 case kRoundToPlusInf:
1369 m = Condition(6);
1370 break;
1371 case kRoundToMinusInf:
1372 m = Condition(7);
1373 break;
1374 default:
1375 UNIMPLEMENTED();
1376 }
1377 lghi(dst, Operand::Zero());
1378 cfdbr(m, dst, double_input);
1379}
1380
1382 const DoubleRegister double_input,
1384 Condition m = Condition(0);
1385 switch (rounding_mode) {
1386 case kRoundToZero:
1387 m = Condition(5);
1388 break;
1389 case kRoundToNearest:
1390 m = Condition(4);
1391 break;
1392 case kRoundToPlusInf:
1393 m = Condition(6);
1394 break;
1395 case kRoundToMinusInf:
1396 m = Condition(7);
1397 break;
1398 default:
1399 UNIMPLEMENTED();
1400 }
1401 lghi(result, Operand::Zero());
1402 cfebr(m, result, double_input);
1403}
1404
1406 const Register result, const DoubleRegister double_input,
1408 Condition m = Condition(0);
1409 switch (rounding_mode) {
1410 case kRoundToZero:
1411 m = Condition(5);
1412 break;
1413 case kRoundToNearest:
1414 UNIMPLEMENTED();
1415 case kRoundToPlusInf:
1416 m = Condition(6);
1417 break;
1418 case kRoundToMinusInf:
1419 m = Condition(7);
1420 break;
1421 default:
1422 UNIMPLEMENTED();
1423 }
1424 lghi(result, Operand::Zero());
1425 clfebr(m, Condition(0), result, double_input);
1426}
1427
1429 const Register result, const DoubleRegister double_input,
1431 Condition m = Condition(0);
1432 switch (rounding_mode) {
1433 case kRoundToZero:
1434 m = Condition(5);
1435 break;
1436 case kRoundToNearest:
1437 UNIMPLEMENTED();
1438 case kRoundToPlusInf:
1439 m = Condition(6);
1440 break;
1441 case kRoundToMinusInf:
1442 m = Condition(7);
1443 break;
1444 default:
1445 UNIMPLEMENTED();
1446 }
1447 clgebr(m, Condition(0), result, double_input);
1448}
1449
1451 const Register dst, const DoubleRegister double_input,
1453 Condition m = Condition(0);
1454 switch (rounding_mode) {
1455 case kRoundToZero:
1456 m = Condition(5);
1457 break;
1458 case kRoundToNearest:
1459 UNIMPLEMENTED();
1460 case kRoundToPlusInf:
1461 m = Condition(6);
1462 break;
1463 case kRoundToMinusInf:
1464 m = Condition(7);
1465 break;
1466 default:
1467 UNIMPLEMENTED();
1468 }
1469 clgdbr(m, Condition(0), dst, double_input);
1470}
1471
1473 const Register dst, const DoubleRegister double_input,
1475 Condition m = Condition(0);
1476 switch (rounding_mode) {
1477 case kRoundToZero:
1478 m = Condition(5);
1479 break;
1480 case kRoundToNearest:
1481 UNIMPLEMENTED();
1482 case kRoundToPlusInf:
1483 m = Condition(6);
1484 break;
1485 case kRoundToMinusInf:
1486 m = Condition(7);
1487 break;
1488 default:
1489 UNIMPLEMENTED();
1490 }
1491 lghi(dst, Operand::Zero());
1492 clfdbr(m, Condition(0), dst, double_input);
1493}
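// All of the Convert*ToInt*/ToUnsigned* helpers above map the requested
// RoundingMode onto the z/Architecture rounding-mode mask passed to the convert
// instruction: 4 = to nearest, 5 = toward zero, 6 = toward +infinity, 7 =
// toward -infinity; the 64-bit variants are left UNIMPLEMENTED() for
// kRoundToNearest in this file. Illustration (registers and values assumed, not
// from this file):
//
//   // ConvertDoubleToInt32(r2, d0, kRoundToZero);      // -1.7 -> -1
//   // ConvertDoubleToInt32(r2, d0, kRoundToMinusInf);  // -1.7 -> -2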
1494
1495void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
1496 lgdr(dst, src);
1497}
1498
1499void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
1500 ldgr(dst, src);
1501}
1502
1503void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
1504 int prologue_offset) {
1505 {
1506 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1507 mov(r1, Operand(StackFrame::TypeToMarker(type)));
1508 PushCommonFrame(r1);
1509 }
1510}
1511
1512void MacroAssembler::Prologue(Register base, int prologue_offset) {
1513 DCHECK(base != no_reg);
1515}
1516
1517void MacroAssembler::DropArguments(Register count) {
1518 ShiftLeftU64(ip, count, Operand(kSystemPointerSizeLog2));
1519 lay(sp, MemOperand(sp, ip));
1520}
1521
1523 Register receiver) {
1524 DCHECK(!AreAliased(argc, receiver));
1525 DropArguments(argc);
1526 push(receiver);
1527}
1528
1530 bool load_constant_pool_pointer_reg) {
1531 ASM_CODE_COMMENT(this);
1532 // We create a stack frame with:
1533 // Return Addr <-- old sp
1534 // Old FP <-- new fp
1535 // CP
1536 // type
1537 // CodeObject <-- new sp
1538
1539 Register scratch = no_reg;
1540 if (!StackFrame::IsJavaScript(type)) {
1541 scratch = ip;
1542 mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1543 }
1544 PushCommonFrame(scratch);
1545#if V8_ENABLE_WEBASSEMBLY
1546 if (type == StackFrame::WASM) Push(kWasmImplicitArgRegister);
1547#endif // V8_ENABLE_WEBASSEMBLY
1548}
1549
1550int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
1551 ASM_CODE_COMMENT(this);
1552 // Drop the execution stack down to the frame pointer and restore
1553 // the caller frame pointer, return address and constant pool pointer.
1555 if (is_int20(StandardFrameConstants::kCallerSPOffset + stack_adjustment)) {
1557 stack_adjustment));
1558 } else {
1559 AddS64(r1, fp,
1560 Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
1561 }
1563 mov(sp, r1);
1564 int frame_ends = pc_offset();
1565 return frame_ends;
1566}
1567
1568// ExitFrame layout (probably wrongish.. needs updating)
1569//
1570// SP -> previousSP
1571// LK reserved
1572// sp_on_exit (for debug?)
1573// oldSP->prev SP
1574// LK
1575// <parameters on stack>
1576
1577// Prior to calling EnterExitFrame, we've got a bunch of parameters
1578// on the stack that we need to wrap a real frame around.. so first
1579// we reserve a slot for LK and push the previous SP which is captured
1580// in the fp register (r11)
1581// Then - we buy a new frame
1582
1583// r14
1584// oldFP <- newFP
1585// SP
1586// Floats
1587// gaps
1588// Args
1589// ABIRes <- newSP
1590void MacroAssembler::EnterExitFrame(Register scratch, int stack_space,
1591 StackFrame::Type frame_type) {
1592 DCHECK(frame_type == StackFrame::EXIT ||
1593 frame_type == StackFrame::BUILTIN_EXIT ||
1594 frame_type == StackFrame::API_ACCESSOR_EXIT ||
1595 frame_type == StackFrame::API_CALLBACK_EXIT);
1596
1597 // Set up the frame structure on the stack.
1601
1602 using ER = ExternalReference;
1603
1604 // This is an opportunity to build a frame to wrap
1605 // all of the pushes that have happened inside of V8
1606 // since we were called from C code
1607 mov(r1, Operand(StackFrame::TypeToMarker(frame_type)));
1608 PushCommonFrame(r1);
1609 // Reserve room for saved entry sp.
1611
1612 if (v8_flags.debug_code) {
1614 r1);
1615 }
1616
1617 // Save the frame pointer and the context in top.
1618 ER c_entry_fp_address =
1619 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
1620 StoreU64(fp, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
1621
1622 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
1623 StoreU64(cp, ExternalReferenceAsOperand(context_address, no_reg));
1624
1625 lay(sp, MemOperand(sp, -(stack_space + 1) * kSystemPointerSize));
1626
1627 // Allocate and align the frame preparing for calling the runtime
1628 // function.
1629 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1630 if (frame_alignment > 0) {
1631 DCHECK_EQ(frame_alignment, 8);
1632 ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
1633 }
1634
1636 StoreU64(MemOperand(sp), Operand::Zero(), r0);
1637 // Set the exit frame sp value to point just before the return address
1638 // location.
1641}
1642
1644#if !defined(USE_SIMULATOR)
1645 // Running on the real platform. Use the alignment as mandated by the local
1646 // environment.
1647 // Note: This will break if we ever start generating snapshots on one S390
1648 // platform for another S390 platform with a different alignment.
1650#else // Simulated
1651 // If we are using the simulator then we should always align to the expected
1652 // alignment. As the simulator is used to generate snapshots we do not know
1653 // if the target platform will need alignment, so this is controlled from a
1654 // flag.
1655 return v8_flags.sim_stack_alignment;
1656#endif
1657}
1658
1659void MacroAssembler::LeaveExitFrame(Register scratch) {
1660 using ER = ExternalReference;
1661
1662 // Restore current context from top and clear it in debug mode.
1663 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
1664 LoadU64(cp, ExternalReferenceAsOperand(context_address, no_reg));
1665
1666#ifdef DEBUG
1667 mov(scratch, Operand(Context::kInvalidContext));
1668 StoreU64(scratch, ExternalReferenceAsOperand(context_address, no_reg));
1669#endif
1670
1671 // Clear the top frame.
1672 ER c_entry_fp_address =
1673 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
1674 mov(scratch, Operand::Zero());
1675 StoreU64(scratch, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
1676
1677 // Tear down the exit frame, pop the arguments, and return.
1678 LeaveFrame(StackFrame::EXIT);
1679}
1680
1682 Move(dst, d0);
1683}
1684
1686 Move(dst, d0);
1687}
1688
1691 Isolate* isolate = this->isolate();
1692 ExternalReference limit =
1694 ? ExternalReference::address_of_real_jslimit(isolate)
1695 : ExternalReference::address_of_jslimit(isolate);
1697
1698 intptr_t offset =
1700 CHECK(is_int32(offset));
1702}
1703
1704void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
1705 Label* stack_overflow) {
1706 // Check the stack for overflow. We are not trying to catch
1707 // interruptions (e.g. debug break and preemption) here, so the "real stack
1708 // limit" is checked.
1710 // Make scratch the space we have left. The stack might already be overflowed
1711 // here which will cause scratch to become negative.
1712 SubS64(scratch, sp, scratch);
1713 // Check if the arguments will overflow the stack.
1714 ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
1715 CmpS64(scratch, r0);
1716 ble(stack_overflow); // Signed comparison.
1717}
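// The overflow test is a headroom comparison: scratch = sp - real_jslimit is
// the number of bytes left above the guaranteed stack limit, and the branch to
// stack_overflow is taken when num_args * kSystemPointerSize reaches or exceeds
// that headroom. Worked example (numbers assumed): with 4096 bytes of headroom,
// pushing 600 arguments needs 600 * 8 == 4800 bytes, so the ble branch is
// taken.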
1718
1719void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1720 Register actual_parameter_count,
1721 InvokeType type) {
1722 Label regular_invoke;
1723
1724 // r2: actual arguments count
1725 // r3: function (passed through to callee)
1726 // r4: expected arguments count
1727
1728 DCHECK_EQ(actual_parameter_count, r2);
1729 DCHECK_EQ(expected_parameter_count, r4);
1730
1731 // If overapplication or if the actual argument count is equal to the
1732 // formal parameter count, no need to push extra undefined values.
1733 SubS64(expected_parameter_count, expected_parameter_count,
1734 actual_parameter_count);
1735 ble(&regular_invoke);
1736
1737 Label stack_overflow;
1738 Register scratch = r6;
1739 StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
1740
1741 // Underapplication. Move the arguments already in the stack, including the
1742 // receiver and the return address.
1743 {
1744 Label copy, check;
1745 Register num = r7, src = r8, dest = ip; // r7 and r8 are context and root.
1746 mov(src, sp);
1747 // Update stack pointer.
1748 ShiftLeftU64(scratch, expected_parameter_count,
1749 Operand(kSystemPointerSizeLog2));
1750 SubS64(sp, sp, scratch);
1751 mov(dest, sp);
1752 ltgr(num, actual_parameter_count);
1753 b(&check);
1754 bind(&copy);
1755 LoadU64(r0, MemOperand(src));
1756 lay(src, MemOperand(src, kSystemPointerSize));
1757 StoreU64(r0, MemOperand(dest));
1758 lay(dest, MemOperand(dest, kSystemPointerSize));
1759 SubS64(num, num, Operand(1));
1760 bind(&check);
1761 b(gt, &copy);
1762 }
1763
1764 // Fill remaining expected arguments with undefined values.
1765 LoadRoot(scratch, RootIndex::kUndefinedValue);
1766 {
1767 Label loop;
1768 bind(&loop);
1769 StoreU64(scratch, MemOperand(ip));
1770 lay(ip, MemOperand(ip, kSystemPointerSize));
1771 SubS64(expected_parameter_count, expected_parameter_count, Operand(1));
1772 bgt(&loop);
1773 }
1774 b(&regular_invoke);
1775
1776 bind(&stack_overflow);
1777 {
1778 FrameScope frame(
1779 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1780 CallRuntime(Runtime::kThrowStackOverflow);
1781 bkpt(0);
1782 }
1783
1784 bind(&regular_invoke);
1785}
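// Worked example of the under-application path above (counts assumed): a call
// with actual_parameter_count == 1 and expected_parameter_count == 3 leaves a
// deficit of 2 after the subtraction, so sp is lowered by
// 2 * kSystemPointerSize, the existing actual_parameter_count stack slots are
// copied down to the new sp, and the two freshly exposed slots are written with
// the undefined root before execution falls through to regular_invoke.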
1786
1787void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1788 Register expected_parameter_count,
1789 Register actual_parameter_count) {
1790 Label skip_hook;
1791
1792 ExternalReference debug_hook_active =
1793 ExternalReference::debug_hook_on_function_call_address(isolate());
1794 Move(r6, debug_hook_active);
1795 tm(MemOperand(r6), Operand(0xFF));
1796 beq(&skip_hook);
1797
1798 {
1799 // Load receiver to pass it later to DebugOnFunctionCall hook.
1800 LoadReceiver(r6);
1801 FrameScope frame(
1802 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1803
1804 SmiTag(expected_parameter_count);
1805 Push(expected_parameter_count);
1806
1807 SmiTag(actual_parameter_count);
1808 Push(actual_parameter_count);
1809
1810 if (new_target.is_valid()) {
1812 }
1813 Push(fun, fun, r6);
1814 CallRuntime(Runtime::kDebugOnFunctionCall);
1815 Pop(fun);
1816 if (new_target.is_valid()) {
1817 Pop(new_target);
1818 }
1819
1820 Pop(actual_parameter_count);
1821 SmiUntag(actual_parameter_count);
1822
1823 Pop(expected_parameter_count);
1824 SmiUntag(expected_parameter_count);
1825 }
1826 bind(&skip_hook);
1827}
1828
1829void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1830 Register expected_parameter_count,
1831 Register actual_parameter_count,
1832 InvokeType type) {
1833 // You can't call a function without a valid frame.
1835 DCHECK_EQ(function, r3);
1836 DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);
1837
1838 // On function call, call into the debugger if necessary.
1839 CheckDebugHook(function, new_target, expected_parameter_count,
1840 actual_parameter_count);
1841
1842 // Clear the new.target register if not given.
1843 if (!new_target.is_valid()) {
1844 LoadRoot(r5, RootIndex::kUndefinedValue);
1845 }
1846
1847 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
1848 // We call indirectly through the code field in the function to
1849 // allow recompilation to take effect without changing any of the
1850 // call sites.
1851 constexpr int unused_argument_count = 0;
1852 switch (type) {
1853 case InvokeType::kCall:
1854 CallJSFunction(function, unused_argument_count);
1855 break;
1856 case InvokeType::kJump:
1857 JumpJSFunction(function);
1858 break;
1859 }
1860}
1861
1863 Register fun, Register new_target, Register actual_parameter_count,
1864 InvokeType type) {
1865 // You can't call a function without a valid frame.
1867
1868 // Contract with called JS functions requires that function is passed in r3.
1869 DCHECK_EQ(fun, r3);
1870
1871 Register expected_reg = r4;
1872 Register temp_reg = r6;
1873 LoadTaggedField(cp, FieldMemOperand(fun, JSFunction::kContextOffset));
1874 LoadTaggedField(temp_reg,
1875 FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
1876 LoadU16(expected_reg,
1877 FieldMemOperand(temp_reg,
1878 SharedFunctionInfo::kFormalParameterCountOffset));
1879
1880 InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1881 type);
1882}
1883
1884void MacroAssembler::InvokeFunction(Register function,
1885 Register expected_parameter_count,
1886 Register actual_parameter_count,
1887 InvokeType type) {
1888 // You can't call a function without a valid frame.
1890
1891 // Contract with called JS functions requires that function is passed in r3.
1892 DCHECK_EQ(function, r3);
1893
1894 // Get the function and setup the context.
1895 LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
1896
1897 InvokeFunctionCode(r3, no_reg, expected_parameter_count,
1898 actual_parameter_count, type);
1899}
1900
1902 // Adjust this code if not the case.
1905
1906 // Link the current handler as the next handler.
1907 Move(r7,
1908 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1909
1910 // Buy the full stack frame for 5 slots.
1912
1913 // Store padding.
1914 lghi(r0, Operand::Zero());
1915 StoreU64(r0, MemOperand(sp)); // Padding.
1916
1917 // Copy the old handler into the next handler slot.
1919 Operand(kSystemPointerSize));
1920 // Set this new handler as the current one.
1921 StoreU64(sp, MemOperand(r7));
1922}
1923
1926 static_assert(StackHandlerConstants::kNextOffset == 0);
1927
1928 // Pop the Next Handler into r3 and store it into Handler Address reference.
1929 Pop(r3);
1930 Move(ip,
1931 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1932 StoreU64(r3, MemOperand(ip));
1933
1934 Drop(1); // Drop padding.
1935}
1936
1937void MacroAssembler::IsObjectType(Register object, Register scratch1,
1938 Register scratch2, InstanceType type) {
1939 ASM_CODE_COMMENT(this);
1940 CompareObjectType(object, scratch1, scratch2, type);
1941}
1942
1943void MacroAssembler::CompareObjectTypeRange(Register object, Register map,
1944 Register type_reg, Register scratch,
1945 InstanceType lower_limit,
1946 InstanceType upper_limit) {
1947 ASM_CODE_COMMENT(this);
1948 LoadMap(map, object);
1949 CompareInstanceTypeRange(map, type_reg, scratch, lower_limit, upper_limit);
1950}
1951
1952void MacroAssembler::CompareRange(Register value, Register scratch,
1953 unsigned lower_limit, unsigned higher_limit) {
1954 ASM_CODE_COMMENT(this);
1955 DCHECK_LT(lower_limit, higher_limit);
1956 if (lower_limit != 0) {
1957 mov(scratch, value);
1958 slgfi(scratch, Operand(lower_limit));
1959 CmpU64(scratch, Operand(higher_limit - lower_limit));
1960 } else {
1961 CmpU64(value, Operand(higher_limit));
1962 }
1963}
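// CompareRange relies on the classic single-comparison range check: after the
// unsigned subtraction of lower_limit, the original value lies in
// [lower_limit, higher_limit] exactly when (value - lower_limit) <=
// (higher_limit - lower_limit) as an unsigned number, because anything below
// lower_limit wraps around to a huge unsigned value. Worked example (numbers
// assumed): lower 0x80, higher 0x9F, value 0x7F gives 0x7F - 0x80 == 0xFF...FF,
// which fails the unsigned compare against 0x1F.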
1964
1965void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
1966 Register scratch,
1967 InstanceType lower_limit,
1968 InstanceType higher_limit) {
1969 DCHECK_LT(lower_limit, higher_limit);
1970 LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1971 CompareRange(type_reg, scratch, lower_limit, higher_limit);
1972}
1973
1974void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1977 // Some smi roots contain system pointer size values like stack limits.
1978 LoadRoot(r0, index);
1979 CmpU64(obj, r0);
1980 return;
1981 }
1982 return CompareTaggedRoot(obj, index);
1983}
1984
1985void MacroAssembler::CompareTaggedRoot(Register obj, RootIndex index) {
1986 if (CanBeImmediate(index)) {
1987 CompareTagged(obj, Operand(ReadOnlyRootPtr(index)));
1988 return;
1989 }
1991#ifdef V8_TARGET_BIG_ENDIAN
1993#endif
1995}
1996
1997void MacroAssembler::JumpIfIsInRange(Register value, Register scratch,
1998 unsigned lower_limit,
1999 unsigned higher_limit,
2000 Label* on_in_range) {
2001 CompareRange(value, scratch, lower_limit, higher_limit);
2002 ble(on_in_range);
2003}
2004
2005void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2006 Register result,
2007 DoubleRegister double_input,
2008 StubCallMode stub_mode) {
2009 Label done;
2010
2011 TryInlineTruncateDoubleToI(result, double_input, &done);
2012
2013 // If we fell through then inline version didn't succeed - call stub instead.
2014 push(r14);
2015 // Put input on stack.
2016 lay(sp, MemOperand(sp, -kDoubleSize));
2017 StoreF64(double_input, MemOperand(sp));
2018
2019#if V8_ENABLE_WEBASSEMBLY
2020 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2021 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
2022#else
2023 // For balance.
2024 if (false) {
2025#endif // V8_ENABLE_WEBASSEMBLY
2026 } else {
2027 CallBuiltin(Builtin::kDoubleToI);
2028 }
2029
2030 LoadU64(result, MemOperand(sp, 0));
2031 la(sp, MemOperand(sp, kDoubleSize));
2032 pop(r14);
2033
2034 bind(&done);
2035}
2036
2038 DoubleRegister double_input,
2039 Label* done) {
2040 ConvertDoubleToInt64(result, double_input);
2041
2042 // Test for overflow
2044 beq(done);
2045}
2046
2047namespace {
2048
2049#ifndef V8_ENABLE_LEAPTIERING
2050
2051void TailCallOptimizedCodeSlot(MacroAssembler* masm,
2052 Register optimized_code_entry,
2053 Register scratch) {
2054 // ----------- S t a t e -------------
2055 // -- r2 : actual argument count
2056 // -- r5 : new target (preserved for callee if needed, and caller)
2057 // -- r3 : target function (preserved for callee if needed, and caller)
2058 // -----------------------------------
2059 DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));
2060
2061 Register closure = r3;
2062 Label heal_optimized_code_slot;
2063
2064 // If the optimized code is cleared, go to runtime to update the optimization
2065 // marker field.
2066 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
2067 &heal_optimized_code_slot);
2068
2069 // The entry references a CodeWrapper object. Unwrap it now.
2071 optimized_code_entry,
2072 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset));
2073
2074 // Check if the optimized code is marked for deopt. If it is, call the
2075 // runtime to clear it.
2076 {
2077 __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, scratch);
2078 __ bne(&heal_optimized_code_slot);
2079 }
2080
2081 // Optimized code is good, get it into the closure and link the closure
2082 // into the optimized functions list, then tail call the optimized code.
2083 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
2084 r7);
2085 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
2086 __ LoadCodeInstructionStart(r4, optimized_code_entry);
2087 __ Jump(r4);
2088
2089 // Optimized code slot contains deoptimized code or code is cleared and
2090 // optimized code marker isn't updated. Evict the code, update the marker
2091 // and re-enter the closure's code.
2092 __ bind(&heal_optimized_code_slot);
2093 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
2094}
2095
2096#endif // V8_ENABLE_LEAPTIERING
2097
2098} // namespace
2099
2100#ifdef V8_ENABLE_DEBUG_CODE
2101void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
2102 if (v8_flags.debug_code) {
2103 IsObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE);
2104 Assert(eq, AbortReason::kExpectedFeedbackCell);
2105 }
2106}
2107void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
2108 if (v8_flags.debug_code) {
2109 IsObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
2110 Assert(eq, AbortReason::kExpectedFeedbackVector);
2111 }
2112}
2113void MacroAssembler::AssertFeedbackVector(Register object) {
2114 if (v8_flags.debug_code) {
2115 UseScratchRegisterScope temps(this);
2116 Register scratch = temps.Acquire();
2117 CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
2118 Assert(eq, AbortReason::kExpectedFeedbackVector);
2119 }
2120}
2121#endif // V8_ENABLE_DEBUG_CODE
2122
2123// Optimized code is good, get it into the closure and link the closure
2124// into the optimized functions list, then tail call the optimized code.
2126 Register optimized_code, Register closure, Register scratch1,
2127 Register slot_address) {
2128#ifdef V8_ENABLE_LEAPTIERING
2129 UNREACHABLE();
2130#else
2131 DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
2133 DCHECK(!AreAliased(optimized_code, closure));
2134 // Store code entry in the closure.
2135 StoreTaggedField(optimized_code,
2136 FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
2137 // Write barrier clobbers scratch1 below.
2138 Register value = scratch1;
2139 mov(value, optimized_code);
2140
2141 RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
2144#endif // V8_ENABLE_LEAPTIERING
2145}
2146
2148 Runtime::FunctionId function_id) {
2149 // ----------- S t a t e -------------
2150 // -- r2 : actual argument count
2151 // -- r3 : target function (preserved for callee)
2152 // -- r5 : new target (preserved for callee)
2153 // -----------------------------------
2154 {
2155 FrameAndConstantPoolScope scope(this, StackFrame::INTERNAL);
2156 // Push a copy of the target function, the new target and the actual
2157 // argument count.
2158 // Push function as parameter to the runtime call.
2162
2163 CallRuntime(function_id, 1);
2164 mov(r4, r2);
2165
2166 // Restore target function, new target and actual argument count.
2170 }
2171 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
2172 JumpCodeObject(r4);
2173}
2174
2175#ifndef V8_ENABLE_LEAPTIERING
2176
2177// Read off the flags in the feedback vector and check if there
2178// is optimized code or a tiering state that needs to be processed.
2180 Register flags, Register feedback_vector, CodeKind current_code_kind) {
2181 ASM_CODE_COMMENT(this);
2182 DCHECK(!AreAliased(flags, feedback_vector));
2183 DCHECK(CodeKindCanTierUp(current_code_kind));
2184 LoadU16(flags,
2185 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
2189 if (current_code_kind != CodeKind::MAGLEV) {
2191 }
2192 CHECK(is_uint16(kFlagsMask));
2193 tmll(flags, Operand(kFlagsMask));
2194 return Condition(7);
2195}
2196
2197// Read off the flags in the feedback vector and check if there
2198// is optimized code or a tiering state that needs to be processed.
2200 Register flags, Register feedback_vector, CodeKind current_code_kind,
2201 Label* flags_need_processing) {
2202 ASM_CODE_COMMENT(this);
2204 current_code_kind),
2205 flags_need_processing);
2206}
2207
2209 Register flags, Register feedback_vector) {
2210 DCHECK(!AreAliased(flags, feedback_vector));
2211 Label maybe_has_optimized_code, maybe_needs_logging;
2212 // Check if optimized code is available
2214 beq(&maybe_needs_logging);
2215
2216 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
2217
2218 bind(&maybe_needs_logging);
2219 TestBitMask(flags, FeedbackVector::LogNextExecutionBit::kMask, r0);
2220 beq(&maybe_has_optimized_code);
2221 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
2222
2223 bind(&maybe_has_optimized_code);
2224 Register optimized_code_entry = flags;
2225 LoadTaggedField(optimized_code_entry,
2226 FieldMemOperand(feedback_vector,
2227 FeedbackVector::kMaybeOptimizedCodeOffset));
2228 TailCallOptimizedCodeSlot(this, optimized_code_entry, r1);
2229}
2230
2231#endif // !V8_ENABLE_LEAPTIERING
2232
2233void MacroAssembler::CallRuntime(const Runtime::Function* f,
2234 int num_arguments) {
2235 // All parameters are on the stack. r2 has the return value after call.
2236
2237 // If the expected number of arguments of the runtime function is
2238 // constant, we check that the actual number of arguments match the
2239 // expectation.
2240 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2241
2242 // TODO(1236192): Most runtime routines don't need the number of
2243 // arguments passed in because it is constant. At some point we
2244 // should remove this need and make the runtime routine entry code
2245 // smarter.
2246 mov(r2, Operand(num_arguments));
2248 bool switch_to_central_stack = options().is_wasm;
2249 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central_stack));
2250}
2251
2253 const Runtime::Function* function = Runtime::FunctionForId(fid);
2254 DCHECK_EQ(1, function->result_size);
2255 if (function->nargs >= 0) {
2256 mov(r2, Operand(function->nargs));
2257 }
2259}
2260
2261void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2262 bool builtin_exit_frame) {
2263 Move(r3, builtin);
2264 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
2265}
2266
2267void MacroAssembler::LoadWeakValue(Register out, Register in,
2268 Label* target_if_cleared) {
2270 beq(target_if_cleared);
2271
2272 AndP(out, in, Operand(~kWeakHeapObjectMask));
2273}
2274
2275void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
2276 Register scratch1,
2277 Register scratch2) {
2278 DCHECK(value > 0 && is_int8(value));
2279 if (v8_flags.native_code_counters && counter->Enabled()) {
2280 Move(scratch2, ExternalReference::Create(counter));
2281 // @TODO(john.yan): can be optimized by asi()
2282 LoadS32(scratch1, MemOperand(scratch2));
2283 AddS64(scratch1, Operand(value));
2284 StoreU32(scratch1, MemOperand(scratch2));
2285 }
2286}
2287
2288void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
2289 Register scratch1,
2290 Register scratch2) {
2291 DCHECK(value > 0 && is_int8(value));
2292 if (v8_flags.native_code_counters && counter->Enabled()) {
2293 Move(scratch2, ExternalReference::Create(counter));
2294 // @TODO(john.yan): can be optimized by asi()
2295 LoadS32(scratch1, MemOperand(scratch2));
2296 AddS64(scratch1, Operand(-value));
2297 StoreU32(scratch1, MemOperand(scratch2));
2298 }
2299}
2300
2301void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
2302 Label L;
2303 b(to_condition(cond), &L);
2304 Abort(reason);
2305 // will not return here
2306 bind(&L);
2307}
2308
2310 ASM_CODE_COMMENT(this);
2311 if (v8_flags.code_comments) {
2312 RecordComment("Abort message:", SourceLocation{});
2313 RecordComment(GetAbortReason(reason), SourceLocation{});
2314 }
2315
2316 // Without debug code, save the code size and just trap.
2317 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
2318 stop();
2319 return;
2320 }
2321
2322 if (should_abort_hard()) {
2323 // We don't care if we constructed a frame. Just pretend we did.
2324 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
2325 lgfi(r2, Operand(static_cast<int>(reason)));
2326 PrepareCallCFunction(1, 0, r3);
2327#if V8_OS_ZOS
2328 CallCFunction(ExternalReference::abort_with_reason(), 1, 0);
2329#else
2330 Move(r3, ExternalReference::abort_with_reason());
2331 // Use Call directly to avoid any unneeded overhead. The function won't
2332 // return anyway.
2333 Call(r3);
2334#endif
2335 return;
2336 }
2337
2338 LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
2339
2340 {
2341 // We don't actually want to generate a pile of code for this, so just
2342 // claim there is a stack frame, without generating one.
2343 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
2344 if (root_array_available()) {
2345 // Generate an indirect call via builtins entry table here in order to
2346 // ensure that the interpreter_entry_return_pc_offset is the same for
2347 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
2348 // when v8_flags.debug_code is enabled.
2349 LoadEntryFromBuiltin(Builtin::kAbort, ip);
2350 Call(ip);
2351 } else {
2352 CallBuiltin(Builtin::kAbort);
2353 }
2354 }
2355 // will not return here
2356}
2357
2358void MacroAssembler::LoadCompressedMap(Register destination, Register object) {
2361}
2362
2363void MacroAssembler::LoadMap(Register destination, Register object) {
2365}
2366
2367void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
2368 Register scratch, Label* fbv_undef) {
2369 Label done;
2370
2371 // Load the feedback vector from the closure.
2372 LoadTaggedField(dst,
2373 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
2374 LoadTaggedField(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset));
2375
2376 // Check if feedback vector is valid.
2377 IsObjectType(dst, scratch, scratch, FEEDBACK_VECTOR_TYPE);
2378 b(eq, &done);
2379
2380 // Not valid, load undefined.
2381 LoadRoot(dst, RootIndex::kUndefinedValue);
2382 b(fbv_undef);
2383
2384 bind(&done);
2385}
2386
2387void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2388 LoadMap(dst, cp);
2390 dst, FieldMemOperand(
2391 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
2393}
2394
2395#ifdef V8_ENABLE_DEBUG_CODE
2396void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
2397 if (v8_flags.debug_code) Check(cond, reason, cr);
2398}
2399
2401 if (v8_flags.debug_code) Abort(reason);
2402}
2403
2404void MacroAssembler::AssertZeroExtended(Register int32_register) {
2405 if (!v8_flags.slow_debug_code) return;
2406 ASM_CODE_COMMENT(this);
2407 mov(r0, Operand(kMaxUInt32));
2408 CmpS64(int32_register, r0);
2409 Check(le, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
2410}
2411
2412void MacroAssembler::AssertMap(Register object) {
2413 if (!v8_flags.debug_code) return;
2414 ASM_CODE_COMMENT(this);
2415 TestIfSmi(object);
2416 Check(ne, AbortReason::kOperandIsNotAMap);
2417 Push(object);
2418 LoadMap(object, object);
2419 CompareInstanceType(object, object, MAP_TYPE);
2420 Pop(object);
2421 Check(eq, AbortReason::kOperandIsNotAMap);
2422}
2423
2424void MacroAssembler::AssertNotSmi(Register object) {
2425 if (v8_flags.debug_code) {
2426 static_assert(kSmiTag == 0);
2427 TestIfSmi(object);
2428 Check(ne, AbortReason::kOperandIsASmi, cr0);
2429 }
2430}
2431
2432void MacroAssembler::AssertSmi(Register object) {
2433 if (v8_flags.debug_code) {
2434 static_assert(kSmiTag == 0);
2435 TestIfSmi(object);
2436 Check(eq, AbortReason::kOperandIsNotASmi, cr0);
2437 }
2438}
2439
2440void MacroAssembler::AssertConstructor(Register object, Register scratch) {
2441 if (v8_flags.debug_code) {
2442 static_assert(kSmiTag == 0);
2443 TestIfSmi(object);
2444 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
2445 LoadMap(scratch, object);
2446 tm(FieldMemOperand(scratch, Map::kBitFieldOffset),
2447 Operand(Map::Bits1::IsConstructorBit::kMask));
2448 Check(ne, AbortReason::kOperandIsNotAConstructor);
2449 }
2450}
2451
2452void MacroAssembler::AssertFunction(Register object) {
2453 if (v8_flags.debug_code) {
2454 static_assert(kSmiTag == 0);
2455 TestIfSmi(object);
2456 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
2457 push(object);
2458 LoadMap(object, object);
2459 CompareInstanceTypeRange(object, object, object, FIRST_JS_FUNCTION_TYPE,
2460 LAST_JS_FUNCTION_TYPE);
2461 pop(object);
2462 Check(le, AbortReason::kOperandIsNotAFunction);
2463 }
2464}
2465
2466void MacroAssembler::AssertCallableFunction(Register object) {
2467 if (!v8_flags.debug_code) return;
2468 ASM_CODE_COMMENT(this);
2469 static_assert(kSmiTag == 0);
2470 TestIfSmi(object);
2471 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
2472 push(object);
2473 LoadMap(object, object);
2474 CompareInstanceTypeRange(object, object, object,
2477 pop(object);
2478 Check(le, AbortReason::kOperandIsNotACallableFunction);
2479}
2480
2481void MacroAssembler::AssertBoundFunction(Register object) {
2482 if (v8_flags.debug_code) {
2483 static_assert(kSmiTag == 0);
2484 TestIfSmi(object);
2485 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
2486 push(object);
2487 IsObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2488 pop(object);
2489 Check(eq, AbortReason::kOperandIsNotABoundFunction);
2490 }
2491}
2492
2493void MacroAssembler::AssertGeneratorObject(Register object) {
2494 if (!v8_flags.debug_code) return;
2495 TestIfSmi(object);
2496 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
2497
2498 // Load map
2499 Register map = object;
2500 push(object);
2501 LoadMap(map, object);
2502
2503 // Check if JSGeneratorObject
2504 Register scratch = object;
2505 CompareInstanceTypeRange(map, scratch, scratch,
2506 FIRST_JS_GENERATOR_OBJECT_TYPE,
2507 LAST_JS_GENERATOR_OBJECT_TYPE);
2508 // Restore generator object to register and perform assertion
2509 pop(object);
2510 Check(le, AbortReason::kOperandIsNotAGeneratorObject);
2511}
2512
2514 Register scratch) {
2515 if (v8_flags.debug_code) {
2516 Label done_checking;
2517 AssertNotSmi(object);
2518 CompareRoot(object, RootIndex::kUndefinedValue);
2519 beq(&done_checking, Label::kNear);
2520 LoadMap(scratch, object);
2521 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2522 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2523 bind(&done_checking);
2524 }
2525}
2526
2527void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
2528 Register tmp, AbortReason abort_reason) {
2529 if (!v8_flags.debug_code) return;
2530
2531 ASM_CODE_COMMENT(this);
2532 DCHECK(!AreAliased(object, map_tmp, tmp));
2533 Label ok;
2534
2535 JumpIfSmi(object, &ok);
2536
2537 LoadMap(map_tmp, object);
2538 CompareInstanceType(map_tmp, tmp, LAST_NAME_TYPE);
2539 ble(&ok);
2540
2541 CompareInstanceType(map_tmp, tmp, FIRST_JS_RECEIVER_TYPE);
2542 bge(&ok);
2543
2544 CompareRoot(map_tmp, RootIndex::kHeapNumberMap);
2545 beq(&ok);
2546
2547 CompareRoot(map_tmp, RootIndex::kBigIntMap);
2548 beq(&ok);
2549
2550 CompareRoot(object, RootIndex::kUndefinedValue);
2551 beq(&ok);
2552
2553 CompareRoot(object, RootIndex::kTrueValue);
2554 beq(&ok);
2555
2556 CompareRoot(object, RootIndex::kFalseValue);
2557 beq(&ok);
2558
2559 CompareRoot(object, RootIndex::kNullValue);
2560 beq(&ok);
2561
2562 Abort(abort_reason);
2563
2564 bind(&ok);
2565}
2566
2567#endif // V8_ENABLE_DEBUG_CODE
2568
2569int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2570 int num_double_arguments) {
2571 int stack_passed_words = 0;
2572 if (num_double_arguments > DoubleRegister::kNumRegisters) {
2573 stack_passed_words +=
2574 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2575 }
2576 // Up to five simple arguments are passed in registers r2..r6
2577 if (num_reg_arguments > kRegisterPassedArguments) {
2578 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2579 }
2580 return stack_passed_words;
2581}
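// Worked example for the arithmetic above (illustrative, not part of the
// original file): with kRegisterPassedArguments == 5, a call with 7 general
// arguments and 2 double arguments passes the first five general arguments
// in r2..r6 and the doubles in FP registers, so only 7 - 5 = 2 stack words
// are needed. Each double argument beyond DoubleRegister::kNumRegisters
// would cost two additional words, as the first branch above shows.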
2582
2583void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2584 int num_double_arguments,
2585 Register scratch) {
2586 int frame_alignment = ActivationFrameAlignment();
2587 int stack_passed_arguments =
2588 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2589 int stack_space = kNumRequiredStackFrameSlots;
2590 if (frame_alignment > kSystemPointerSize) {
2591 // Make stack end at alignment and make room for stack arguments
2592 // -- preserving original value of sp.
2593 mov(scratch, sp);
2594 lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize));
2595 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2596 ClearRightImm(sp, sp,
2597 Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
2598 StoreU64(scratch,
2599 MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize));
2600 } else {
2601 stack_space += stack_passed_arguments;
2602 }
2603 lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize));
2604}
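// Illustrative walk-through of the aligned path above (assuming, purely for
// the example, 8-byte pointers and a 16-byte activation frame alignment):
// with 2 stack-passed words the code saves the old sp in scratch, drops
// (2 + 1) slots, clears the low 4 bits of sp via ClearRightImm to reach a
// 16-byte boundary, and stores the saved sp at slot index 2 so that
// CallCFunction can reload the original stack pointer after the call.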
2605
2606void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2607 Register scratch) {
2608 PrepareCallCFunction(num_reg_arguments, 0, scratch);
2609}
2610
2612
2614
2616 DoubleRegister src2) {
2617 if (src2 == d0) {
2618 DCHECK(src1 != d2);
2619 Move(d2, src2);
2620 Move(d0, src1);
2621 } else {
2622 Move(d0, src1);
2623 Move(d2, src2);
2624 }
2625}
2626
2627int MacroAssembler::CallCFunction(ExternalReference function,
2628 int num_reg_arguments,
2629 int num_double_arguments,
2630 SetIsolateDataSlots set_isolate_data_slots,
2631 bool has_function_descriptor,
2632 Label* return_label) {
2633 Move(ip, function);
2634 return CallCFunction(ip, num_reg_arguments, num_double_arguments,
2635 set_isolate_data_slots, has_function_descriptor,
2636 return_label);
2637}
2638
2639int MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
2640 int num_double_arguments,
2641 SetIsolateDataSlots set_isolate_data_slots,
2642 bool has_function_descriptor,
2643 Label* return_label) {
2644 ASM_CODE_COMMENT(this);
2645 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2646 DCHECK(has_frame());
2647
2648#if V8_OS_ZOS
2649 // Shuffle input arguments
2650 mov(r1, r2);
2651 mov(r2, r3);
2652 mov(r3, r4);
2653
2654 // XPLINK treats r7 as a volatile return register, but r14 as preserved.
2655 // Since Linux is the other way around, preserve the r7 value in r14 across
2656 // the call.
2657 mov(r14, r7);
2658
2659 // XPLINK linkage requires args in r5,r6,r7,r8,r9 to be passed on the stack.
2660 // However, for DirectAPI C calls, there may not be stack slots
2661 // for the 4th and 5th parameters if num_reg_arguments is less
2662 // than 3. In that case, we still need to preserve r5/r6 into the
2663 // register save area, as they are considered volatile in XPLINK.
2664 if (num_reg_arguments == 4) {
2665 StoreU64(r5, MemOperand(sp, 19 * kSystemPointerSize));
2667 } else if (num_reg_arguments >= 5) {
2668 // Save original r5 - r6 to Stack, r7 - r9 already saved to Stack
2669 StoreMultipleP(r5, r6, MemOperand(sp, 19 * kSystemPointerSize));
2670 } else {
2672 }
2673#endif
2674
2675 Label get_pc;
2676
2677 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2678 // Save the frame pointer and PC so that the stack layout remains iterable,
2679 // even without an ExitFrame which normally exists between JS and C frames.
2680 // See x64 code for reasoning about how to address the isolate data fields.
2681 larl(r0, &get_pc);
2683 StoreU64(r0,
2684 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
2685 StoreU64(fp,
2686 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2687 }
2688
2689#if V8_OS_ZOS
2690 // Set up the system stack pointer with the XPLINK bias.
2691 lay(r4, MemOperand(sp, -kStackPointerBias));
2692
2693 Register dest = function;
2694 if (has_function_descriptor) {
2695 LoadMultipleP(r5, r6, MemOperand(function));
2696 dest = r6;
2697 }
2698#else
2699 // Just call directly. The function called cannot cause a GC, or
2700 // allow preemption, so the return address in the link register
2701 // stays correct.
2702 Register dest = function;
2703 if (ABI_CALL_VIA_IP) {
2704 Move(ip, function);
2705 dest = ip;
2706 }
2707#endif
2708
2709#if V8_OS_ZOS
2710 if (has_function_descriptor) {
2711 // Branch to target via indirect branch
2712 basr(r7, dest);
2713 nop(BASR_CALL_TYPE_NOP);
2714 } else {
2715 basr(r7, dest);
2716 }
2717
2718 // Restore r5-r9 from the appropriate stack locations (see notes above).
2719 if (num_reg_arguments == 4) {
2720 LoadU64(r5, MemOperand(sp, 19 * kSystemPointerSize));
2722 } else if (num_reg_arguments >= 5) {
2723 LoadMultipleP(r5, r6, MemOperand(sp, 19 * kSystemPointerSize));
2724 } else {
2726 }
2727
2728 // Restore original r7
2729 mov(r7, r14);
2730
2731 // Shuffle the result
2732 mov(r2, r3);
2733#else
2734 Call(dest);
2735#endif
2736
2737 int call_pc_offset = pc_offset();
2738 bind(&get_pc);
2739 if (return_label) bind(return_label);
2740
2741 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2742 // We don't unset the PC; the FP is the source of truth.
2743 Register zero_scratch = r0;
2744 lghi(zero_scratch, Operand::Zero());
2745
2746 StoreU64(zero_scratch,
2747 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2748 }
2749
2750 int stack_passed_arguments =
2751 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2752 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2754 // Load the original stack pointer (pre-alignment) from the stack
2755 LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize));
2756 } else {
2757 la(sp, MemOperand(sp, stack_space * kSystemPointerSize));
2758 }
2759
2760 return call_pc_offset;
2761}
2762
2763int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
2764 SetIsolateDataSlots set_isolate_data_slots,
2765 bool has_function_descriptor,
2766 Label* return_label) {
2767 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
2768 has_function_descriptor, return_label);
2769}
2770
2771int MacroAssembler::CallCFunction(Register function, int num_arguments,
2772 SetIsolateDataSlots set_isolate_data_slots,
2773 bool has_function_descriptor,
2774 Label* return_label) {
2775 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
2776 has_function_descriptor, return_label);
2777}
2778
2780 Register object,
2781 Register scratch, // scratch may be same register as object
2782 int mask, Condition cc, Label* condition_met) {
2783 DCHECK(cc == ne || cc == eq);
2784 ClearRightImm(scratch, object, Operand(kPageSizeBits));
2785
2787 // If it's a power of two, we can use Test-Under-Mask Memory-Imm form
2788 // which allows testing of a single byte in memory.
2789 int32_t byte_offset = 4;
2790 uint32_t shifted_mask = mask;
2791 // Determine the byte offset to be tested
2792 if (mask <= 0x80) {
2793 byte_offset = kSystemPointerSize - 1;
2794 } else if (mask < 0x8000) {
2795 byte_offset = kSystemPointerSize - 2;
2796 shifted_mask = mask >> 8;
2797 } else if (mask < 0x800000) {
2798 byte_offset = kSystemPointerSize - 3;
2799 shifted_mask = mask >> 16;
2800 } else {
2801 byte_offset = kSystemPointerSize - 4;
2802 shifted_mask = mask >> 24;
2803 }
2804#if V8_TARGET_LITTLE_ENDIAN
2805 // Reverse the byte_offset if emulating on little endian platform
2806 byte_offset = kSystemPointerSize - byte_offset - 1;
2807#endif
2808 tm(MemOperand(scratch, MemoryChunk::FlagsOffset() + byte_offset),
2809 Operand(shifted_mask));
2810 } else {
2811 LoadU64(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()));
2812 AndP(r0, scratch, Operand(mask));
2813 }
2814 // Should be okay to remove rc
2815
2816 if (cc == ne) {
2817 bne(condition_met);
2818 }
2819 if (cc == eq) {
2820 beq(condition_met);
2821 }
2822}
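// Worked example for the byte-offset selection above (illustrative): for a
// power-of-two mask of 1 << 18 (0x40000), the "mask < 0x800000" branch is
// taken, so byte_offset = kSystemPointerSize - 3 and shifted_mask =
// 0x40000 >> 16 = 0x4. With 8-byte pointers that tests the byte holding
// bits 16..23 of the flags word; on a little-endian host the offset is
// mirrored to kSystemPointerSize - byte_offset - 1 before the tm.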
2823
2824Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2825 Register reg4, Register reg5,
2826 Register reg6) {
2827 RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
2828
2829 const RegisterConfiguration* config = RegisterConfiguration::Default();
2830 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2831 int code = config->GetAllocatableGeneralCode(i);
2832 Register candidate = Register::from_code(code);
2833 if (regs.has(candidate)) continue;
2834 return candidate;
2835 }
2836 UNREACHABLE();
2837}
2838
2839void MacroAssembler::mov(Register dst, Register src) { lgr(dst, src); }
2840
2841void MacroAssembler::mov(Register dst, const Operand& src) {
2842 int64_t value = 0;
2843
2844 if (src.is_heap_number_request()) {
2845 RequestHeapNumber(src.heap_number_request());
2846 } else {
2847 value = src.immediate();
2848 }
2849
2850 if (src.rmode() != RelocInfo::NO_INFO) {
2851 // Some form of relocation is needed.
2852 RecordRelocInfo(src.rmode(), value);
2853 }
2854
2855 int32_t hi_32 = static_cast<int32_t>(value >> 32);
2856 int32_t lo_32 = static_cast<int32_t>(value);
2857
2858 if (src.rmode() == RelocInfo::NO_INFO) {
2859 if (hi_32 == 0) {
2860 if (is_uint16(lo_32)) {
2861 llill(dst, Operand(lo_32));
2862 return;
2863 }
2864 llilf(dst, Operand(lo_32));
2865 return;
2866 } else if (lo_32 == 0) {
2867 if (is_uint16(hi_32)) {
2868 llihl(dst, Operand(hi_32));
2869 return;
2870 }
2871 llihf(dst, Operand(hi_32));
2872 return;
2873 } else if (is_int16(value)) {
2874 lghi(dst, Operand(value));
2875 return;
2876 } else if (is_int32(value)) {
2877 lgfi(dst, Operand(value));
2878 return;
2879 }
2880 }
2881
2882 iihf(dst, Operand(hi_32));
2883 iilf(dst, Operand(lo_32));
2884}
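// Illustrative selection for the paths above (not exhaustive): 0x1234 has a
// zero high word and fits in 16 bits, so llill is chosen; 0x12345678 takes
// llilf; -5 is a 16-bit signed value, so lghi; a value such as 0x123456789A
// fits none of the short forms and falls through to the iihf/iilf pair.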
2885
2886void MacroAssembler::MulS32(Register dst, const MemOperand& src1) {
2887 if (is_uint12(src1.offset())) {
2888 ms(dst, src1);
2889 } else if (is_int20(src1.offset())) {
2890 msy(dst, src1);
2891 } else {
2892 UNIMPLEMENTED();
2893 }
2894}
2895
2896void MacroAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); }
2897
2898void MacroAssembler::MulS32(Register dst, const Operand& src1) {
2899 msfi(dst, src1);
2900}
2901
2902#define Generate_MulHigh32(instr) \
2903 { \
2904 lgfr(dst, src1); \
2905 instr(dst, src2); \
2906 srlg(dst, dst, Operand(32)); \
2907 }
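// Sketch of what the macro expands to (illustrative): lgfr sign-extends the
// 32-bit src1 into a 64-bit register, the 64-bit multiply then produces the
// full 64-bit product of the two 32-bit operands, and srlg by 32 leaves the
// high half in the low 32 bits of dst. E.g. 0x40000000 * 16 = 2^34, whose
// high word is 4.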
2908
2909void MacroAssembler::MulHighS32(Register dst, Register src1,
2910 const MemOperand& src2) {
2911 Generate_MulHigh32(msgf);
2912}
2913
2914void MacroAssembler::MulHighS32(Register dst, Register src1, Register src2) {
2915 if (dst == src2) {
2916 std::swap(src1, src2);
2917 }
2918 Generate_MulHigh32(msgfr);
2919}
2920
2921void MacroAssembler::MulHighS32(Register dst, Register src1,
2922 const Operand& src2) {
2923 Generate_MulHigh32(msgfi);
2924}
2925
2926#undef Generate_MulHigh32
2927
2928#define Generate_MulHighU32(instr) \
2929 { \
2930 lr(r1, src1); \
2931 instr(r0, src2); \
2932 LoadU32(dst, r0); \
2933 }
2934
2935void MacroAssembler::MulHighU32(Register dst, Register src1,
2936 const MemOperand& src2) {
2937 Generate_MulHighU32(ml);
2938}
2939
2940void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
2941 Generate_MulHighU32(mlr);
2942}
2943
2944void MacroAssembler::MulHighU32(Register dst, Register src1,
2945 const Operand& src2) {
2946 USE(dst);
2947 USE(src1);
2948 USE(src2);
2949 UNREACHABLE();
2950}
2951
2952#undef Generate_MulHighU32
2953
2954#define Generate_Mul32WithOverflowIfCCUnequal(instr) \
2955 { \
2956 lgfr(dst, src1); \
2957 instr(dst, src2); \
2958 cgfr(dst, dst); \
2959 }
2960
2961void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2962 const MemOperand& src2) {
2963 Register result = dst;
2964 if (src2.rx() == dst || src2.rb() == dst) dst = r0;
2965 Generate_Mul32WithOverflowIfCCUnequal(msgf);
2966 if (result != dst) llgfr(result, dst);
2967}
2968
2969void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2970 Register src2) {
2971 if (dst == src2) {
2972 std::swap(src1, src2);
2973 }
2974 Generate_Mul32WithOverflowIfCCUnequal(msgfr);
2975}
2976
2977void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2978 const Operand& src2) {
2979 Generate_Mul32WithOverflowIfCCUnequal(msgfi);
2980}
2981
2982#undef Generate_Mul32WithOverflowIfCCUnequal
2983
2984#define Generate_Div32(instr) \
2985 { \
2986 lgfr(r1, src1); \
2987 instr(r0, src2); \
2988 LoadU32(dst, r1); \
2989 }
2990
2991void MacroAssembler::DivS32(Register dst, Register src1,
2992 const MemOperand& src2) {
2993 Generate_Div32(dsgf);
2994}
2995
2996void MacroAssembler::DivS32(Register dst, Register src1, Register src2) {
2997 Generate_Div32(dsgfr);
2998}
2999
3000#undef Generate_Div32
3001
3002#define Generate_DivU32(instr) \
3003 { \
3004 lr(r0, src1); \
3005 srdl(r0, Operand(32)); \
3006 instr(r0, src2); \
3007 LoadU32(dst, r1); \
3008 }
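// Note on the macro above (illustrative): the dividend is first placed in
// r0, and srdl shifts the r0:r1 even/odd pair right by 32, moving the
// dividend into r1 and clearing r0, i.e. the pair now holds the
// zero-extended dividend. dl/dlr then divides the 64-bit r0:r1 pair by
// src2, producing the quotient in r1 (read here) and the remainder in r0
// (read by Generate_ModU32 further down).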
3009
3010void MacroAssembler::DivU32(Register dst, Register src1,
3011 const MemOperand& src2) {
3012 Generate_DivU32(dl);
3013}
3014
3015void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
3016 Generate_DivU32(dlr);
3017}
3018
3019#undef Generate_DivU32
3020
3021#define Generate_Div64(instr) \
3022 { \
3023 lgr(r1, src1); \
3024 instr(r0, src2); \
3025 lgr(dst, r1); \
3026 }
3027
3028void MacroAssembler::DivS64(Register dst, Register src1,
3029 const MemOperand& src2) {
3030 Generate_Div64(dsg);
3031}
3032
3033void MacroAssembler::DivS64(Register dst, Register src1, Register src2) {
3034 Generate_Div64(dsgr);
3035}
3036
3037#undef Generate_Div64
3038
3039#define Generate_DivU64(instr) \
3040 { \
3041 lgr(r1, src1); \
3042 lghi(r0, Operand::Zero()); \
3043 instr(r0, src2); \
3044 lgr(dst, r1); \
3045 }
3046
3047void MacroAssembler::DivU64(Register dst, Register src1,
3048 const MemOperand& src2) {
3049 Generate_DivU64(dlg);
3050}
3051
3052void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
3053 Generate_DivU64(dlgr);
3054}
3055
3056#undef Generate_DivU64
3057
3058#define Generate_Mod32(instr) \
3059 { \
3060 lgfr(r1, src1); \
3061 instr(r0, src2); \
3062 LoadU32(dst, r0); \
3063 }
3064
3065void MacroAssembler::ModS32(Register dst, Register src1,
3066 const MemOperand& src2) {
3067 Generate_Mod32(dsgf);
3068}
3069
3070void MacroAssembler::ModS32(Register dst, Register src1, Register src2) {
3071 Generate_Mod32(dsgfr);
3072}
3073
3074#undef Generate_Mod32
3075
3076#define Generate_ModU32(instr) \
3077 { \
3078 lr(r0, src1); \
3079 srdl(r0, Operand(32)); \
3080 instr(r0, src2); \
3081 LoadU32(dst, r0); \
3082 }
3083
3084void MacroAssembler::ModU32(Register dst, Register src1,
3085 const MemOperand& src2) {
3086 Generate_ModU32(dl);
3087}
3088
3089void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
3090 Generate_ModU32(dlr);
3091}
3092
3093#undef Generate_ModU32
3094
3095#define Generate_Mod64(instr) \
3096 { \
3097 lgr(r1, src1); \
3098 instr(r0, src2); \
3099 lgr(dst, r0); \
3100 }
3101
3102void MacroAssembler::ModS64(Register dst, Register src1,
3103 const MemOperand& src2) {
3104 Generate_Mod64(dsg);
3105}
3106
3107void MacroAssembler::ModS64(Register dst, Register src1, Register src2) {
3108 Generate_Mod64(dsgr);
3109}
3110
3111#undef Generate_Mod64
3112
3113#define Generate_ModU64(instr) \
3114 { \
3115 lgr(r1, src1); \
3116 lghi(r0, Operand::Zero()); \
3117 instr(r0, src2); \
3118 lgr(dst, r0); \
3119 }
3120
3121void MacroAssembler::ModU64(Register dst, Register src1,
3122 const MemOperand& src2) {
3123 Generate_ModU64(dlg);
3124}
3125
3126void MacroAssembler::ModU64(Register dst, Register src1, Register src2) {
3127 Generate_ModU64(dlgr);
3128}
3129
3130#undef Generate_ModU64
3131
3132void MacroAssembler::MulS64(Register dst, const Operand& opnd) {
3133 msgfi(dst, opnd);
3134}
3135
3136void MacroAssembler::MulS64(Register dst, Register src) { msgr(dst, src); }
3137
3138void MacroAssembler::MulS64(Register dst, const MemOperand& opnd) {
3139 msg(dst, opnd);
3140}
3141
3142void MacroAssembler::MulHighS64(Register dst, Register src1, Register src2) {
3143 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
3144 mgrk(r0, src1, src2);
3145 lgr(dst, r0);
3146 } else {
3148 PushCallerSaved(fp_mode, ip);
3149 Push(src1, src2);
3150 Pop(r2, r3);
3151 {
3152 FrameScope scope(this, StackFrame::INTERNAL);
3153 PrepareCallCFunction(2, 0, r0);
3154 CallCFunction(ExternalReference::int64_mul_high_function(), 2, 0);
3155 }
3156 mov(r0, r2);
3157 PopCallerSaved(fp_mode, ip);
3158 mov(dst, r0);
3159 }
3160}
3161
3162void MacroAssembler::MulHighS64(Register dst, Register src1,
3163 const MemOperand& src2) {
3164 // TODO(v8): implement this.
3165 UNIMPLEMENTED();
3166}
3167
3168void MacroAssembler::MulHighU64(Register dst, Register src1, Register src2) {
3169 lgr(r1, src1);
3170 mlgr(r0, src2);
3171 lgr(dst, r0);
3172}
3173
3174void MacroAssembler::MulHighU64(Register dst, Register src1,
3175 const MemOperand& src2) {
3176 // TODO(v8): implement this.
3177 UNIMPLEMENTED();
3178}
3179
3181 sqdbr(result, input);
3182}
3184 if (is_uint12(input.offset())) {
3185 sqdb(result, input);
3186 } else {
3187 ldy(result, input);
3188 sqdbr(result, result);
3189 }
3190}
3191//----------------------------------------------------------------------------
3192// Add Instructions
3193//----------------------------------------------------------------------------
3194
3195// Add 32-bit (Register dst = Register dst + Immediate opnd)
3196void MacroAssembler::AddS32(Register dst, const Operand& opnd) {
3197 if (is_int16(opnd.immediate()))
3198 ahi(dst, opnd);
3199 else
3200 afi(dst, opnd);
3201}
3202
3203// Add Pointer Size (Register dst = Register dst + Immediate opnd)
3204void MacroAssembler::AddS64(Register dst, const Operand& opnd) {
3205 if (is_int16(opnd.immediate()))
3206 aghi(dst, opnd);
3207 else
3208 agfi(dst, opnd);
3209}
3210
3211void MacroAssembler::AddS32(Register dst, Register src, int32_t opnd) {
3212 AddS32(dst, src, Operand(opnd));
3213}
3214
3215// Add 32-bit (Register dst = Register src + Immediate opnd)
3216void MacroAssembler::AddS32(Register dst, Register src, const Operand& opnd) {
3217 if (dst != src) {
3218 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3219 ahik(dst, src, opnd);
3220 return;
3221 }
3222 lr(dst, src);
3223 }
3224 AddS32(dst, opnd);
3225}
3226
3227void MacroAssembler::AddS64(Register dst, Register src, int32_t opnd) {
3228 AddS64(dst, src, Operand(opnd));
3229}
3230
3231// Add Pointer Size (Register dst = Register src + Immediate opnd)
3232void MacroAssembler::AddS64(Register dst, Register src, const Operand& opnd) {
3233 if (dst != src) {
3234 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
3235 aghik(dst, src, opnd);
3236 return;
3237 }
3238 mov(dst, src);
3239 }
3240 AddS64(dst, opnd);
3241}
3242
3243// Add 32-bit (Register dst = Register dst + Register src)
3244void MacroAssembler::AddS32(Register dst, Register src) { ar(dst, src); }
3245
3246// Add Pointer Size (Register dst = Register dst + Register src)
3247void MacroAssembler::AddS64(Register dst, Register src) { agr(dst, src); }
3248
3249// Add 32-bit (Register dst = Register src1 + Register src2)
3250void MacroAssembler::AddS32(Register dst, Register src1, Register src2) {
3251 if (dst != src1 && dst != src2) {
3252 // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
3253 // as AR is a smaller instruction.
3254 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3255 ark(dst, src1, src2);
3256 return;
3257 } else {
3258 lr(dst, src1);
3259 }
3260 } else if (dst == src2) {
3261 src2 = src1;
3262 }
3263 ar(dst, src2);
3264}
3265
3266// Add Pointer Size (Register dst = Register src1 + Register src2)
3267void MacroAssembler::AddS64(Register dst, Register src1, Register src2) {
3268 if (dst != src1 && dst != src2) {
3269 // We prefer to generate AR/AGR over the non-clobbering ARK/AGRK,
3270 // as AGR is a smaller instruction.
3271 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3272 agrk(dst, src1, src2);
3273 return;
3274 } else {
3275 mov(dst, src1);
3276 }
3277 } else if (dst == src2) {
3278 src2 = src1;
3279 }
3280 agr(dst, src2);
3281}
3282
3283// Add 32-bit (Register-Memory)
3284void MacroAssembler::AddS32(Register dst, const MemOperand& opnd) {
3285 DCHECK(is_int20(opnd.offset()));
3286 if (is_uint12(opnd.offset()))
3287 a(dst, opnd);
3288 else
3289 ay(dst, opnd);
3290}
3291
3292// Add Pointer Size (Register-Memory)
3293void MacroAssembler::AddS64(Register dst, const MemOperand& opnd) {
3294 DCHECK(is_int20(opnd.offset()));
3295 ag(dst, opnd);
3296}
3297
3298// Add 32-bit (Memory - Immediate)
3299void MacroAssembler::AddS32(const MemOperand& opnd, const Operand& imm) {
3300 DCHECK(is_int8(imm.immediate()));
3301 DCHECK(is_int20(opnd.offset()));
3302 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3303 asi(opnd, imm);
3304}
3305
3306// Add Pointer-sized (Memory - Immediate)
3307void MacroAssembler::AddS64(const MemOperand& opnd, const Operand& imm) {
3308 DCHECK(is_int8(imm.immediate()));
3309 DCHECK(is_int20(opnd.offset()));
3310 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
3311 agsi(opnd, imm);
3312}
3313
3314//----------------------------------------------------------------------------
3315// Add Logical Instructions
3316//----------------------------------------------------------------------------
3317
3318// Add Logical 32-bit (Register dst = Register src1 + Register src2)
3319void MacroAssembler::AddU32(Register dst, Register src1, Register src2) {
3320 if (dst != src2 && dst != src1) {
3321 lr(dst, src1);
3322 alr(dst, src2);
3323 } else if (dst != src2) {
3324 // dst == src1
3325 DCHECK(dst == src1);
3326 alr(dst, src2);
3327 } else {
3328 // dst == src2
3329 DCHECK(dst == src2);
3330 alr(dst, src1);
3331 }
3332}
3333
3334// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
3335void MacroAssembler::AddU32(Register dst, const Operand& imm) {
3336 alfi(dst, imm);
3337}
3338
3339// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
3340void MacroAssembler::AddU64(Register dst, const Operand& imm) {
3341 algfi(dst, imm);
3342}
3343
3344void MacroAssembler::AddU64(Register dst, Register src1, Register src2) {
3345 if (dst != src2 && dst != src1) {
3346 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3347 algrk(dst, src1, src2);
3348 } else {
3349 lgr(dst, src1);
3350 algr(dst, src2);
3351 }
3352 } else if (dst != src2) {
3353 // dst == src1
3354 DCHECK(dst == src1);
3355 algr(dst, src2);
3356 } else {
3357 // dst == src2
3358 DCHECK(dst == src2);
3359 algr(dst, src1);
3360 }
3361}
3362
3363// Add Logical 32-bit (Register-Memory)
3364void MacroAssembler::AddU32(Register dst, const MemOperand& opnd) {
3365 DCHECK(is_int20(opnd.offset()));
3366 if (is_uint12(opnd.offset()))
3367 al_z(dst, opnd);
3368 else
3369 aly(dst, opnd);
3370}
3371
3372// Add Logical Pointer Size (Register-Memory)
3373void MacroAssembler::AddU64(Register dst, const MemOperand& opnd) {
3374 DCHECK(is_int20(opnd.offset()));
3375 alg(dst, opnd);
3376}
3377
3378//----------------------------------------------------------------------------
3379// Subtract Instructions
3380//----------------------------------------------------------------------------
3381
3382// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
3383void MacroAssembler::SubU32(Register dst, Register src1, Register src2) {
3384 if (dst != src2 && dst != src1) {
3385 lr(dst, src1);
3386 slr(dst, src2);
3387 } else if (dst != src2) {
3388 // dst == src1
3389 DCHECK(dst == src1);
3390 slr(dst, src2);
3391 } else {
3392 // dst == src2
3393 DCHECK(dst == src2);
3394 lr(r0, dst);
3395 SubU32(dst, src1, r0);
3396 }
3397}
3398
3399// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
3400void MacroAssembler::SubS32(Register dst, const Operand& imm) {
3401 AddS32(dst, Operand(-(imm.immediate())));
3402}
3403
3404// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
3405void MacroAssembler::SubS64(Register dst, const Operand& imm) {
3406 AddS64(dst, Operand(-(imm.immediate())));
3407}
3408
3409void MacroAssembler::SubS32(Register dst, Register src, int32_t imm) {
3410 SubS32(dst, src, Operand(imm));
3411}
3412
3413// Subtract 32-bit (Register dst = Register src - Immediate opnd)
3414void MacroAssembler::SubS32(Register dst, Register src, const Operand& imm) {
3415 AddS32(dst, src, Operand(-(imm.immediate())));
3416}
3417
3418void MacroAssembler::SubS64(Register dst, Register src, int32_t imm) {
3419 SubS64(dst, src, Operand(imm));
3420}
3421
3422// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
3423void MacroAssembler::SubS64(Register dst, Register src, const Operand& imm) {
3424 AddS64(dst, src, Operand(-(imm.immediate())));
3425}
3426
3427// Subtract 32-bit (Register dst = Register dst - Register src)
3428void MacroAssembler::SubS32(Register dst, Register src) { sr(dst, src); }
3429
3430// Subtract Pointer Size (Register dst = Register dst - Register src)
3431void MacroAssembler::SubS64(Register dst, Register src) { sgr(dst, src); }
3432
3433// Subtract 32-bit (Register = Register - Register)
3434void MacroAssembler::SubS32(Register dst, Register src1, Register src2) {
3435 // Use non-clobbering version if possible
3436 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3437 srk(dst, src1, src2);
3438 return;
3439 }
3440 if (dst != src1 && dst != src2) lr(dst, src1);
3441 // In the scenario dst = src1 - dst, we negate dst and then add src1.
3442 if (dst != src1 && dst == src2) {
3443 Label done;
3444 lcr(dst, dst); // dst = -dst
3445 b(overflow, &done);
3446 ar(dst, src1); // dst = dst + src
3447 bind(&done);
3448 } else {
3449 sr(dst, src2);
3450 }
3451}
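// Illustrative trace of the dst == src2 path above: for src1 = 10 and
// dst = src2 = 3, lcr turns dst into -3 and ar then adds src1, giving
// 10 - 3 = 7. The branch on overflow skips the add when the negation itself
// overflows (dst == INT32_MIN), so that overflow condition is not masked by
// the following addition. The 64-bit variant below uses the same trick.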
3452
3453// Subtract Pointer Sized (Register = Register - Register)
3454void MacroAssembler::SubS64(Register dst, Register src1, Register src2) {
3455 // Use non-clobbering version if possible
3456 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3457 sgrk(dst, src1, src2);
3458 return;
3459 }
3460 if (dst != src1 && dst != src2) mov(dst, src1);
3461 // In the scenario dst = src1 - dst, we negate dst and then add src1.
3462 if (dst != src1 && dst == src2) {
3463 Label done;
3464 lcgr(dst, dst); // dst = -dst
3465 b(overflow, &done);
3466 AddS64(dst, src1); // dst = dst + src
3467 bind(&done);
3468 } else {
3469 SubS64(dst, src2);
3470 }
3471}
3472
3473// Subtract 32-bit (Register-Memory)
3474void MacroAssembler::SubS32(Register dst, const MemOperand& opnd) {
3475 DCHECK(is_int20(opnd.offset()));
3476 if (is_uint12(opnd.offset()))
3477 s(dst, opnd);
3478 else
3479 sy(dst, opnd);
3480}
3481
3482// Subtract Pointer Sized (Register - Memory)
3483void MacroAssembler::SubS64(Register dst, const MemOperand& opnd) {
3484 sg(dst, opnd);
3485}
3486
3487void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
3488 sllg(r0, src, Operand(32));
3489 ldgr(dst, r0);
3490}
3491
3492void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
3493 lgdr(dst, src);
3494 srlg(dst, dst, Operand(32));
3495}
3496
3497// Load And Subtract 32-bit (similar to laa/lan/lao/lax)
3498void MacroAssembler::LoadAndSub32(Register dst, Register src,
3499 const MemOperand& opnd) {
3500 lcr(dst, src);
3501 laa(dst, dst, opnd);
3502}
3503
3504void MacroAssembler::LoadAndSub64(Register dst, Register src,
3505 const MemOperand& opnd) {
3506 lcgr(dst, src);
3507 laag(dst, dst, opnd);
3508}
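// Note (illustrative reading of the two helpers above): they implement an
// atomic fetch-and-subtract. The negation (lcr/lcgr) puts -src into dst,
// and laa/laag then atomically adds that to the memory word while returning
// the word's previous value in dst, so the caller observes the value before
// the subtraction.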
3509
3510//----------------------------------------------------------------------------
3511// Subtract Logical Instructions
3512//----------------------------------------------------------------------------
3513
3514// Subtract Logical 32-bit (Register - Memory)
3515void MacroAssembler::SubU32(Register dst, const MemOperand& opnd) {
3516 DCHECK(is_int20(opnd.offset()));
3517 if (is_uint12(opnd.offset()))
3518 sl(dst, opnd);
3519 else
3520 sly(dst, opnd);
3521}
3522
3523// Subtract Logical Pointer Sized (Register - Memory)
3524void MacroAssembler::SubU64(Register dst, const MemOperand& opnd) {
3525 DCHECK(is_int20(opnd.offset()));
3526 slgf(dst, opnd);
3527}
3528
3529//----------------------------------------------------------------------------
3530// Bitwise Operations
3531//----------------------------------------------------------------------------
3532
3533// AND 32-bit - dst = dst & src
3534void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
3535
3536// AND Pointer Size - dst = dst & src
3537void MacroAssembler::AndP(Register dst, Register src) { ngr(dst, src); }
3538
3539// Non-clobbering AND 32-bit - dst = src1 & src2
3540void MacroAssembler::And(Register dst, Register src1, Register src2) {
3541 if (dst != src1 && dst != src2) {
3542 // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
3543 // as NR is a smaller instruction.
3544 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3545 nrk(dst, src1, src2);
3546 return;
3547 } else {
3548 lr(dst, src1);
3549 }
3550 } else if (dst == src2) {
3551 src2 = src1;
3552 }
3553 And(dst, src2);
3554}
3555
3556// Non-clobbering AND pointer size - dst = src1 & src2
3557void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
3558 if (dst != src1 && dst != src2) {
3559 // We prefer to generate NR/NGR over the non-clobbering NRK/NGRK,
3560 // as NGR is a smaller instruction.
3561 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3562 ngrk(dst, src1, src2);
3563 return;
3564 } else {
3565 mov(dst, src1);
3566 }
3567 } else if (dst == src2) {
3568 src2 = src1;
3569 }
3570 AndP(dst, src2);
3571}
3572
3573// AND 32-bit (Reg - Mem)
3574void MacroAssembler::And(Register dst, const MemOperand& opnd) {
3575 DCHECK(is_int20(opnd.offset()));
3576 if (is_uint12(opnd.offset()))
3577 n(dst, opnd);
3578 else
3579 ny(dst, opnd);
3580}
3581
3582// AND Pointer Size (Reg - Mem)
3583void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
3584 DCHECK(is_int20(opnd.offset()));
3585 ng(dst, opnd);
3586}
3587
3588// AND 32-bit - dst = dst & imm
3589void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
3590
3591// AND Pointer Size - dst = dst & imm
3592void MacroAssembler::AndP(Register dst, const Operand& opnd) {
3593 intptr_t value = opnd.immediate();
3594 if (value >> 32 != -1) {
3595 // This may not work because the condition code won't be set correctly.
3596 nihf(dst, Operand(value >> 32));
3597 }
3598 nilf(dst, Operand(value & 0xFFFFFFFF));
3599}
3600
3601// AND 32-bit - dst = src & imm
3602void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
3603 if (dst != src) lr(dst, src);
3604 nilf(dst, opnd);
3605}
3606
3607// AND Pointer Size - dst = src & imm
3608void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
3609 // Try to exploit RISBG first
3610 intptr_t value = opnd.immediate();
3611 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
3612 intptr_t shifted_value = value;
3613 int trailing_zeros = 0;
3614
3615 // First, count how many trailing zeros the value has.
3616 while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
3617 trailing_zeros++;
3618 shifted_value >>= 1;
3619 }
3620
3621 // If shifted_value (the value with its trailing zeros shifted out) is 1
3622 // less than a power of 2, the mask consists of consecutive 1 bits.
3623 // Special case: if shifted_value is zero, we cannot use RISBG, as it
3624 // requires selection of at least 1 bit.
3625 if ((0 != shifted_value) && base::bits::IsPowerOfTwo(shifted_value + 1)) {
3626 int startBit =
3627 base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
3628 int endBit = 63 - trailing_zeros;
3629 // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
3630 RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
3631 Operand::Zero(), true);
3632 return;
3633 } else if (-1 == shifted_value) {
3634 // A special case in which every bit from the lowest set bit up to the MSB
3635 // is 1. In this case, we can set startBit to 0.
3636 int endBit = 63 - trailing_zeros;
3637 RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
3638 Operand::Zero(), true);
3639 return;
3640 }
3641 }
3642
3643 // If we are AND'ing with zero, the result clears dst anyway, so skip the copy.
3644 if (dst != src && (0 != value)) mov(dst, src);
3645 AndP(dst, opnd);
3646}
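// Worked example for the RISBG path above (illustrative): for the mask
// 0x0000FF00, trailing_zeros becomes 8 and shifted_value becomes 0xFF;
// 0xFF + 1 is a power of two, so the mask is a run of consecutive 1 bits.
// startBit = CountLeadingZeros64(0xFF) - 8 = 48 and endBit = 63 - 8 = 55,
// so RotateInsertSelectBits keeps exactly bits 8..15 of src (bits 48..55 in
// the instruction's MSB-0 numbering) and zeroes the rest.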
3647
3648// OR 32-bit - dst = dst | src
3649void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
3650
3651// OR Pointer Size - dst = dst | src
3652void MacroAssembler::OrP(Register dst, Register src) { ogr(dst, src); }
3653
3654// Non-clobbering OR 32-bit - dst = src1 | src2
3655void MacroAssembler::Or(Register dst, Register src1, Register src2) {
3656 if (dst != src1 && dst != src2) {
3657 // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
3658 // as OR is a smaller instruction.
3659 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3660 ork(dst, src1, src2);
3661 return;
3662 } else {
3663 lr(dst, src1);
3664 }
3665 } else if (dst == src2) {
3666 src2 = src1;
3667 }
3668 Or(dst, src2);
3669}
3670
3671// Non-clobbering OR pointer size - dst = src1 | src2
3672void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
3673 if (dst != src1 && dst != src2) {
3674 // We prefer to generate OR/OGR over the non-clobbering ORK/OGRK,
3675 // as OGR is a smaller instruction.
3676 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3677 ogrk(dst, src1, src2);
3678 return;
3679 } else {
3680 mov(dst, src1);
3681 }
3682 } else if (dst == src2) {
3683 src2 = src1;
3684 }
3685 OrP(dst, src2);
3686}
3687
3688// OR 32-bit (Reg - Mem)
3689void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
3690 DCHECK(is_int20(opnd.offset()));
3691 if (is_uint12(opnd.offset()))
3692 o(dst, opnd);
3693 else
3694 oy(dst, opnd);
3695}
3696
3697// OR Pointer Size (Reg - Mem)
3698void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
3699 DCHECK(is_int20(opnd.offset()));
3700 og(dst, opnd);
3701}
3702
3703// OR 32-bit - dst = dst | imm
3704void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
3705
3706// OR Pointer Size - dst = dst | imm
3707void MacroAssembler::OrP(Register dst, const Operand& opnd) {
3708 intptr_t value = opnd.immediate();
3709 if (value >> 32 != 0) {
3710 // This may not work because the condition code won't be set correctly.
3711 oihf(dst, Operand(value >> 32));
3712 }
3713 oilf(dst, Operand(value & 0xFFFFFFFF));
3714}
3715
3716// OR 32-bit - dst = src | imm
3717void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
3718 if (dst != src) lr(dst, src);
3719 oilf(dst, opnd);
3720}
3721
3722// OR Pointer Size - dst = src | imm
3723void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
3724 if (dst != src) mov(dst, src);
3725 OrP(dst, opnd);
3726}
3727
3728// XOR 32-bit - dst = dst ^ src
3729void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
3730
3731// XOR Pointer Size - dst = dst ^ src
3732void MacroAssembler::XorP(Register dst, Register src) { xgr(dst, src); }
3733
3734// Non-clobbering XOR 32-bit - dst = src1 ^ src2
3735void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
3736 if (dst != src1 && dst != src2) {
3737 // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
3738 // as XR is a smaller instruction.
3739 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3740 xrk(dst, src1, src2);
3741 return;
3742 } else {
3743 lr(dst, src1);
3744 }
3745 } else if (dst == src2) {
3746 src2 = src1;
3747 }
3748 Xor(dst, src2);
3749}
3750
3751// Non-clobbering XOR pointer size - dst = src1 ^ src2
3752void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
3753 if (dst != src1 && dst != src2) {
3754 // We prefer to generate XR/XGR over the non-clobbering XRK/XGRK,
3755 // as XGR is a smaller instruction.
3756 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3757 xgrk(dst, src1, src2);
3758 return;
3759 } else {
3760 mov(dst, src1);
3761 }
3762 } else if (dst == src2) {
3763 src2 = src1;
3764 }
3765 XorP(dst, src2);
3766}
3767
3768// XOR 32-bit (Reg - Mem)
3769void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
3770 DCHECK(is_int20(opnd.offset()));
3771 if (is_uint12(opnd.offset()))
3772 x(dst, opnd);
3773 else
3774 xy(dst, opnd);
3775}
3776
3777// XOR Pointer Size (Reg - Mem)
3778void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
3779 DCHECK(is_int20(opnd.offset()));
3780 xg(dst, opnd);
3781}
3782
3783// XOR 32-bit - dst = dst ^ imm
3784void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
3785
3786// XOR Pointer Size - dst = dst ^ imm
3787void MacroAssembler::XorP(Register dst, const Operand& opnd) {
3788 intptr_t value = opnd.immediate();
3789 xihf(dst, Operand(value >> 32));
3790 xilf(dst, Operand(value & 0xFFFFFFFF));
3791}
3792
3793// XOR 32-bit - dst = src ^ imm
3794void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
3795 if (dst != src) lr(dst, src);
3796 xilf(dst, opnd);
3797}
3798
3799// XOR Pointer Size - dst = src ^ imm
3800void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
3801 if (dst != src) mov(dst, src);
3802 XorP(dst, opnd);
3803}
3804
3805void MacroAssembler::Not32(Register dst, Register src) {
3806 if (src != no_reg && src != dst) lr(dst, src);
3807 xilf(dst, Operand(0xFFFFFFFF));
3808}
3809
3810void MacroAssembler::Not64(Register dst, Register src) {
3811 if (src != no_reg && src != dst) lgr(dst, src);
3812 xihf(dst, Operand(0xFFFFFFFF));
3813 xilf(dst, Operand(0xFFFFFFFF));
3814}
3815
3816void MacroAssembler::NotP(Register dst, Register src) {
3817 Not64(dst, src);
3818}
3819
3820void MacroAssembler::LoadPositiveP(Register result, Register input) {
3821 lpgr(result, input);
3822}
3823
3824void MacroAssembler::LoadPositive32(Register result, Register input) {
3825 lpr(result, input);
3826 lgfr(result, result);
3827}
3828
3829//-----------------------------------------------------------------------------
3830// Compare Helpers
3831//-----------------------------------------------------------------------------
3832
3833// Compare 32-bit Register vs Register
3834void MacroAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); }
3835
3836// Compare Pointer Sized Register vs Register
3837void MacroAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
3838
3839// Compare 32-bit Register vs Immediate
3840// This helper will set up proper relocation entries if required.
3841void MacroAssembler::CmpS32(Register dst, const Operand& opnd) {
3842 if (opnd.rmode() == RelocInfo::NO_INFO) {
3843 intptr_t value = opnd.immediate();
3844 if (is_int16(value))
3845 chi(dst, opnd);
3846 else
3847 cfi(dst, opnd);
3848 } else {
3849 // Need to generate relocation record here
3850 RecordRelocInfo(opnd.rmode(), opnd.immediate());
3851 cfi(dst, opnd);
3852 }
3853}
3854
3855// Compare Pointer Sized Register vs Immediate
3856// This helper will set up proper relocation entries if required.
3857void MacroAssembler::CmpS64(Register dst, const Operand& opnd) {
3858 if (opnd.rmode() == RelocInfo::NO_INFO) {
3859 cgfi(dst, opnd);
3860 } else {
3861 mov(r0, opnd); // Need to generate 64-bit relocation
3862 cgr(dst, r0);
3863 }
3864}
3865
3866// Compare 32-bit Register vs Memory
3867void MacroAssembler::CmpS32(Register dst, const MemOperand& opnd) {
3868 // make sure offset is within 20 bit range
3869 DCHECK(is_int20(opnd.offset()));
3870 if (is_uint12(opnd.offset()))
3871 c(dst, opnd);
3872 else
3873 cy(dst, opnd);
3874}
3875
3876// Compare Pointer Size Register vs Memory
3877void MacroAssembler::CmpS64(Register dst, const MemOperand& opnd) {
3878 // make sure offset is within 20 bit range
3879 DCHECK(is_int20(opnd.offset()));
3880 cg(dst, opnd);
3881}
3882
3884 cebr(src1, src2);
3885}
3886
3888 cdbr(src1, src2);
3889}
3890
3891void MacroAssembler::CmpF32(DoubleRegister src1, const MemOperand& src2) {
3892 DCHECK(is_int12(src2.offset()));
3893 ceb(src1, src2);
3894}
3895
3896void MacroAssembler::CmpF64(DoubleRegister src1, const MemOperand& src2) {
3897 DCHECK(is_int12(src2.offset()));
3898 cdb(src1, src2);
3899}
3900
3901// Using cs or scy based on the offset
3902void MacroAssembler::CmpAndSwap(Register old_val, Register new_val,
3903 const MemOperand& opnd) {
3904 if (is_uint12(opnd.offset())) {
3905 cs(old_val, new_val, opnd);
3906 } else {
3907 csy(old_val, new_val, opnd);
3908 }
3909}
3910
3911void MacroAssembler::CmpAndSwap64(Register old_val, Register new_val,
3912 const MemOperand& opnd) {
3913 DCHECK(is_int20(opnd.offset()));
3914 csg(old_val, new_val, opnd);
3915}
3916
3917//-----------------------------------------------------------------------------
3918// Compare Logical Helpers
3919//-----------------------------------------------------------------------------
3920
3921// Compare Logical 32-bit Register vs Register
3922void MacroAssembler::CmpU32(Register dst, Register src) { clr(dst, src); }
3923
3924// Compare Logical Pointer Sized Register vs Register
3925void MacroAssembler::CmpU64(Register dst, Register src) {
3926 clgr(dst, src);
3927}
3928
3929// Compare Logical 32-bit Register vs Immediate
3930void MacroAssembler::CmpU32(Register dst, const Operand& opnd) {
3931 clfi(dst, opnd);
3932}
3933
3934// Compare Logical Pointer Sized Register vs Immediate
3935void MacroAssembler::CmpU64(Register dst, const Operand& opnd) {
3936 DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
3937 clgfi(dst, opnd);
3938}
3939
3940// Compare Logical 32-bit Register vs Memory
3941void MacroAssembler::CmpU32(Register dst, const MemOperand& opnd) {
3942 // make sure offset is within 20 bit range
3943 DCHECK(is_int20(opnd.offset()));
3944 if (is_uint12(opnd.offset()))
3945 cl(dst, opnd);
3946 else
3947 cly(dst, opnd);
3948}
3949
3950// Compare Logical Pointer Sized Register vs Memory
3951void MacroAssembler::CmpU64(Register dst, const MemOperand& opnd) {
3952 // make sure offset is within 20 bit range
3953 DCHECK(is_int20(opnd.offset()));
3954 clg(dst, opnd);
3955}
3956
3957void MacroAssembler::Branch(Condition c, const Operand& opnd) {
3958 intptr_t value = opnd.immediate();
3959 if (is_int16(value))
3960 brc(c, opnd);
3961 else
3962 brcl(c, opnd);
3963}
3964
3965// Branch On Count. Decrement R1, and branch if R1 != 0.
3966void MacroAssembler::BranchOnCount(Register r1, Label* l) {
3968 if (is_int16(offset)) {
3969 brctg(r1, Operand(offset));
3970 } else {
3971 AddS64(r1, Operand(-1));
3972 Branch(ne, Operand(offset));
3973 }
3974}
3975
3976void MacroAssembler::LoadSmiLiteral(Register dst, Tagged<Smi> smi) {
3977 intptr_t value = static_cast<intptr_t>(smi.ptr());
3978#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3979 llilf(dst, Operand(value));
3980#else
3981 DCHECK_EQ(value & 0xFFFFFFFF, 0);
3982 // The smi value is loaded into the upper 32 bits. The lower 32 bits are zero.
3983 llihf(dst, Operand(value >> 32));
3984#endif
3985}
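// Illustrative note on the two encodings above: with pointer compression or
// 31-bit Smis the whole tagged value fits in the low 32 bits, so a single
// llilf materializes it. In the uncompressed 64-bit layout the payload lives
// in the upper 32 bits (the DCHECK verifies the low word is zero), so llihf
// loads value >> 32 into the high half and leaves the low half zero.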
3986
3987void MacroAssembler::CmpSmiLiteral(Register src1, Tagged<Smi> smi,
3988 Register scratch) {
3989#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3990 // CFI takes 32-bit immediate.
3991 cfi(src1, Operand(smi));
3992#else
3993 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3994 cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));
3995 } else {
3996 LoadSmiLiteral(scratch, smi);
3997 cgr(src1, scratch);
3998 }
3999#endif
4000}
4001
4002void MacroAssembler::LoadU64(Register dst, const MemOperand& mem,
4003 Register scratch) {
4004 int offset = mem.offset();
4005
4006 MemOperand src = mem;
4007 if (!is_int20(offset)) {
4008 DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
4009 DCHECK(scratch != mem.rb());
4010 mov(scratch, Operand(offset));
4011 src = MemOperand(mem.rb(), scratch);
4012 }
4013 lg(dst, src);
4014}
4015
4016// Store a "pointer" sized value to the memory location
4017void MacroAssembler::StoreU64(Register src, const MemOperand& mem,
4018 Register scratch) {
4019 if (!is_int20(mem.offset())) {
4020 DCHECK(scratch != no_reg);
4021 DCHECK(scratch != r0);
4022 mov(scratch, Operand(mem.offset()));
4023 stg(src, MemOperand(mem.rb(), scratch));
4024 } else {
4025 stg(src, mem);
4026 }
4027}
4028
4029// Store a "pointer" sized constant to the memory location
4030void MacroAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
4031 Register scratch) {
4032 // Relocations not supported
4033 DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO);
4034
4035 // Try to use MVGHI/MVHI
4036 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
4037 mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
4038 mvghi(mem, opnd);
4039 } else {
4040 mov(scratch, opnd);
4041 StoreU64(scratch, mem);
4042 }
4043}
4044
4045void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
4046 const MemOperand& mem) {
4047 DCHECK(is_int20(mem.offset()));
4048 lmg(dst1, dst2, mem);
4049}
4050
4051void MacroAssembler::StoreMultipleP(Register src1, Register src2,
4052 const MemOperand& mem) {
4053 DCHECK(is_int20(mem.offset()));
4054 stmg(src1, src2, mem);
4055}
4056
4057void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
4058 const MemOperand& mem) {
4059 if (is_uint12(mem.offset())) {
4060 lm(dst1, dst2, mem);
4061 } else {
4062 DCHECK(is_int20(mem.offset()));
4063 lmy(dst1, dst2, mem);
4064 }
4065}
4066
4067void MacroAssembler::StoreMultipleW(Register src1, Register src2,
4068 const MemOperand& mem) {
4069 if (is_uint12(mem.offset())) {
4070 stm(src1, src2, mem);
4071 } else {
4072 DCHECK(is_int20(mem.offset()));
4073 stmy(src1, src2, mem);
4074 }
4075}
4076
4077// Load 32-bits and sign extend if necessary.
4078void MacroAssembler::LoadS32(Register dst, Register src) {
4079 lgfr(dst, src);
4080}
4081
4082// Load 32-bits and sign extend if necessary.
4083void MacroAssembler::LoadS32(Register dst, const MemOperand& mem,
4084 Register scratch) {
4085 int offset = mem.offset();
4086
4087 if (!is_int20(offset)) {
4088 DCHECK(scratch != no_reg);
4089 mov(scratch, Operand(offset));
4090 lgf(dst, MemOperand(mem.rb(), scratch));
4091 } else {
4092 lgf(dst, mem);
4093 }
4094}
4095
4096// Load 32-bits and zero extend if necessary.
4097void MacroAssembler::LoadU32(Register dst, Register src) {
4098 llgfr(dst, src);
4099}
4100
4101// Variable length depending on whether offset fits into immediate field
4102// MemOperand of RX or RXY format
4103void MacroAssembler::LoadU32(Register dst, const MemOperand& mem,
4104 Register scratch) {
4105 Register base = mem.rb();
4106 int offset = mem.offset();
4107
4108 if (is_int20(offset)) {
4109 llgf(dst, mem);
4110 } else if (scratch != no_reg) {
4111 // Materialize offset into scratch register.
4112 mov(scratch, Operand(offset));
4113 llgf(dst, MemOperand(base, scratch));
4114 } else {
4115 DCHECK(false);
4116 }
4117}
4118
4119void MacroAssembler::LoadU16(Register dst, const MemOperand& mem) {
4120 // TODO(s390x): Add scratch reg
4121 llgh(dst, mem);
4122}
4123
4124void MacroAssembler::LoadU16(Register dst, Register src) {
4125 llghr(dst, src);
4126}
4127
4128void MacroAssembler::LoadS8(Register dst, const MemOperand& mem) {
4129 // TODO(s390x): Add scratch reg
4130 lgb(dst, mem);
4131}
4132
4133void MacroAssembler::LoadS8(Register dst, Register src) {
4134 lgbr(dst, src);
4135}
4136
4137void MacroAssembler::LoadU8(Register dst, const MemOperand& mem) {
4138 // TODO(s390x): Add scratch reg
4139 llgc(dst, mem);
4140}
4141
4142void MacroAssembler::LoadU8(Register dst, Register src) {
4143 llgcr(dst, src);
4144}
4145
4146#ifdef V8_TARGET_BIG_ENDIAN
4147void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem,
4148 Register scratch) {
4149 lrvg(dst, mem);
4150}
4151
4152void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
4153 Register scratch) {
4154 lrv(dst, opnd);
4155 LoadS32(dst, dst);
4156}
4157
4158void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
4159 Register scratch) {
4160 lrv(dst, opnd);
4161 LoadU32(dst, dst);
4162}
4163
4164void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
4165 lrvh(dst, opnd);
4166 LoadU16(dst, dst);
4167}
4168
4169void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
4170 lrvh(dst, opnd);
4171 LoadS16(dst, dst);
4172}
4173
4174void MacroAssembler::LoadV128LE(Simd128Register dst, const MemOperand& opnd,
4175 Register scratch0, Register scratch1) {
4176 bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
4177 is_uint12(opnd.offset());
4178 if (use_vlbr) {
4179 vlbr(dst, opnd, Condition(4));
4180 } else {
4181 lrvg(scratch0, opnd);
4182 lrvg(scratch1,
4183 MemOperand(opnd.rx(), opnd.rb(), opnd.offset() + kSystemPointerSize));
4184 vlvgp(dst, scratch1, scratch0);
4185 }
4186}
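// On this big-endian target the LE accessors byte-reverse on the fly: with
// VECTOR_ENHANCE_FACILITY_2 a single VLBR does the reversed 128-bit load,
// otherwise the value is assembled from two byte-reversed 64-bit loads (LRVG)
// and packed with VLVGP, swapping the halves to preserve lane order.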
4187
4188void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
4189 Register scratch) {
4190 lrvg(scratch, opnd);
4191 ldgr(dst, scratch);
4192}
4193
4194void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
4195 Register scratch) {
4196 lrv(scratch, opnd);
4197 ShiftLeftU64(scratch, scratch, Operand(32));
4198 ldgr(dst, scratch);
4199}
4200
4201void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem,
4202 Register scratch) {
4203 if (!is_int20(mem.offset())) {
4204 DCHECK(scratch != no_reg);
4205 DCHECK(scratch != r0);
4206 mov(scratch, Operand(mem.offset()));
4207 strvg(src, MemOperand(mem.rb(), scratch));
4208 } else {
4209 strvg(src, mem);
4210 }
4211}
4212
4213void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem,
4214 Register scratch) {
4215 if (!is_int20(mem.offset())) {
4216 DCHECK(scratch != no_reg);
4217 DCHECK(scratch != r0);
4218 mov(scratch, Operand(mem.offset()));
4219 strv(src, MemOperand(mem.rb(), scratch));
4220 } else {
4221 strv(src, mem);
4222 }
4223}
4224
4225void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem,
4226 Register scratch) {
4227 if (!is_int20(mem.offset())) {
4228 DCHECK(scratch != no_reg);
4229 DCHECK(scratch != r0);
4230 mov(scratch, Operand(mem.offset()));
4231 strvh(src, MemOperand(mem.rb(), scratch));
4232 } else {
4233 strvh(src, mem);
4234 }
4235}
4236
4237void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
4238 Register scratch) {
4239 DCHECK(is_int20(opnd.offset()));
4240 lgdr(scratch, src);
4241 strvg(scratch, opnd);
4242}
4243
4244void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
4245 Register scratch) {
4246 DCHECK(is_int20(opnd.offset()));
4247 lgdr(scratch, src);
4248 ShiftRightU64(scratch, scratch, Operand(32));
4249 strv(scratch, opnd);
4250}
4251
4252void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
4253 Register scratch1, Register scratch2) {
4254 bool use_vstbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
4255 is_uint12(mem.offset());
4256 if (use_vstbr) {
4257 vstbr(src, mem, Condition(4));
4258 } else {
4259 vlgv(scratch1, src, MemOperand(r0, 1), Condition(3));
4260 vlgv(scratch2, src, MemOperand(r0, 0), Condition(3));
4261 strvg(scratch1, mem);
4262 strvg(scratch2,
4263 MemOperand(mem.rx(), mem.rb(), mem.offset() + kSystemPointerSize));
4264 }
4265}
4266
4267#else
4268void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem,
4269 Register scratch) {
4270 LoadU64(dst, mem, scratch);
4271}
4272
4273void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
4274 Register scratch) {
4275 LoadS32(dst, opnd, scratch);
4276}
4277
4278void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
4279 Register scratch) {
4280 LoadU32(dst, opnd, scratch);
4281}
4282
4283void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
4284 LoadU16(dst, opnd);
4285}
4286
4287void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
4288 LoadS16(dst, opnd);
4289}
4290
4291void MacroAssembler::LoadV128LE(Simd128Register dst, const MemOperand& opnd,
4292 Register scratch0, Register scratch1) {
4293 USE(scratch1);
4294 LoadV128(dst, opnd, scratch0);
4295}
4296
4297void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
4298 Register scratch) {
4299 USE(scratch);
4300 LoadF64(dst, opnd);
4301}
4302
4303void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
4304 Register scratch) {
4305 USE(scratch);
4306 LoadF32(dst, opnd);
4307}
4308
4309void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem,
4310 Register scratch) {
4311 StoreU64(src, mem, scratch);
4312}
4313
4314void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem,
4315 Register scratch) {
4316 StoreU32(src, mem, scratch);
4317}
4318
4319void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem,
4320 Register scratch) {
4321 StoreU16(src, mem, scratch);
4322}
4323
4324void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd,
4325 Register scratch) {
4326 StoreF64(src, opnd);
4327}
4328
4329void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
4330 Register scratch) {
4331 StoreF32(src, opnd);
4332}
4333
4334void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
4335 Register scratch1, Register scratch2) {
4336 StoreV128(src, mem, scratch1);
4337}
4338
4339#endif
4340
4341// Load And Test (Reg <- Reg)
4342void MacroAssembler::LoadAndTest32(Register dst, Register src) {
4343 ltr(dst, src);
4344}
4345
4346// Load And Test Pointer Sized (Reg <- Reg)
4347void MacroAssembler::LoadAndTestP(Register dst, Register src) {
4348 ltgr(dst, src);
4349}
4350
4351// Load And Test 32-bit (Reg <- Mem)
4352void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
4353 lt_z(dst, mem);
4354}
4355
4356// Load And Test Pointer Sized (Reg <- Mem)
4357void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
4358 ltg(dst, mem);
4359}
4360
4361// Load On Condition Pointer Sized (Reg <- Reg)
4362void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
4363 Register src) {
4364 locgr(cond, dst, src);
4365}
4366
4367// Load Double Precision (64-bit) Floating Point number from memory
4368void MacroAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) {
4369 // For both 32-bit and 64-bit values we use 64-bit floating point registers.
4370 if (is_uint12(mem.offset())) {
4371 ld(dst, mem);
4372 } else {
4373 ldy(dst, mem);
4374 }
4375}
4376
4377// Load Single Precision (32-bit) Floating Point number from memory
4378void MacroAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
4379 if (is_uint12(mem.offset())) {
4380 le_z(dst, mem);
4381 } else {
4382 DCHECK(is_int20(mem.offset()));
4383 ley(dst, mem);
4384 }
4385}
4386
4387void MacroAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
4388 Register scratch) {
4389 DCHECK(scratch != r0);
4390 if (is_uint12(mem.offset())) {
4391 vl(dst, mem, Condition(0));
4392 } else {
4393 DCHECK(is_int20(mem.offset()));
4394 lay(scratch, mem);
4395 vl(dst, MemOperand(scratch), Condition(0));
4396 }
4397}
4398
4399// Store Double Precision (64-bit) Floating Point number to memory
4400void MacroAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) {
4401 if (is_uint12(mem.offset())) {
4402 std(dst, mem);
4403 } else {
4404 stdy(dst, mem);
4405 }
4406}
4407
4408// Store Single Precision (32-bit) Floating Point number to memory
4409void MacroAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
4410 if (is_uint12(mem.offset())) {
4411 ste(src, mem);
4412 } else {
4413 stey(src, mem);
4414 }
4415}
4416
4417void MacroAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
4418 Register scratch) {
4419 DCHECK(scratch != r0);
4420 if (is_uint12(mem.offset())) {
4421 vst(src, mem, Condition(0));
4422 } else {
4423 DCHECK(is_int20(mem.offset()));
4424 lay(scratch, mem);
4425 vst(src, MemOperand(scratch), Condition(0));
4426 }
4427}
4428
4429void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
4430 DoubleRegister rhs) {
4431 if (dst == lhs) {
4432 aebr(dst, rhs);
4433 } else if (dst == rhs) {
4434 aebr(dst, lhs);
4435 } else {
4436 ler(dst, lhs);
4437 aebr(dst, rhs);
4438 }
4439}
4440
4441void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
4442 DoubleRegister rhs) {
4443 if (dst == lhs) {
4444 sebr(dst, rhs);
4445 } else if (dst == rhs) {
4446 sebr(dst, lhs);
4447 lcebr(dst, dst);
4448 } else {
4449 ler(dst, lhs);
4450 sebr(dst, rhs);
4451 }
4452}
4453
4454void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
4455 DoubleRegister rhs) {
4456 if (dst == lhs) {
4457 meebr(dst, rhs);
4458 } else if (dst == rhs) {
4459 meebr(dst, lhs);
4460 } else {
4461 ler(dst, lhs);
4462 meebr(dst, rhs);
4463 }
4464}
4465
4466void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
4467 DoubleRegister rhs) {
4468 if (dst == lhs) {
4469 debr(dst, rhs);
4470 } else if (dst == rhs) {
4471 lay(sp, MemOperand(sp, -kSystemPointerSize));
4472 StoreF32(dst, MemOperand(sp));
4473 ler(dst, lhs);
4474 deb(dst, MemOperand(sp));
4475 la(sp, MemOperand(sp, kSystemPointerSize));
4476 } else {
4477 ler(dst, lhs);
4478 debr(dst, rhs);
4479 }
4480}
4481
4482void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
4483 DoubleRegister rhs) {
4484 if (dst == lhs) {
4485 adbr(dst, rhs);
4486 } else if (dst == rhs) {
4487 adbr(dst, lhs);
4488 } else {
4489 ldr(dst, lhs);
4490 adbr(dst, rhs);
4491 }
4492}
4493
4494void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
4495 DoubleRegister rhs) {
4496 if (dst == lhs) {
4497 sdbr(dst, rhs);
4498 } else if (dst == rhs) {
4499 sdbr(dst, lhs);
4500 lcdbr(dst, dst);
4501 } else {
4502 ldr(dst, lhs);
4503 sdbr(dst, rhs);
4504 }
4505}
4506
4507void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
4508 DoubleRegister rhs) {
4509 if (dst == lhs) {
4510 mdbr(dst, rhs);
4511 } else if (dst == rhs) {
4512 mdbr(dst, lhs);
4513 } else {
4514 ldr(dst, lhs);
4515 mdbr(dst, rhs);
4516 }
4517}
4518
4519void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
4520 DoubleRegister rhs) {
4521 if (dst == lhs) {
4522 ddbr(dst, rhs);
4523 } else if (dst == rhs) {
4524 lay(sp, MemOperand(sp, -kSystemPointerSize));
4525 StoreF64(dst, MemOperand(sp));
4526 ldr(dst, lhs);
4527 ddb(dst, MemOperand(sp));
4528 la(sp, MemOperand(sp, kSystemPointerSize));
4529 } else {
4530 ldr(dst, lhs);
4531 ddbr(dst, rhs);
4532 }
4533}
4534
4535void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
4536 DoubleRegister scratch) {
4537 if (is_uint12(opnd.offset())) {
4538 aeb(dst, opnd);
4539 } else {
4540 ley(scratch, opnd);
4541 aebr(dst, scratch);
4542 }
4543}
4544
4545void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
4546 DoubleRegister scratch) {
4547 if (is_uint12(opnd.offset())) {
4548 adb(dst, opnd);
4549 } else {
4550 ldy(scratch, opnd);
4551 adbr(dst, scratch);
4552 }
4553}
4554
4555void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
4556 DoubleRegister scratch) {
4557 if (is_uint12(opnd.offset())) {
4558 seb(dst, opnd);
4559 } else {
4560 ley(scratch, opnd);
4561 sebr(dst, scratch);
4562 }
4563}
4564
4565void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
4566 DoubleRegister scratch) {
4567 if (is_uint12(opnd.offset())) {
4568 sdb(dst, opnd);
4569 } else {
4570 ldy(scratch, opnd);
4571 sdbr(dst, scratch);
4572 }
4573}
4574
4575void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
4576 DoubleRegister scratch) {
4577 if (is_uint12(opnd.offset())) {
4578 meeb(dst, opnd);
4579 } else {
4580 ley(scratch, opnd);
4581 meebr(dst, scratch);
4582 }
4583}
4584
4585void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
4586 DoubleRegister scratch) {
4587 if (is_uint12(opnd.offset())) {
4588 mdb(dst, opnd);
4589 } else {
4590 ldy(scratch, opnd);
4591 mdbr(dst, scratch);
4592 }
4593}
4594
4595void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
4596 DoubleRegister scratch) {
4597 if (is_uint12(opnd.offset())) {
4598 deb(dst, opnd);
4599 } else {
4600 ley(scratch, opnd);
4601 debr(dst, scratch);
4602 }
4603}
4604
4605void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
4606 DoubleRegister scratch) {
4607 if (is_uint12(opnd.offset())) {
4608 ddb(dst, opnd);
4609 } else {
4610 ldy(scratch, opnd);
4611 ddbr(dst, scratch);
4612 }
4613}
4614
4615void MacroAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd) {
4616 if (is_uint12(opnd.offset())) {
4617 ldeb(dst, opnd);
4618 } else {
4619 ley(dst, opnd);
4620 ldebr(dst, dst);
4621 }
4622}
4623
4624// Variable length depending on whether offset fits into immediate field
4625// MemOperand of RX or RXY format
4626void MacroAssembler::StoreU32(Register src, const MemOperand& mem,
4627 Register scratch) {
4628 Register base = mem.rb();
4629 int offset = mem.offset();
4630
4631 bool use_RXform = false;
4632 bool use_RXYform = false;
4633
4634 if (is_uint12(offset)) {
4635 // RX-format supports unsigned 12-bits offset.
4636 use_RXform = true;
4637 } else if (is_int20(offset)) {
4638 // RXY-format supports signed 20-bits offset.
4639 use_RXYform = true;
4640 } else if (scratch != no_reg) {
4641 // Materialize offset into scratch register.
4642 mov(scratch, Operand(offset));
4643 } else {
4644 // scratch is no_reg
4645 DCHECK(false);
4646 }
4647
4648 if (use_RXform) {
4649 st(src, mem);
4650 } else if (use_RXYform) {
4651 sty(src, mem);
4652 } else {
4653 StoreU32(src, MemOperand(base, scratch));
4654 }
4655}
4656
4657void MacroAssembler::LoadS16(Register dst, Register src) {
4658 lghr(dst, src);
4659}
4660
4661// Loads a 16-bit halfword value from memory and sign-extends it to a
4662// pointer-sized register.
4663void MacroAssembler::LoadS16(Register dst, const MemOperand& mem,
4664 Register scratch) {
4665 Register base = mem.rb();
4666 int offset = mem.offset();
4667
4668 if (!is_int20(offset)) {
4669 DCHECK(scratch != no_reg);
4670 mov(scratch, Operand(offset));
4671 lgh(dst, MemOperand(base, scratch));
4672 } else {
4673 lgh(dst, mem);
4674 }
4675}
4676
4677// Variable length depending on whether offset fits into immediate field
4678// MemOperand currently only supports d-form.
4679void MacroAssembler::StoreU16(Register src, const MemOperand& mem,
4680 Register scratch) {
4681 Register base = mem.rb();
4682 int offset = mem.offset();
4683
4684 if (is_uint12(offset)) {
4685 sth(src, mem);
4686 } else if (is_int20(offset)) {
4687 sthy(src, mem);
4688 } else {
4689 DCHECK(scratch != no_reg);
4690 mov(scratch, Operand(offset));
4691 sth(src, MemOperand(base, scratch));
4692 }
4693}
4694
4695// Variable length depending on whether offset fits into immediate field
4696// MemOperand currently only supports d-form.
4697void MacroAssembler::StoreU8(Register src, const MemOperand& mem,
4698 Register scratch) {
4699 Register base = mem.rb();
4700 int offset = mem.offset();
4701
4702 if (is_uint12(offset)) {
4703 stc(src, mem);
4704 } else if (is_int20(offset)) {
4705 stcy(src, mem);
4706 } else {
4707 DCHECK(scratch != no_reg);
4708 mov(scratch, Operand(offset));
4709 stc(src, MemOperand(base, scratch));
4710 }
4711}
4712
4713// Shift left logical for 32-bit integer types.
4714void MacroAssembler::ShiftLeftU32(Register dst, Register src,
4715 const Operand& val) {
4716 ShiftLeftU32(dst, src, r0, val);
4717}
4718
4719// Shift left logical for 32-bit integer types.
4720void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register val,
4721 const Operand& val2) {
4722 if (dst == src) {
4723 sll(dst, val, val2);
4724 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4725 sllk(dst, src, val, val2);
4726 } else {
4727 DCHECK(dst != val || val == r0); // The lr/sll path clobbers val.
4728 lr(dst, src);
4729 sll(dst, val, val2);
4730 }
4731}
4732
4733// Shift left logical for 64-bit integer types.
4734void MacroAssembler::ShiftLeftU64(Register dst, Register src,
4735 const Operand& val) {
4736 ShiftLeftU64(dst, src, r0, val);
4737}
4738
4739// Shift left logical for 64-bit integer types.
4740void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register val,
4741 const Operand& val2) {
4742 sllg(dst, src, val, val2);
4743}
4744
4745// Shift right logical for 32-bit integer types.
4746void MacroAssembler::ShiftRightU32(Register dst, Register src,
4747 const Operand& val) {
4748 ShiftRightU32(dst, src, r0, val);
4749}
4750
4751// Shift right logical for 32-bit integer types.
4752void MacroAssembler::ShiftRightU32(Register dst, Register src, Register val,
4753 const Operand& val2) {
4754 if (dst == src) {
4755 srl(dst, val, val2);
4756 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4757 srlk(dst, src, val, val2);
4758 } else {
4759 DCHECK(dst != val || val == r0); // The lr/srl path clobbers val.
4760 lr(dst, src);
4761 srl(dst, val, val2);
4762 }
4763}
4764
4765void MacroAssembler::ShiftRightU64(Register dst, Register src, Register val,
4766 const Operand& val2) {
4767 srlg(dst, src, val, val2);
4768}
4769
4770// Shift right logical for 64-bit integer types.
4771void MacroAssembler::ShiftRightU64(Register dst, Register src,
4772 const Operand& val) {
4773 ShiftRightU64(dst, src, r0, val);
4774}
4775
4776// Shift right arithmetic for 32-bit integer types.
4777void MacroAssembler::ShiftRightS32(Register dst, Register src,
4778 const Operand& val) {
4779 ShiftRightS32(dst, src, r0, val);
4780}
4781
4782// Shift right arithmetic for 32-bit integer types.
4783void MacroAssembler::ShiftRightS32(Register dst, Register src, Register val,
4784 const Operand& val2) {
4785 if (dst == src) {
4786 sra(dst, val, val2);
4787 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4788 srak(dst, src, val, val2);
4789 } else {
4790 DCHECK(dst != val || val == r0); // The lr/sra path clobbers val.
4791 lr(dst, src);
4792 sra(dst, val, val2);
4793 }
4794}
4795
4796// Shift right arithmetic for 64-bit integer types.
4797void MacroAssembler::ShiftRightS64(Register dst, Register src,
4798 const Operand& val) {
4799 ShiftRightS64(dst, src, r0, val);
4800}
4801
4802// Shift right arithmetic for 64-bit integer types.
4803void MacroAssembler::ShiftRightS64(Register dst, Register src, Register val,
4804 const Operand& val2) {
4805 srag(dst, src, val, val2);
4806}
4807
4808// Clear the rightmost # of bits.
4809void MacroAssembler::ClearRightImm(Register dst, Register src,
4810 const Operand& val) {
4811 int numBitsToClear = val.immediate() % (kSystemPointerSize * 8);
4812
4813 // Try to use RISBG if possible
4814 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
4815 int endBit = 63 - numBitsToClear;
4816 RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
4817 Operand::Zero(), true);
4818 return;
4819 }
4820
4821 uint64_t hexMask = ~((1L << numBitsToClear) - 1);
4822
4823 // S390 AND instr clobbers source. Make a copy if necessary
4824 if (dst != src) mov(dst, src);
4825
4826 if (numBitsToClear <= 16) {
4827 nill(dst, Operand(static_cast<uint16_t>(hexMask)));
4828 } else if (numBitsToClear <= 32) {
4829 nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
4830 } else if (numBitsToClear <= 64) {
4831 nilf(dst, Operand(static_cast<intptr_t>(0)));
4832 nihf(dst, Operand(hexMask >> 32));
4833 }
4834}
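// Example: ClearRightImm(dst, src, Operand(3)) computes dst = src & ~0x7.
// With GENERAL_INSTR_EXT this is a single RISBG selecting bits 0..60; the
// fallback builds the mask and applies it with NILL/NILF/NIHF depending on how
// many low bits are being cleared.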
4835
4836void MacroAssembler::Popcnt32(Register dst, Register src) {
4837 DCHECK(src != r0);
4838 DCHECK(dst != r0);
4839
4840 popcnt(dst, src);
4841 ShiftRightU32(r0, dst, Operand(16));
4842 ar(dst, r0);
4843 ShiftRightU32(r0, dst, Operand(8));
4844 ar(dst, r0);
4845 llgcr(dst, dst);
4846}
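// POPCNT computes a per-byte population count, so the result is folded by
// shifting and adding (by 16, then by 8); the final LLGCR keeps only the low
// byte, which holds the total (at most 32 and thus fits in one byte).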
4847
4848void MacroAssembler::Popcnt64(Register dst, Register src) {
4849 DCHECK(src != r0);
4850 DCHECK(dst != r0);
4851
4852 popcnt(dst, src);
4853 ShiftRightU64(r0, dst, Operand(32));
4854 AddS64(dst, r0);
4855 ShiftRightU64(r0, dst, Operand(16));
4856 AddS64(dst, r0);
4857 ShiftRightU64(r0, dst, Operand(8));
4858 AddS64(dst, r0);
4859 LoadU8(dst, dst);
4860}
4861
4862void MacroAssembler::SwapP(Register src, Register dst, Register scratch) {
4863 if (src == dst) return;
4864 DCHECK(!AreAliased(src, dst, scratch));
4865 mov(scratch, src);
4866 mov(src, dst);
4867 mov(dst, scratch);
4868}
4869
4870void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
4871 if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
4872 if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
4873 DCHECK(!AreAliased(src, scratch));
4874 mov(scratch, src);
4875 LoadU64(src, dst);
4876 StoreU64(scratch, dst);
4877}
4878
4879void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
4880 Register scratch_1) {
4881 if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
4882 if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
4883 if (dst.rx() != r0) DCHECK(!AreAliased(dst.rx(), scratch_0, scratch_1));
4884 if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
4885 DCHECK(!AreAliased(scratch_0, scratch_1));
4886 LoadU64(scratch_0, src);
4887 LoadU64(scratch_1, dst);
4888 StoreU64(scratch_0, dst);
4889 StoreU64(scratch_1, src);
4890}
4891
4892void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
4893 DoubleRegister scratch) {
4894 if (src == dst) return;
4895 DCHECK(!AreAliased(src, dst, scratch));
4896 ldr(scratch, src);
4897 ldr(src, dst);
4898 ldr(dst, scratch);
4899}
4900
4901void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
4902 DoubleRegister scratch) {
4903 DCHECK(!AreAliased(src, scratch));
4904 ldr(scratch, src);
4905 LoadF32(src, dst);
4906 StoreF32(scratch, dst);
4907}
4908
4909void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst,
4910 DoubleRegister scratch) {
4911 // push d0, to be used as scratch
4912 lay(sp, MemOperand(sp, -kDoubleSize));
4913 StoreF64(d0, MemOperand(sp));
4914 LoadF32(scratch, src);
4915 LoadF32(d0, dst);
4916 StoreF32(scratch, dst);
4917 StoreF32(d0, src);
4918 // restore d0
4919 LoadF64(d0, MemOperand(sp));
4920 lay(sp, MemOperand(sp, kDoubleSize));
4921}
4922
4923void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
4924 DoubleRegister scratch) {
4925 if (src == dst) return;
4926 DCHECK(!AreAliased(src, dst, scratch));
4927 ldr(scratch, src);
4928 ldr(src, dst);
4929 ldr(dst, scratch);
4930}
4931
4932void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
4933 DoubleRegister scratch) {
4934 DCHECK(!AreAliased(src, scratch));
4935 ldr(scratch, src);
4936 LoadF64(src, dst);
4937 StoreF64(scratch, dst);
4938}
4939
4940void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst,
4941 DoubleRegister scratch) {
4942 // push d0, to be used as scratch
4943 lay(sp, MemOperand(sp, -kDoubleSize));
4944 StoreF64(d0, MemOperand(sp));
4945 LoadF64(scratch, src);
4946 LoadF64(d0, dst);
4947 StoreF64(scratch, dst);
4948 StoreF64(d0, src);
4949 // restore d0
4950 LoadF64(d0, MemOperand(sp));
4951 lay(sp, MemOperand(sp, kDoubleSize));
4952}
4953
4954void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
4955 Simd128Register scratch) {
4956 if (src == dst) return;
4957 vlr(scratch, src, Condition(0), Condition(0), Condition(0));
4958 vlr(src, dst, Condition(0), Condition(0), Condition(0));
4959 vlr(dst, scratch, Condition(0), Condition(0), Condition(0));
4960}
4961
4962void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
4963 Simd128Register scratch) {
4964 DCHECK(!AreAliased(src, scratch));
4965 vlr(scratch, src, Condition(0), Condition(0), Condition(0));
4966 LoadV128(src, dst, ip);
4967 StoreV128(scratch, dst, ip);
4968}
4969
4970void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst,
4971 Simd128Register scratch) {
4972 // push d0, to be used as scratch
4973 lay(sp, MemOperand(sp, -kSimd128Size));
4974 StoreV128(d0, MemOperand(sp), ip);
4975 LoadV128(scratch, src, ip);
4976 LoadV128(d0, dst, ip);
4977 StoreV128(scratch, dst, ip);
4978 StoreV128(d0, src, ip);
4979 // restore d0
4980 LoadV128(d0, MemOperand(sp), ip);
4981 lay(sp, MemOperand(sp, kSimd128Size));
4982}
4983
4984void MacroAssembler::ComputeCodeStartAddress(Register dst) {
4985 larl(dst, Operand(-pc_offset() / 2));
4986}
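// LARL takes a halfword-scaled immediate, so -pc_offset() / 2 points the
// destination register back at offset 0 of the code currently being generated.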
4987
4988void MacroAssembler::LoadPC(Register dst) {
4989 Label current_pc;
4990 larl(dst, &current_pc);
4991 bind(&current_pc);
4992}
4993
4994void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
4995 CmpS32(x, Operand(y));
4996 beq(dest);
4997}
4998
4999void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
5000 CmpS32(x, Operand(y));
5001 blt(dest);
5002}
5003
5004void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index,
5005 Register target) {
5006 static_assert(kSystemPointerSize == 8);
5007 static_assert(kSmiTagSize == 1);
5008 static_assert(kSmiTag == 0);
5009 // The builtin_index register contains the builtin index as a Smi.
5010 if (SmiValuesAre32Bits()) {
5011 ShiftRightS64(target, builtin_index,
5012 Operand(kSmiShift - kSystemPointerSizeLog2));
5013 } else {
5014 DCHECK(SmiValuesAre31Bits());
5015 ShiftLeftU64(target, builtin_index,
5016 Operand(kSystemPointerSizeLog2 - kSmiShift));
5017 }
5018 LoadU64(target, MemOperand(kRootRegister, target,
5019 IsolateData::builtin_entry_table_offset()));
5020}
5021
5022void MacroAssembler::CallBuiltinByIndex(Register builtin_index,
5023 Register target) {
5024 LoadEntryFromBuiltinIndex(builtin_index, target);
5025 Call(target);
5026}
5027
5028void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin,
5029 Register destination) {
5030 ASM_CODE_COMMENT(this);
5031 LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
5032}
5033
5034MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
5035 ASM_CODE_COMMENT(this);
5036 DCHECK(root_array_available());
5037 return MemOperand(kRootRegister,
5038 IsolateData::BuiltinEntrySlotOffset(builtin));
5039}
5040
5041#ifdef V8_ENABLE_LEAPTIERING
5042
5043void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
5044 Register dispatch_handle,
5045 Register scratch) {
5046 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
5047 ASM_CODE_COMMENT(this);
5048
5049 Register index = destination;
5050 Move(scratch, ExternalReference::js_dispatch_table_address());
5051 ShiftRightU64(index, dispatch_handle, Operand(kJSDispatchHandleShift));
5052 ShiftLeftU64(index, index, Operand(kJSDispatchTableEntrySizeLog2));
5053 AddS64(scratch, scratch, index);
5054 LoadU64(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
5055}
5056
5057#endif // V8_ENABLE_LEAPTIERING
5058
5059void MacroAssembler::LoadCodeInstructionStart(Register destination,
5060 Register code_object,
5061 CodeEntrypointTag tag) {
5062 ASM_CODE_COMMENT(this);
5063 LoadU64(destination,
5064 FieldMemOperand(code_object, Code::kInstructionStartOffset));
5065}
5066
5067void MacroAssembler::CallCodeObject(Register code_object) {
5068 ASM_CODE_COMMENT(this);
5069 LoadCodeInstructionStart(code_object, code_object);
5070 Call(code_object);
5071}
5072
5073void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
5074 ASM_CODE_COMMENT(this);
5075 DCHECK_EQ(JumpMode::kJump, jump_mode);
5076 LoadCodeInstructionStart(code_object, code_object);
5077 Jump(code_object);
5078}
5079
5080void MacroAssembler::CallJSFunction(Register function_object,
5081 uint16_t argument_count) {
5082 Register code = kJavaScriptCallCodeStartRegister;
5083#if V8_ENABLE_LEAPTIERING
5084 Register dispatch_handle = r0;
5085 Register scratch = ip;
5086 LoadU32(dispatch_handle,
5087 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
5088 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
5089 Call(code);
5090#else
5091 LoadTaggedField(code,
5092 FieldMemOperand(function_object, JSFunction::kCodeOffset));
5093 CallCodeObject(code);
5094#endif // V8_ENABLE_LEAPTIERING
5095}
5096
5097#if V8_ENABLE_LEAPTIERING
5098void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
5099 uint16_t argument_count) {
5100 Register code = kJavaScriptCallCodeStartRegister;
5101 Register dispatch_handle_reg = r0;
5102 Register scratch = ip;
5103 mov(dispatch_handle_reg,
5104 Operand(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
5105 // WARNING: This entrypoint load is only safe because we are storing a
5106 // RelocInfo for the dispatch handle in the mov above (thus keeping the
5107 // dispatch entry alive) _and_ because the entrypoints are not compactable
5108 // (thus meaning that the calculation in the entrypoint load is not
5109 // invalidated by a compaction).
5110 // TODO(leszeks): Make this less of a footgun.
5111 static_assert(!JSDispatchTable::kSupportsCompaction);
5112 LoadEntrypointFromJSDispatchTable(code, dispatch_handle_reg, scratch);
5113 CHECK_EQ(argument_count,
5114 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
5115 dispatch_handle));
5116 Call(code);
5117}
5118#endif
5119
5120void MacroAssembler::JumpJSFunction(Register function_object,
5121 JumpMode jump_mode) {
5122 Register code = kJavaScriptCallCodeStartRegister;
5123#if V8_ENABLE_LEAPTIERING
5124 Register dispatch_handle = r0;
5125 Register scratch = ip;
5126 LoadU32(dispatch_handle,
5127 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
5128 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
5129 Jump(code);
5130#else
5131 LoadTaggedField(code,
5132 FieldMemOperand(function_object, JSFunction::kCodeOffset));
5133 JumpCodeObject(code, jump_mode);
5134#endif // V8_ENABLE_LEAPTIERING
5135}
5136
5137#if V8_OS_ZOS
5138// Helper for CallApiFunctionAndReturn().
5139void MacroAssembler::zosStoreReturnAddressAndCall(Register target,
5140 Register scratch) {
5141 DCHECK(target == r3 || target == r4);
5142 // Shuffle the arguments from Linux arg registers to XPLINK arg regs.
5143 mov(r1, r2);
5144 if (target == r3) {
5145 mov(r2, r3);
5146 } else {
5147 mov(r2, r3);
5148 mov(r3, r4);
5149 }
5150
5151 // Update System Stack Pointer with the appropriate XPLINK stack bias.
5152 lay(r4, MemOperand(sp, -kStackPointerBias));
5153
5154 // Preserve r7 by placing into callee-saved register r13
5155 mov(r13, r7);
5156
5157 // Load function pointer from slot 1 of fn desc.
5158 LoadU64(ip, MemOperand(scratch, kSystemPointerSize));
5159 // Load environment from slot 0 of fn desc.
5160 LoadU64(r5, MemOperand(scratch));
5161
5163
5164 // Restore r7 from r13
5165 mov(r7, r13);
5166}
5167#endif // V8_OS_ZOS
5168
5169#ifdef V8_ENABLE_WEBASSEMBLY
5170
5171void MacroAssembler::ResolveWasmCodePointer(Register target) {
5172 ASM_CODE_COMMENT(this);
5173 static_assert(!V8_ENABLE_SANDBOX_BOOL);
5174 ExternalReference global_jump_table =
5175 ExternalReference::wasm_code_pointer_table();
5176 UseScratchRegisterScope temps(this);
5177 Register scratch = temps.Acquire();
5178 Move(scratch, global_jump_table);
5179 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == kSystemPointerSize);
5180 ShiftLeftU32(target, target, Operand(kSystemPointerSizeLog2));
5181 LoadU64(target, MemOperand(scratch, target));
5182}
5183
5184void MacroAssembler::CallWasmCodePointer(Register target,
5185 CallJumpMode call_jump_mode) {
5186 ResolveWasmCodePointer(target);
5187 if (call_jump_mode == CallJumpMode::kTailCall) {
5188 Jump(target);
5189 } else {
5190 Call(target);
5191 }
5192}
5193
5194void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
5195 static_assert(sizeof(WasmCodePointer) == 4);
5196 LoadU32(dst, src);
5197}
5198
5199#endif
5200
5201void MacroAssembler::StoreReturnAddressAndCall(Register target) {
5202 // This generates the final instruction sequence for calls to C functions
5203 // once an exit frame has been constructed.
5204 //
5205 // Note that this assumes the caller code (i.e. the InstructionStream object
5206 // currently being generated) is immovable or that the callee function cannot
5207 // trigger GC, since the callee function will return to it.
5208
5209#if V8_OS_ZOS
5210 Register ra = r7;
5211#else
5212 Register ra = r14;
5213#endif
5214 Label return_label;
5215 larl(ra, &return_label); // Generate the return addr of call later.
5216#if V8_OS_ZOS
5217 // Mimic the XPLINK expected no-op (2-byte) instruction at the return point.
5218 // When the C call returns, the 2 bytes are skipped and then the proper
5219 // instruction is executed.
5220 lay(ra, MemOperand(ra, -2));
5221#endif
5223
5224 // The zLinux ABI requires the caller's frame to have sufficient space for
5225 // the callee-preserved register save area.
5226 b(target);
5227 bind(&return_label);
5228}
5229
5230// Check if the code object is marked for deoptimization. If it is, then it
5231// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
5232// to:
5233// 1. read from memory the word that contains that bit, which can be found in
5234// the flags in the referenced {Code} object;
5235// 2. test kMarkedForDeoptimizationBit in those flags; and
5236// 3. if it is not zero then it jumps to the builtin.
5237//
5238// Note: With leaptiering we simply assert the code is not deoptimized.
5239void MacroAssembler::BailoutIfDeoptimized(Register scratch) {
5240 int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
5241 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
5242 LoadTaggedField(scratch,
5243 MemOperand(kJavaScriptCallCodeStartRegister, offset));
5244 TestCodeIsMarkedForDeoptimization(scratch, scratch);
5245 }
5246#ifdef V8_ENABLE_LEAPTIERING
5247 if (v8_flags.debug_code) {
5248 Assert(to_condition(kZero), AbortReason::kInvalidDeoptimizedCode);
5249 }
5250#else
5251 Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
5252 RelocInfo::CODE_TARGET, ne);
5253#endif
5254}
5255
5256void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
5257 DeoptimizeKind kind, Label* ret,
5258 Label*) {
5259 ASM_CODE_COMMENT(this);
5262 Call(ip);
5266}
5267
5268void MacroAssembler::Trap() { stop(); }
5269void MacroAssembler::DebugBreak() { stop(); }
5270
5271void MacroAssembler::CountLeadingZerosU32(Register dst, Register src,
5272 Register scratch_pair) {
5273 llgfr(dst, src);
5274 flogr(scratch_pair,
5275 dst); // will modify a register pair scratch and scratch + 1
5276 AddS32(dst, scratch_pair, Operand(-32));
5277}
5278
5279void MacroAssembler::CountLeadingZerosU64(Register dst, Register src,
5280 Register scratch_pair) {
5281 flogr(scratch_pair,
5282 src); // will modify a register pair scratch and scratch + 1
5283 mov(dst, scratch_pair);
5284}
5285
5286void MacroAssembler::CountTrailingZerosU32(Register dst, Register src,
5287 Register scratch_pair) {
5288 Register scratch0 = scratch_pair;
5289 Register scratch1 = Register::from_code(scratch_pair.code() + 1);
5290 DCHECK(!AreAliased(dst, scratch0, scratch1));
5291 DCHECK(!AreAliased(src, scratch0, scratch1));
5292
5293 Label done;
5294 // Check if src is all zeros.
5295 ltr(scratch1, src);
5296 mov(dst, Operand(32));
5297 beq(&done);
5298 llgfr(scratch1, scratch1);
5299 lcgr(scratch0, scratch1);
5300 ngr(scratch1, scratch0);
5301 flogr(scratch0, scratch1);
5302 mov(dst, Operand(63));
5303 SubS64(dst, scratch0);
5304 bind(&done);
5305}
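// The trailing-zero counts work by isolating the lowest set bit with
// x & -x (LCGR + NGR) and then converting its position via FLOGR:
// trailing zeros = 63 - leading zeros of the isolated bit. An all-zero input
// is handled up front by the LTR/LTGR test.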
5306
5307void MacroAssembler::CountTrailingZerosU64(Register dst, Register src,
5308 Register scratch_pair) {
5309 Register scratch0 = scratch_pair;
5310 Register scratch1 = Register::from_code(scratch_pair.code() + 1);
5311 DCHECK(!AreAliased(dst, scratch0, scratch1));
5312 DCHECK(!AreAliased(src, scratch0, scratch1));
5313
5314 Label done;
5315 // Check if src is all zeros.
5316 ltgr(scratch1, src);
5317 mov(dst, Operand(64));
5318 beq(&done);
5319 lcgr(scratch0, scratch1);
5320 ngr(scratch0, scratch1);
5321 flogr(scratch0, scratch0);
5322 mov(dst, Operand(63));
5323 SubS64(dst, scratch0);
5324 bind(&done);
5325}
5326
5327void MacroAssembler::AtomicCmpExchangeHelper(Register addr, Register output,
5328 Register old_value,
5329 Register new_value, int start,
5330 int end, int shift_amount,
5331 int offset, Register temp0,
5332 Register temp1) {
5333 LoadU32(temp0, MemOperand(addr, offset));
5334 llgfr(temp1, temp0);
5335 RotateInsertSelectBits(temp0, old_value, Operand(start), Operand(end),
5336 Operand(shift_amount), false);
5337 RotateInsertSelectBits(temp1, new_value, Operand(start), Operand(end),
5338 Operand(shift_amount), false);
5339 CmpAndSwap(temp0, temp1, MemOperand(addr, offset));
5340 RotateInsertSelectBits(output, temp0, Operand(start + shift_amount),
5341 Operand(end + shift_amount),
5342 Operand(64 - shift_amount), true);
5343}
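// The helper above emulates a sub-word compare-and-swap on the aligned 32-bit
// word containing the lane: RISBG splices the expected and replacement lane
// values into copies of the current word, CS swaps the whole word, and the
// final RISBG extracts the (possibly updated) lane back into output.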
5344
5345void MacroAssembler::AtomicCmpExchangeU8(Register addr, Register output,
5346 Register old_value, Register new_value,
5347 Register temp0, Register temp1) {
5348#ifdef V8_TARGET_BIG_ENDIAN
5349#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
5350 { \
5351 constexpr int idx = (i); \
5352 static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
5353 constexpr int start = 32 + 8 * idx; \
5354 constexpr int end = start + 7; \
5355 constexpr int shift_amount = (3 - idx) * 8; \
5356 AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
5357 shift_amount, -idx, temp0, temp1); \
5358 }
5359#else
5360#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
5361 { \
5362 constexpr int idx = (i); \
5363 static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
5364 constexpr int start = 32 + 8 * (3 - idx); \
5365 constexpr int end = start + 7; \
5366 constexpr int shift_amount = idx * 8; \
5367 AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
5368 shift_amount, -idx, temp0, temp1); \
5369 }
5370#endif
5371
5372 Label one, two, three, done;
5373 tmll(addr, Operand(3));
5374 b(Condition(1), &three);
5375 b(Condition(2), &two);
5376 b(Condition(4), &one);
5377 /* ending with 0b00 */
5378 ATOMIC_COMP_EXCHANGE_BYTE(0);
5379 b(&done);
5380 /* ending with 0b01 */
5381 bind(&one);
5382 ATOMIC_COMP_EXCHANGE_BYTE(1);
5383 b(&done);
5384 /* ending with 0b10 */
5385 bind(&two);
5386 ATOMIC_COMP_EXCHANGE_BYTE(2);
5387 b(&done);
5388 /* ending with 0b11 */
5389 bind(&three);
5390 ATOMIC_COMP_EXCHANGE_BYTE(3);
5391 bind(&done);
5392}
5393
5394void MacroAssembler::AtomicCmpExchangeU16(Register addr, Register output,
5395 Register old_value,
5396 Register new_value, Register temp0,
5397 Register temp1) {
5398#ifdef V8_TARGET_BIG_ENDIAN
5399#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
5400 { \
5401 constexpr int idx = (i); \
5402 static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
5403 constexpr int start = 32 + 16 * idx; \
5404 constexpr int end = start + 15; \
5405 constexpr int shift_amount = (1 - idx) * 16; \
5406 AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
5407 shift_amount, -idx * 2, temp0, temp1); \
5408 }
5409#else
5410#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
5411 { \
5412 constexpr int idx = (i); \
5413 static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
5414 constexpr int start = 32 + 16 * (1 - idx); \
5415 constexpr int end = start + 15; \
5416 constexpr int shift_amount = idx * 16; \
5417 AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
5418 shift_amount, -idx * 2, temp0, temp1); \
5419 }
5420#endif
5421
5422 Label two, done;
5423 tmll(addr, Operand(3));
5424 b(Condition(2), &two);
5425 ATOMIC_COMP_EXCHANGE_HALFWORD(0);
5426 b(&done);
5427 bind(&two);
5428 ATOMIC_COMP_EXCHANGE_HALFWORD(1);
5429 bind(&done);
5430}
5431
5432void MacroAssembler::AtomicExchangeHelper(Register addr, Register value,
5433 Register output, int start, int end,
5434 int shift_amount, int offset,
5435 Register scratch) {
5436 Label do_cs;
5437 LoadU32(output, MemOperand(addr, offset));
5438 bind(&do_cs);
5439 llgfr(scratch, output);
5440 RotateInsertSelectBits(scratch, value, Operand(start), Operand(end),
5441 Operand(shift_amount), false);
5442 csy(output, scratch, MemOperand(addr, offset));
5443 bne(&do_cs, Label::kNear);
5444 srl(output, Operand(shift_amount));
5445}
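// The exchange helper loops on CS: it splices the new lane value into the
// containing word with RISBG and retries until the word has not changed
// underneath it; the old lane value is then shifted down into output.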
5446
5447void MacroAssembler::AtomicExchangeU8(Register addr, Register value,
5448 Register output, Register scratch) {
5449#ifdef V8_TARGET_BIG_ENDIAN
5450#define ATOMIC_EXCHANGE_BYTE(i) \
5451 { \
5452 constexpr int idx = (i); \
5453 static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
5454 constexpr int start = 32 + 8 * idx; \
5455 constexpr int end = start + 7; \
5456 constexpr int shift_amount = (3 - idx) * 8; \
5457 AtomicExchangeHelper(addr, value, output, start, end, shift_amount, -idx, \
5458 scratch); \
5459 }
5460#else
5461#define ATOMIC_EXCHANGE_BYTE(i) \
5462 { \
5463 constexpr int idx = (i); \
5464 static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
5465 constexpr int start = 32 + 8 * (3 - idx); \
5466 constexpr int end = start + 7; \
5467 constexpr int shift_amount = idx * 8; \
5468 AtomicExchangeHelper(addr, value, output, start, end, shift_amount, -idx, \
5469 scratch); \
5470 }
5471#endif
5472 Label three, two, one, done;
5473 tmll(addr, Operand(3));
5474 b(Condition(1), &three);
5475 b(Condition(2), &two);
5476 b(Condition(4), &one);
5477
5478 // end with 0b00
5479 ATOMIC_EXCHANGE_BYTE(0);
5480 b(&done);
5481
5482 // ending with 0b01
5483 bind(&one);
5484 ATOMIC_EXCHANGE_BYTE(1);
5485 b(&done);
5486
5487 // ending with 0b10
5488 bind(&two);
5489 ATOMIC_EXCHANGE_BYTE(2);
5490 b(&done);
5491
5492 // ending with 0b11
5493 bind(&three);
5494 ATOMIC_EXCHANGE_BYTE(3);
5495
5496 bind(&done);
5497}
5498
5499void MacroAssembler::AtomicExchangeU16(Register addr, Register value,
5500 Register output, Register scratch) {
5501#ifdef V8_TARGET_BIG_ENDIAN
5502#define ATOMIC_EXCHANGE_HALFWORD(i) \
5503 { \
5504 constexpr int idx = (i); \
5505 static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
5506 constexpr int start = 32 + 16 * idx; \
5507 constexpr int end = start + 15; \
5508 constexpr int shift_amount = (1 - idx) * 16; \
5509 AtomicExchangeHelper(addr, value, output, start, end, shift_amount, \
5510 -idx * 2, scratch); \
5511 }
5512#else
5513#define ATOMIC_EXCHANGE_HALFWORD(i) \
5514 { \
5515 constexpr int idx = (i); \
5516 static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
5517 constexpr int start = 32 + 16 * (1 - idx); \
5518 constexpr int end = start + 15; \
5519 constexpr int shift_amount = idx * 16; \
5520 AtomicExchangeHelper(addr, value, output, start, end, shift_amount, \
5521 -idx * 2, scratch); \
5522 }
5523#endif
5524 Label two, done;
5525 tmll(addr, Operand(3));
5526 b(Condition(2), &two);
5527
5528 // end with 0b00
5529 ATOMIC_EXCHANGE_HALFWORD(0);
5530 b(&done);
5531
5532 // ending with 0b10
5533 bind(&two);
5534 ATOMIC_EXCHANGE_HALFWORD(1);
5535
5536 bind(&done);
5537}
5538
5539// Simd Support.
5540void MacroAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src) {
5541 vrep(dst, src, Operand(0), Condition(3));
5542}
5543
5544void MacroAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src) {
5545 vrep(dst, src, Operand(0), Condition(2));
5546}
5547
5548void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) {
5549 vlvg(dst, src, MemOperand(r0, 0), Condition(3));
5550 vrep(dst, dst, Operand(0), Condition(3));
5551}
5552
5553void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) {
5554 vlvg(dst, src, MemOperand(r0, 0), Condition(2));
5555 vrep(dst, dst, Operand(0), Condition(2));
5556}
5557
5558void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) {
5559 vlvg(dst, src, MemOperand(r0, 0), Condition(1));
5560 vrep(dst, dst, Operand(0), Condition(1));
5561}
5562
5563void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) {
5564 vlvg(dst, src, MemOperand(r0, 0), Condition(0));
5565 vrep(dst, dst, Operand(0), Condition(0));
5566}
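// The integer splats load the scalar into lane 0 with VLVG and then replicate
// it with VREP; the trailing Condition operand selects the element size
// (0 = byte, 1 = halfword, 2 = word, 3 = doubleword).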
5567
5568void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src,
5569 uint8_t imm_lane_idx, Register) {
5570 vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3));
5571}
5572
5573void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src,
5574 uint8_t imm_lane_idx, Register) {
5575 vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2));
5576}
5577
5578void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src,
5579 uint8_t imm_lane_idx, Register) {
5580 vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
5581}
5582
5583void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src,
5584 uint8_t imm_lane_idx, Register) {
5585 vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
5586}
5587
5588void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src,
5589 uint8_t imm_lane_idx, Register) {
5590 vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
5591}
5592
5593void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src,
5594 uint8_t imm_lane_idx, Register scratch) {
5595 vlgv(scratch, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
5596 lghr(dst, scratch);
5597}
5598
5599void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src,
5600 uint8_t imm_lane_idx, Register) {
5601 vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
5602}
5603
5604void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src,
5605 uint8_t imm_lane_idx, Register scratch) {
5606 vlgv(scratch, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
5607 lgbr(dst, scratch);
5608}
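// Lane indices are mirrored (1 - idx, 3 - idx, ...) because the vector
// elements are numbered from the most significant end on this big-endian
// target, while the WebAssembly lane numbering starts at the least significant
// end.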
5609
5610void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
5611 DoubleRegister src2, uint8_t imm_lane_idx,
5612 Register scratch) {
5613 vlgv(scratch, src2, MemOperand(r0, 0), Condition(3));
5614 if (src1 != dst) {
5615 vlr(dst, src1, Condition(0), Condition(0), Condition(0));
5616 }
5617 vlvg(dst, scratch, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
5618}
5619
5620void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
5621 DoubleRegister src2, uint8_t imm_lane_idx,
5622 Register scratch) {
5623 vlgv(scratch, src2, MemOperand(r0, 0), Condition(2));
5624 if (src1 != dst) {
5625 vlr(dst, src1, Condition(0), Condition(0), Condition(0));
5626 }
5627 vlvg(dst, scratch, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
5628}
5629
5630void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1,
5631 Register src2, uint8_t imm_lane_idx,
5632 Register) {
5633 if (src1 != dst) {
5634 vlr(dst, src1, Condition(0), Condition(0), Condition(0));
5635 }
5636 vlvg(dst, src2, MemOperand(r0, 1 - imm_lane_idx), Condition(3));
5637}
5638
5639void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1,
5640 Register src2, uint8_t imm_lane_idx,
5641 Register) {
5642 if (src1 != dst) {
5643 vlr(dst, src1, Condition(0), Condition(0), Condition(0));
5644 }
5645 vlvg(dst, src2, MemOperand(r0, 3 - imm_lane_idx), Condition(2));
5646}
5647
5648void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1,
5649 Register src2, uint8_t imm_lane_idx,
5650 Register) {
5651 if (src1 != dst) {
5652 vlr(dst, src1, Condition(0), Condition(0), Condition(0));
5653 }
5654 vlvg(dst, src2, MemOperand(r0, 7 - imm_lane_idx), Condition(1));
5655}
5656
5657void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
5658 Register src2, uint8_t imm_lane_idx,
5659 Register) {
5660 if (src1 != dst) {
5661 vlr(dst, src1, Condition(0), Condition(0), Condition(0));
5662 }
5663 vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
5664}
5665
5666void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) {
5667 vno(dst, src, src, Condition(0), Condition(0), Condition(0));
5668}
5669
5670void MacroAssembler::S128Zero(Simd128Register dst, Simd128Register src) {
5671 vx(dst, src, src, Condition(0), Condition(0), Condition(0));
5672}
5673
5674void MacroAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) {
5675 vceq(dst, src, src, Condition(0), Condition(3));
5676}
5677
5678void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1,
5679 Simd128Register src2, Simd128Register mask) {
5680 vsel(dst, src1, src2, mask, Condition(0), Condition(0));
5681}
5682
5683#define SIMD_UNOP_LIST_VRR_A(V) \
5684 V(F64x2Abs, vfpso, 2, 0, 3) \
5685 V(F64x2Neg, vfpso, 0, 0, 3) \
5686 V(F64x2Sqrt, vfsq, 0, 0, 3) \
5687 V(F64x2Ceil, vfi, 6, 0, 3) \
5688 V(F64x2Floor, vfi, 7, 0, 3) \
5689 V(F64x2Trunc, vfi, 5, 0, 3) \
5690 V(F64x2NearestInt, vfi, 4, 0, 3) \
5691 V(F32x4Abs, vfpso, 2, 0, 2) \
5692 V(F32x4Neg, vfpso, 0, 0, 2) \
5693 V(F32x4Sqrt, vfsq, 0, 0, 2) \
5694 V(F32x4Ceil, vfi, 6, 0, 2) \
5695 V(F32x4Floor, vfi, 7, 0, 2) \
5696 V(F32x4Trunc, vfi, 5, 0, 2) \
5697 V(F32x4NearestInt, vfi, 4, 0, 2) \
5698 V(I64x2Abs, vlp, 0, 0, 3) \
5699 V(I64x2Neg, vlc, 0, 0, 3) \
5700 V(I64x2SConvertI32x4Low, vupl, 0, 0, 2) \
5701 V(I64x2SConvertI32x4High, vuph, 0, 0, 2) \
5702 V(I64x2UConvertI32x4Low, vupll, 0, 0, 2) \
5703 V(I64x2UConvertI32x4High, vuplh, 0, 0, 2) \
5704 V(I32x4Abs, vlp, 0, 0, 2) \
5705 V(I32x4Neg, vlc, 0, 0, 2) \
5706 V(I32x4SConvertI16x8Low, vupl, 0, 0, 1) \
5707 V(I32x4SConvertI16x8High, vuph, 0, 0, 1) \
5708 V(I32x4UConvertI16x8Low, vupll, 0, 0, 1) \
5709 V(I32x4UConvertI16x8High, vuplh, 0, 0, 1) \
5710 V(I16x8Abs, vlp, 0, 0, 1) \
5711 V(I16x8Neg, vlc, 0, 0, 1) \
5712 V(I16x8SConvertI8x16Low, vupl, 0, 0, 0) \
5713 V(I16x8SConvertI8x16High, vuph, 0, 0, 0) \
5714 V(I16x8UConvertI8x16Low, vupll, 0, 0, 0) \
5715 V(I16x8UConvertI8x16High, vuplh, 0, 0, 0) \
5716 V(I8x16Abs, vlp, 0, 0, 0) \
5717 V(I8x16Neg, vlc, 0, 0, 0) \
5718 V(I8x16Popcnt, vpopct, 0, 0, 0)
5719
5720#define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \
5721 void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
5722 op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \
5723 }
5724SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
5725#undef EMIT_SIMD_UNOP_VRR_A
5726#undef SIMD_UNOP_LIST_VRR_A
5727
5728#define SIMD_BINOP_LIST_VRR_B(V) \
5729 V(I64x2Eq, vceq, 0, 3) \
5730 V(I64x2GtS, vch, 0, 3) \
5731 V(I32x4Eq, vceq, 0, 2) \
5732 V(I32x4GtS, vch, 0, 2) \
5733 V(I32x4GtU, vchl, 0, 2) \
5734 V(I16x8Eq, vceq, 0, 1) \
5735 V(I16x8GtS, vch, 0, 1) \
5736 V(I16x8GtU, vchl, 0, 1) \
5737 V(I8x16Eq, vceq, 0, 0) \
5738 V(I8x16GtS, vch, 0, 0) \
5739 V(I8x16GtU, vchl, 0, 0)
5740
5741#define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \
5742 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
5743 Simd128Register src2) { \
5744 op(dst, src1, src2, Condition(c1), Condition(c2)); \
5745 }
5746SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
5747#undef EMIT_SIMD_BINOP_VRR_B
5748#undef SIMD_BINOP_LIST_VRR_B
5749
5750#define SIMD_BINOP_LIST_VRR_C(V) \
5751 V(F64x2Add, vfa, 0, 0, 3) \
5752 V(F64x2Sub, vfs, 0, 0, 3) \
5753 V(F64x2Mul, vfm, 0, 0, 3) \
5754 V(F64x2Div, vfd, 0, 0, 3) \
5755 V(F64x2Min, vfmin, 1, 0, 3) \
5756 V(F64x2Max, vfmax, 1, 0, 3) \
5757 V(F64x2Eq, vfce, 0, 0, 3) \
5758 V(F64x2Pmin, vfmin, 3, 0, 3) \
5759 V(F64x2Pmax, vfmax, 3, 0, 3) \
5760 V(F32x4Add, vfa, 0, 0, 2) \
5761 V(F32x4Sub, vfs, 0, 0, 2) \
5762 V(F32x4Mul, vfm, 0, 0, 2) \
5763 V(F32x4Div, vfd, 0, 0, 2) \
5764 V(F32x4Min, vfmin, 1, 0, 2) \
5765 V(F32x4Max, vfmax, 1, 0, 2) \
5766 V(F32x4Eq, vfce, 0, 0, 2) \
5767 V(F32x4Pmin, vfmin, 3, 0, 2) \
5768 V(F32x4Pmax, vfmax, 3, 0, 2) \
5769 V(I64x2Add, va, 0, 0, 3) \
5770 V(I64x2Sub, vs, 0, 0, 3) \
5771 V(I32x4Add, va, 0, 0, 2) \
5772 V(I32x4Sub, vs, 0, 0, 2) \
5773 V(I32x4Mul, vml, 0, 0, 2) \
5774 V(I32x4MinS, vmn, 0, 0, 2) \
5775 V(I32x4MinU, vmnl, 0, 0, 2) \
5776 V(I32x4MaxS, vmx, 0, 0, 2) \
5777 V(I32x4MaxU, vmxl, 0, 0, 2) \
5778 V(I16x8Add, va, 0, 0, 1) \
5779 V(I16x8Sub, vs, 0, 0, 1) \
5780 V(I16x8Mul, vml, 0, 0, 1) \
5781 V(I16x8MinS, vmn, 0, 0, 1) \
5782 V(I16x8MinU, vmnl, 0, 0, 1) \
5783 V(I16x8MaxS, vmx, 0, 0, 1) \
5784 V(I16x8MaxU, vmxl, 0, 0, 1) \
5785 V(I16x8RoundingAverageU, vavgl, 0, 0, 1) \
5786 V(I8x16Add, va, 0, 0, 0) \
5787 V(I8x16Sub, vs, 0, 0, 0) \
5788 V(I8x16MinS, vmn, 0, 0, 0) \
5789 V(I8x16MinU, vmnl, 0, 0, 0) \
5790 V(I8x16MaxS, vmx, 0, 0, 0) \
5791 V(I8x16MaxU, vmxl, 0, 0, 0) \
5792 V(I8x16RoundingAverageU, vavgl, 0, 0, 0) \
5793 V(S128And, vn, 0, 0, 0) \
5794 V(S128Or, vo, 0, 0, 0) \
5795 V(S128Xor, vx, 0, 0, 0) \
5796 V(S128AndNot, vnc, 0, 0, 0)
5797
5798#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
5799 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
5800 Simd128Register src2) { \
5801 op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \
5802 }
5803SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
5804#undef EMIT_SIMD_BINOP_VRR_C
5805#undef SIMD_BINOP_LIST_VRR_C
5806
5807#define SIMD_SHIFT_LIST(V) \
5808 V(I64x2Shl, veslv, 3) \
5809 V(I64x2ShrS, vesrav, 3) \
5810 V(I64x2ShrU, vesrlv, 3) \
5811 V(I32x4Shl, veslv, 2) \
5812 V(I32x4ShrS, vesrav, 2) \
5813 V(I32x4ShrU, vesrlv, 2) \
5814 V(I16x8Shl, veslv, 1) \
5815 V(I16x8ShrS, vesrav, 1) \
5816 V(I16x8ShrU, vesrlv, 1) \
5817 V(I8x16Shl, veslv, 0) \
5818 V(I8x16ShrS, vesrav, 0) \
5819 V(I8x16ShrU, vesrlv, 0)
5820
5821#define EMIT_SIMD_SHIFT(name, op, c1) \
5822 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
5823 Register src2, Simd128Register scratch) { \
5824 vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1)); \
5825 vrep(scratch, scratch, Operand(0), Condition(c1)); \
5826 op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1)); \
5827 } \
5828 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
5829 const Operand& src2, Register scratch1, \
5830 Simd128Register scratch2) { \
5831 mov(scratch1, src2); \
5832 name(dst, src1, scratch1, scratch2); \
5833 }
5834SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
5835#undef EMIT_SIMD_SHIFT
5836#undef SIMD_SHIFT_LIST
5837
5838#define SIMD_EXT_MUL_LIST(V) \
5839 V(I64x2ExtMulLowI32x4S, vme, vmo, vmrl, 2) \
5840 V(I64x2ExtMulHighI32x4S, vme, vmo, vmrh, 2) \
5841 V(I64x2ExtMulLowI32x4U, vmle, vmlo, vmrl, 2) \
5842 V(I64x2ExtMulHighI32x4U, vmle, vmlo, vmrh, 2) \
5843 V(I32x4ExtMulLowI16x8S, vme, vmo, vmrl, 1) \
5844 V(I32x4ExtMulHighI16x8S, vme, vmo, vmrh, 1) \
5845 V(I32x4ExtMulLowI16x8U, vmle, vmlo, vmrl, 1) \
5846 V(I32x4ExtMulHighI16x8U, vmle, vmlo, vmrh, 1) \
5847 V(I16x8ExtMulLowI8x16S, vme, vmo, vmrl, 0) \
5848 V(I16x8ExtMulHighI8x16S, vme, vmo, vmrh, 0) \
5849 V(I16x8ExtMulLowI8x16U, vmle, vmlo, vmrl, 0) \
5850 V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0)
5851
5852#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode) \
5853 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
5854 Simd128Register src2, Simd128Register scratch) { \
5855 mul_even(scratch, src1, src2, Condition(0), Condition(0), \
5856 Condition(mode)); \
5857 mul_odd(dst, src1, src2, Condition(0), Condition(0), Condition(mode)); \
5858 merge(dst, scratch, dst, Condition(0), Condition(0), Condition(mode + 1)); \
5859 }
5860SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
5861#undef EMIT_SIMD_EXT_MUL
5862#undef SIMD_EXT_MUL_LIST
5863
5864#define SIMD_ALL_TRUE_LIST(V) \
5865 V(I64x2AllTrue, 3) \
5866 V(I32x4AllTrue, 2) \
5867 V(I16x8AllTrue, 1) \
5868 V(I8x16AllTrue, 0)
5869
5870#define EMIT_SIMD_ALL_TRUE(name, mode) \
5871 void MacroAssembler::name(Register dst, Simd128Register src, \
5872 Register scratch1, Simd128Register scratch2) { \
5873 mov(scratch1, Operand(1)); \
5874 xgr(dst, dst); \
5875 vx(scratch2, scratch2, scratch2, Condition(0), Condition(0), \
5876 Condition(2)); \
5877 vceq(scratch2, src, scratch2, Condition(0), Condition(mode)); \
5878 vtm(scratch2, scratch2, Condition(0), Condition(0), Condition(0)); \
5879 locgr(Condition(8), dst, scratch1); \
5880 }
5881SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
5882#undef EMIT_SIMD_ALL_TRUE
5883#undef SIMD_ALL_TRUE_LIST
5884
5885#define SIMD_QFM_LIST(V) \
5886 V(F64x2Qfma, vfma, 3) \
5887 V(F64x2Qfms, vfnms, 3) \
5888 V(F32x4Qfma, vfma, 2) \
5889 V(F32x4Qfms, vfnms, 2)
5890
5891#define EMIT_SIMD_QFM(name, op, c1) \
5892 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
5893 Simd128Register src2, Simd128Register src3) { \
5894 op(dst, src1, src2, src3, Condition(c1), Condition(0)); \
5895 }
5896SIMD_QFM_LIST(EMIT_SIMD_QFM)
5897#undef EMIT_SIMD_QFM
5898#undef SIMD_QFM_LIST
5899
5900void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
5901 Simd128Register src2, Register scratch1,
5902 Register scratch2, Register scratch3) {
5903 Register scratch_1 = scratch1;
5904 Register scratch_2 = scratch2;
5905 for (int i = 0; i < 2; i++) {
5906 vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
5907 vlgv(scratch_2, src2, MemOperand(r0, i), Condition(3));
5908 MulS64(scratch_1, scratch_2);
5909 scratch_1 = scratch2;
5910 scratch_2 = scratch3;
5911 }
5912 vlvgp(dst, scratch1, scratch2);
5913}
5914
5915void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
5916 Simd128Register src2) {
5917 vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3));
5918 vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
5919}
5920
5921void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
5922 Simd128Register src2) {
5923 vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3));
5924}
5925
5926void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
5927 Simd128Register src2) {
5928 vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3));
5929}
5930
5931void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
5932 Simd128Register src2) {
5933 vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2));
5934 vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
5935}
5936
5937void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
5938 Simd128Register src2) {
5939 vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2));
5940}
5941
5942void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
5943 Simd128Register src2) {
5944 vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2));
5945}
5946
5947void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
5948 Simd128Register src2) {
5949 vceq(dst, src1, src2, Condition(0), Condition(3));
5950 vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
5951}
5952
5953void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
5954 Simd128Register src2) {
5955 // Compute !(B > A) which is equal to A >= B.
5956 vch(dst, src2, src1, Condition(0), Condition(3));
5957 vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
5958}
5959
5960void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
5961 Simd128Register src2) {
5962 vceq(dst, src1, src2, Condition(0), Condition(2));
5963 vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
5964}
5965
5966void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
5967 Simd128Register src2) {
5968 // Compute !(B > A) which is equal to A >= B.
5969 vch(dst, src2, src1, Condition(0), Condition(2));
5970 vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
5971}
5972
5973void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
5974 Simd128Register src2, Simd128Register scratch) {
5975 vceq(scratch, src1, src2, Condition(0), Condition(2));
5976 vchl(dst, src1, src2, Condition(0), Condition(2));
5977 vo(dst, dst, scratch, Condition(0), Condition(0), Condition(2));
5978}
5979
5980void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
5981 Simd128Register src2) {
5982 vceq(dst, src1, src2, Condition(0), Condition(1));
5983 vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
5984}
5985
5986void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
5987 Simd128Register src2) {
5988 // Compute !(B > A) which is equal to A >= B.
5989 vch(dst, src2, src1, Condition(0), Condition(1));
5990 vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
5991}
5992
5993void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
5994 Simd128Register src2, Simd128Register scratch) {
5995 vceq(scratch, src1, src2, Condition(0), Condition(1));
5996 vchl(dst, src1, src2, Condition(0), Condition(1));
5997 vo(dst, dst, scratch, Condition(0), Condition(0), Condition(1));
5998}
5999
6000void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
6001 Simd128Register src2) {
6002 vceq(dst, src1, src2, Condition(0), Condition(0));
6003 vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
6004}
6005
6006void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
6007 Simd128Register src2) {
6008 // Compute !(B > A) which is equal to A >= B.
6009 vch(dst, src2, src1, Condition(0), Condition(0));
6010 vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
6011}
6012
6013void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
6014 Simd128Register src2, Simd128Register scratch) {
6015 vceq(scratch, src1, src2, Condition(0), Condition(0));
6016 vchl(dst, src1, src2, Condition(0), Condition(0));
6017 vo(dst, dst, scratch, Condition(0), Condition(0), Condition(0));
6018}
6019
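// The BitMask helpers below gather the per-lane sign bits into a scalar. The
// 64-bit immediate loaded into scratch1 is a bit-selection pattern for vbperm:
// each byte names the bit position of one lane's sign bit within the 128-bit
// source, while 0x80 entries select a constant zero. vbperm collects the
// selected bits and vlgv extracts the resulting mask into dst.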
6020void MacroAssembler::I64x2BitMask(Register dst, Simd128Register src,
6021 Register scratch1, Simd128Register scratch2) {
6022 mov(scratch1, Operand(0x8080808080800040));
6023 vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
6024 vbperm(scratch2, src, scratch2, Condition(0), Condition(0), Condition(0));
6025 vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
6026}
6027
6028void MacroAssembler::I32x4BitMask(Register dst, Simd128Register src,
6029 Register scratch1, Simd128Register scratch2) {
6030 mov(scratch1, Operand(0x8080808000204060));
6031 vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
6032 vbperm(scratch2, src, scratch2, Condition(0), Condition(0), Condition(0));
6033 vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
6034}
6035
6036void MacroAssembler::I16x8BitMask(Register dst, Simd128Register src,
6037 Register scratch1, Simd128Register scratch2) {
6038 mov(scratch1, Operand(0x10203040506070));
6039 vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3));
6040 vbperm(scratch2, src, scratch2, Condition(0), Condition(0), Condition(0));
6041 vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0));
6042}
6043
6044void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst,
6045 Simd128Register src) {
6046 vupl(dst, src, Condition(0), Condition(0), Condition(2));
6047 vcdg(dst, dst, Condition(4), Condition(0), Condition(3));
6048}
6049
6050void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst,
6051 Simd128Register src) {
6052 vupll(dst, src, Condition(0), Condition(0), Condition(2));
6053 vcdlg(dst, dst, Condition(4), Condition(0), Condition(3));
6054}
6055
6056void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src,
6057 Register scratch1, Register scratch2,
6058 Simd128Register scratch3) {
6059 mov(scratch1, Operand(0x4048505860687078));
6060 mov(scratch2, Operand(0x8101820283038));
6061 vlvgp(scratch3, scratch2, scratch1);
6062 vbperm(scratch3, src, scratch3, Condition(0), Condition(0), Condition(0));
6063 vlgv(dst, scratch3, MemOperand(r0, 3), Condition(1));
6064}
6065
6066void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src,
6067 Register scratch) {
6068 mov(dst, Operand(1));
6069 xgr(scratch, scratch);
6070 vtm(src, src, Condition(0), Condition(0), Condition(0));
6071 locgr(Condition(8), dst, scratch);
6072}
6073
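// CONVERT_FLOAT_TO_INT32 is the scalar fallback used when the 32-bit-lane
// vector conversion instructions guarded by VECTOR_ENHANCE_FACILITY_2 are not
// available: each lane is extracted to a GPR, moved to a floating-point
// register, converted with the scalar helper, and inserted back into dst.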
6074#define CONVERT_FLOAT_TO_INT32(convert, dst, src, scratch1, scratch2) \
6075 for (int index = 0; index < 4; index++) { \
6076 vlgv(scratch2, src, MemOperand(r0, index), Condition(2)); \
6077 MovIntToFloat(scratch1, scratch2); \
6078 convert(scratch2, scratch1, kRoundToZero); \
6079 vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
6080 }
6081void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst,
6082 Simd128Register src,
6083 Simd128Register scratch1,
6084 Register scratch2) {
6085 // NaN to 0.
6086 vfce(scratch1, src, src, Condition(0), Condition(0), Condition(2));
6087 vn(dst, src, scratch1, Condition(0), Condition(0), Condition(0));
6088 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
6089 vcgd(dst, dst, Condition(5), Condition(0), Condition(2));
6090 } else {
6091 CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32, dst, dst, scratch1, scratch2)
6092 }
6093}
6094
6095void MacroAssembler::I32x4UConvertF32x4(Simd128Register dst,
6096 Simd128Register src,
6097 Simd128Register scratch1,
6098 Register scratch2) {
6099 // vclgd or ConvertFloat32ToUnsignedInt32 will convert NaN to 0 and negative
6100 // values to 0 automatically.
6101 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
6102 vclgd(dst, src, Condition(5), Condition(0), Condition(2));
6103 } else {
6104 CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32, dst, src, scratch1,
6105 scratch2)
6106 }
6107}
6108#undef CONVERT_FLOAT_TO_INT32
6109
6110#define CONVERT_INT32_TO_FLOAT(convert, dst, src, scratch1, scratch2) \
6111 for (int index = 0; index < 4; index++) { \
6112 vlgv(scratch2, src, MemOperand(r0, index), Condition(2)); \
6113 convert(scratch1, scratch2); \
6114 MovFloatToInt(scratch2, scratch1); \
6115 vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \
6116 }
6117void MacroAssembler::F32x4SConvertI32x4(Simd128Register dst,
6118 Simd128Register src,
6119 Simd128Register scratch1,
6120 Register scratch2) {
6121 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
6122 vcdg(dst, src, Condition(4), Condition(0), Condition(2));
6123 } else {
6124 CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, dst, src, scratch1, scratch2)
6125 }
6126}
6127void MacroAssembler::F32x4UConvertI32x4(Simd128Register dst,
6128 Simd128Register src,
6129 Simd128Register scratch1,
6130 Register scratch2) {
6131 if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
6132 vcdlg(dst, src, Condition(4), Condition(0), Condition(2));
6133 } else {
6134 CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, dst, src, scratch1,
6135 scratch2)
6136 }
6137}
6138#undef CONVERT_INT32_TO_FLOAT
6139
6140void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst,
6141 Simd128Register src1,
6142 Simd128Register src2) {
6143 vpks(dst, src2, src1, Condition(0), Condition(2));
6144}
6145
6146void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst,
6147 Simd128Register src1,
6148 Simd128Register src2) {
6149 vpks(dst, src2, src1, Condition(0), Condition(1));
6150}
6151
6152#define VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, mode) \
6153 vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), \
6154 Condition(0), Condition(mode)); \
6155 vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0), \
6156 Condition(mode)); \
6157 vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode));
6158void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst,
6159 Simd128Register src1,
6160 Simd128Register src2,
6161 Simd128Register scratch) {
6162 // treat inputs as signed, and saturate to unsigned (negative to 0).
6163 VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, 2)
6164 vpkls(dst, dst, scratch, Condition(0), Condition(2));
6165}
6166
6167void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst,
6168 Simd128Register src1,
6169 Simd128Register src2,
6170 Simd128Register scratch) {
6171 // treat inputs as signed, and saturate to unsigned (negative to 0).
6172 VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, 1)
6173 vpkls(dst, dst, scratch, Condition(0), Condition(1));
6174}
6175#undef VECTOR_PACK_UNSIGNED
6176
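// BINOP_EXTRACT implements saturating arithmetic by widening: the high and low
// halves of both inputs are unpacked to the next wider lane size, the
// operation is applied there (where it cannot overflow), and the callers then
// repack the two halves with a saturating pack (vpks/vpkls).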
6177#define BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, op, extract_high, \
6178 extract_low, mode) \
6179 DCHECK(dst != scratch1 && dst != scratch2); \
6180 DCHECK(dst != src1 && dst != src2); \
6181 extract_high(scratch1, src1, Condition(0), Condition(0), Condition(mode)); \
6182 extract_high(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
6183 op(dst, scratch1, scratch2, Condition(0), Condition(0), \
6184 Condition(mode + 1)); \
6185 extract_low(scratch1, src1, Condition(0), Condition(0), Condition(mode)); \
6186 extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
6187 op(scratch1, scratch1, scratch2, Condition(0), Condition(0), \
6188 Condition(mode + 1));
6189void MacroAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1,
6190 Simd128Register src2,
6191 Simd128Register scratch1,
6192 Simd128Register scratch2) {
6193 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuph, vupl, 1)
6194 vpks(dst, dst, scratch1, Condition(0), Condition(2));
6195}
6196
6197void MacroAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1,
6198 Simd128Register src2,
6199 Simd128Register scratch1,
6200 Simd128Register scratch2) {
6201 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuph, vupl, 1)
6202 vpks(dst, dst, scratch1, Condition(0), Condition(2));
6203}
6204
6205void MacroAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1,
6206 Simd128Register src2,
6207 Simd128Register scratch1,
6208 Simd128Register scratch2) {
6209 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuplh, vupll, 1)
6210 vpkls(dst, dst, scratch1, Condition(0), Condition(2));
6211}
6212
6213void MacroAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1,
6214 Simd128Register src2,
6215 Simd128Register scratch1,
6216 Simd128Register scratch2) {
6217 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuplh, vupll, 1)
6218 // negative intermediate values to 0.
6219 vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), Condition(0),
6220 Condition(0));
6221 vmx(dst, kDoubleRegZero, dst, Condition(0), Condition(0), Condition(2));
6222 vmx(scratch1, kDoubleRegZero, scratch1, Condition(0), Condition(0),
6223 Condition(2));
6224 vpkls(dst, dst, scratch1, Condition(0), Condition(2));
6225}
6226
6227void MacroAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1,
6228 Simd128Register src2,
6229 Simd128Register scratch1,
6230 Simd128Register scratch2) {
6231 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuph, vupl, 0)
6232 vpks(dst, dst, scratch1, Condition(0), Condition(1));
6233}
6234
6235void MacroAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1,
6236 Simd128Register src2,
6237 Simd128Register scratch1,
6238 Simd128Register scratch2) {
6239 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuph, vupl, 0)
6240 vpks(dst, dst, scratch1, Condition(0), Condition(1));
6241}
6242
6243void MacroAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1,
6244 Simd128Register src2,
6245 Simd128Register scratch1,
6246 Simd128Register scratch2) {
6247 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuplh, vupll, 0)
6248 vpkls(dst, dst, scratch1, Condition(0), Condition(1));
6249}
6250
6251void MacroAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1,
6252 Simd128Register src2,
6253 Simd128Register scratch1,
6254 Simd128Register scratch2) {
6255 BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuplh, vupll, 0)
6256 // negative intermediate values to 0.
6257 vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), Condition(0),
6258 Condition(0));
6259 vmx(dst, kDoubleRegZero, dst, Condition(0), Condition(0), Condition(1));
6260 vmx(scratch1, kDoubleRegZero, scratch1, Condition(0), Condition(0),
6261 Condition(1));
6262 vpkls(dst, dst, scratch1, Condition(0), Condition(1));
6263}
6264#undef BINOP_EXTRACT
6265
6266void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst,
6267 Simd128Register src,
6268 Simd128Register scratch1,
6269 Register scratch2, Register scratch3,
6270 Register scratch4) {
6271 Register holder = scratch3;
6272 for (int index = 0; index < 2; ++index) {
6273 vlgv(scratch2, src, MemOperand(scratch2, index + 2), Condition(2));
6274 MovIntToFloat(scratch1, scratch2);
6275 ldebr(scratch1, scratch1);
6276 MovDoubleToInt64(holder, scratch1);
6277 holder = scratch4;
6278 }
6279 vlvgp(dst, scratch3, scratch4);
6280}
6281
6282void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst,
6283 Simd128Register src,
6284 Simd128Register scratch1,
6285 Register scratch2, Register scratch3,
6286 Register scratch4) {
6287 Register holder = scratch3;
6288 for (int index = 0; index < 2; ++index) {
6289 vlgv(scratch2, src, MemOperand(r0, index), Condition(3));
6290 MovInt64ToDouble(scratch1, scratch2);
6291 ledbr(scratch1, scratch1);
6292 MovFloatToInt(holder, scratch1);
6293 holder = scratch4;
6294 }
6295 vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
6296 vlvg(dst, scratch3, MemOperand(r0, 2), Condition(2));
6297 vlvg(dst, scratch4, MemOperand(r0, 3), Condition(2));
6298}
6299
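// EXT_ADD_PAIRWISE widens and adds adjacent lanes: the source is multiplied by
// a vector of ones with the even and odd multiply instructions, which produce
// results in the next wider lane size, and the two partial products are added
// so that each even/odd lane pair is summed into a single widened lane.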
6300#define EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, lane_size, mul_even, \
6301 mul_odd) \
6302 CHECK_NE(src, scratch2); \
6303 vrepi(scratch2, Operand(1), Condition(lane_size)); \
6304 mul_even(scratch1, src, scratch2, Condition(0), Condition(0), \
6305 Condition(lane_size)); \
6306 mul_odd(scratch2, src, scratch2, Condition(0), Condition(0), \
6307 Condition(lane_size)); \
6308 va(dst, scratch1, scratch2, Condition(0), Condition(0), \
6309 Condition(lane_size + 1));
6310void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst,
6311 Simd128Register src,
6312 Simd128Register scratch1,
6313 Simd128Register scratch2) {
6314 EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo)
6315}
6316
6317void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst,
6318 Simd128Register src,
6319 Simd128Register /* scratch1 */,
6320 Simd128Register /* scratch2 */) {
6321 // Unnamed scratch parameters are still kept to make this function
6322 // have the same signature as the other ExtAddPairwise functions.
6323 // TF and Liftoff use a uniform Macro for all of them.
6324 // TODO(miladfarca): Add a default argument or separate them in TF and
6325 // Liftoff.
6326 vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), Condition(0),
6327 Condition(3));
6328 vsum(dst, src, kDoubleRegZero, Condition(0), Condition(0), Condition(1));
6329}
6330
6331void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst,
6332 Simd128Register src,
6333 Simd128Register scratch1,
6334 Simd128Register scratch2) {
6335 EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo)
6336}
6337
6338void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst,
6339 Simd128Register src,
6340 Simd128Register scratch1,
6341 Simd128Register scratch2) {
6342 EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vmle, vmlo)
6343}
6344#undef EXT_ADD_PAIRWISE
6345
6346void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst,
6347 Simd128Register src,
6348 Simd128Register scratch) {
6349 // NaN to 0.
6350 vfce(scratch, src, src, Condition(0), Condition(0), Condition(3));
6351 vn(scratch, src, scratch, Condition(0), Condition(0), Condition(0));
6352 vcgd(scratch, scratch, Condition(5), Condition(0), Condition(3));
6353 vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
6354 vpks(dst, dst, scratch, Condition(0), Condition(3));
6355}
6356
6357void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst,
6358 Simd128Register src,
6359 Simd128Register scratch) {
6360 vclgd(scratch, src, Condition(5), Condition(0), Condition(3));
6361 vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
6362 vpkls(dst, dst, scratch, Condition(0), Condition(3));
6363}
6364
6365void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
6366 Register scratch1, Register scratch2) {
6367 mov(scratch1, Operand(low));
6368 mov(scratch2, Operand(high));
6369 vlvgp(dst, scratch2, scratch1);
6370}
6371
6372void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1,
6373 Simd128Register src2, Register scratch1,
6374 Register scratch2, Simd128Register scratch3) {
6375 DCHECK(!AreAliased(src1, scratch3));
6376 DCHECK(!AreAliased(src2, scratch3));
6377 // Saturate the indices to 5 bits. Input indices more than 31 should
6378 // return 0.
6379 vrepi(scratch3, Operand(31), Condition(0));
6380 vmnl(scratch3, src2, scratch3, Condition(0), Condition(0), Condition(0));
6381 // Input needs to be reversed.
6382 vlgv(scratch1, src1, MemOperand(r0, 0), Condition(3));
6383 vlgv(scratch2, src1, MemOperand(r0, 1), Condition(3));
6384 lrvgr(scratch1, scratch1);
6385 lrvgr(scratch2, scratch2);
6386 vlvgp(dst, scratch2, scratch1);
6387 vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), Condition(0),
6388 Condition(0));
6389 vperm(dst, dst, kDoubleRegZero, scratch3, Condition(0), Condition(0));
6390}
6391
6392void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1,
6393 Simd128Register src2, uint64_t high,
6394 uint64_t low, Register scratch1,
6395 Register scratch2, Simd128Register scratch3) {
6396 mov(scratch1, Operand(low));
6397 mov(scratch2, Operand(high));
6398 vlvgp(scratch3, scratch2, scratch1);
6399 vperm(dst, src1, src2, scratch3, Condition(0), Condition(0));
6400}
6401
6402void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1,
6403 Simd128Register src2,
6404 Simd128Register scratch) {
6405 vme(scratch, src1, src2, Condition(0), Condition(0), Condition(1));
6406 vmo(dst, src1, src2, Condition(0), Condition(0), Condition(1));
6407 va(dst, scratch, dst, Condition(0), Condition(0), Condition(2));
6408}
6409
6410void MacroAssembler::I32x4DotI8x16AddS(
6411 Simd128Register dst, Simd128Register src1, Simd128Register src2,
6412 Simd128Register src3, Simd128Register scratch1, Simd128Register scratch2) {
6413 DCHECK_NE(dst, src3);
6414 // I8 -> I16.
6415 vme(scratch1, src1, src2, Condition(0), Condition(0), Condition(0));
6416 vmo(dst, src1, src2, Condition(0), Condition(0), Condition(0));
6417 va(dst, scratch1, dst, Condition(0), Condition(0), Condition(1));
6418 // I16 -> I32.
6419 vrepi(scratch2, Operand(1), Condition(1));
6420 vme(scratch1, dst, scratch2, Condition(0), Condition(0), Condition(1));
6421 vmo(dst, dst, scratch2, Condition(0), Condition(0), Condition(1));
6422 va(dst, scratch1, dst, Condition(0), Condition(0), Condition(2));
6423 // Add src3.
6424 va(dst, dst, src3, Condition(0), Condition(0), Condition(2));
6425}
6426
6427void MacroAssembler::I16x8DotI8x16S(Simd128Register dst, Simd128Register src1,
6428 Simd128Register src2,
6429 Simd128Register scratch) {
6430 vme(scratch, src1, src2, Condition(0), Condition(0), Condition(0));
6431 vmo(dst, src1, src2, Condition(0), Condition(0), Condition(0));
6432 va(dst, scratch, dst, Condition(0), Condition(0), Condition(1));
6433}
6434
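// Q15_MUL_ROAUND performs the rounding Q15 multiply used by I16x8Q15MulRSatS:
// the inputs are unpacked to 32-bit lanes, multiplied, a rounding term of
// 0x4000 (provided by the caller in const_val) is added, and the result is
// shifted arithmetically right by 15 before the caller repacks it with signed
// saturation.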
6435#define Q15_MUL_ROAUND(accumulator, src1, src2, const_val, scratch, unpack) \
6436 unpack(scratch, src1, Condition(0), Condition(0), Condition(1)); \
6437 unpack(accumulator, src2, Condition(0), Condition(0), Condition(1)); \
6438 vml(accumulator, scratch, accumulator, Condition(0), Condition(0), \
6439 Condition(2)); \
6440 va(accumulator, accumulator, const_val, Condition(0), Condition(0), \
6441 Condition(2)); \
6442 vrepi(scratch, Operand(15), Condition(2)); \
6443 vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0), \
6444 Condition(2));
6445void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1,
6446 Simd128Register src2,
6447 Simd128Register scratch1,
6448 Simd128Register scratch2,
6449 Simd128Register scratch3) {
6450 DCHECK(!AreAliased(src1, scratch1, scratch2, scratch3));
6451 DCHECK(!AreAliased(src2, scratch1, scratch2, scratch3));
6452 vrepi(scratch1, Operand(0x4000), Condition(2));
6453 Q15_MUL_ROAUND(scratch2, src1, src2, scratch1, scratch3, vupl)
6454 Q15_MUL_ROAUND(dst, src1, src2, scratch1, scratch3, vuph)
6455 vpks(dst, dst, scratch2, Condition(0), Condition(2));
6456}
6457#undef Q15_MUL_ROAUND
6458
6459// Vector LE Load and Transform instructions.
6460#ifdef V8_TARGET_BIG_ENDIAN
6461#define IS_BIG_ENDIAN true
6462#else
6463#define IS_BIG_ENDIAN false
6464#endif
6465
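// The little-endian load/store helpers below use the byte-reversing vector
// instructions (vlbrrep, vlebr*, vstebr*) when running big-endian with
// VECTOR_ENHANCE_FACILITY_2 and a 12-bit displacement; otherwise they fall
// back to a scalar LE load or store combined with a vector element insert or
// extract.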
6466#define CAN_LOAD_STORE_REVERSE \
6467 IS_BIG_ENDIAN&& CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)
6468
6469#define LOAD_SPLAT_LIST(V) \
6470 V(64x2, vlbrrep, LoadU64LE, 3) \
6471 V(32x4, vlbrrep, LoadU32LE, 2) \
6472 V(16x8, vlbrrep, LoadU16LE, 1) \
6473 V(8x16, vlrep, LoadU8, 0)
6474
6475#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
6476 void MacroAssembler::LoadAndSplat##name##LE( \
6477 Simd128Register dst, const MemOperand& mem, Register scratch) { \
6478 if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
6479 vector_instr(dst, mem, Condition(condition)); \
6480 return; \
6481 } \
6482 scalar_instr(scratch, mem); \
6483 vlvg(dst, scratch, MemOperand(r0, 0), Condition(condition)); \
6484 vrep(dst, dst, Operand(0), Condition(condition)); \
6485 }
6486LOAD_SPLAT_LIST(LOAD_SPLAT)
6487#undef LOAD_SPLAT
6488#undef LOAD_SPLAT_LIST
6489
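// LoadAndExtend##name##LE loads a single 64-bit chunk (byte-reversed when the
// facility is available, otherwise via a scalar LE load) into doubleword 0 of
// dst and then widens its lanes with a signed or unsigned unpack-high
// instruction, matching the Wasm load-extend semantics.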
6490#define LOAD_EXTEND_LIST(V) \
6491 V(32x2U, vuplh, 2) \
6492 V(32x2S, vuph, 2) \
6493 V(16x4U, vuplh, 1) \
6494 V(16x4S, vuph, 1) \
6495 V(8x8U, vuplh, 0) \
6496 V(8x8S, vuph, 0)
6497
6498#define LOAD_EXTEND(name, unpack_instr, condition) \
6499 void MacroAssembler::LoadAndExtend##name##LE( \
6500 Simd128Register dst, const MemOperand& mem, Register scratch) { \
6501 if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
6502 vlebrg(dst, mem, Condition(0)); \
6503 } else { \
6504 LoadU64LE(scratch, mem); \
6505 vlvg(dst, scratch, MemOperand(r0, 0), Condition(3)); \
6506 } \
6507 unpack_instr(dst, dst, Condition(0), Condition(0), Condition(condition)); \
6508 }
6509LOAD_EXTEND_LIST(LOAD_EXTEND)
6510#undef LOAD_EXTEND
6511#undef LOAD_EXTEND_LIST
6512
6513void MacroAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem,
6514 Register scratch) {
6515 vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
6516 if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
6517 vlebrf(dst, mem, Condition(3));
6518 return;
6519 }
6520 LoadU32LE(scratch, mem);
6521 vlvg(dst, scratch, MemOperand(r0, 3), Condition(2));
6522}
6523
6524void MacroAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem,
6525 Register scratch) {
6526 vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
6527 if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
6528 vlebrg(dst, mem, Condition(1));
6529 return;
6530 }
6531 LoadU64LE(scratch, mem);
6532 vlvg(dst, scratch, MemOperand(r0, 1), Condition(3));
6533}
6534
6535#define LOAD_LANE_LIST(V) \
6536 V(64, vlebrg, LoadU64LE, 3) \
6537 V(32, vlebrf, LoadU32LE, 2) \
6538 V(16, vlebrh, LoadU16LE, 1) \
6539 V(8, vleb, LoadU8, 0)
6540
6541#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
6542 void MacroAssembler::LoadLane##name##LE(Simd128Register dst, \
6543 const MemOperand& mem, int lane, \
6544 Register scratch) { \
6545 if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
6546 vector_instr(dst, mem, Condition(lane)); \
6547 return; \
6548 } \
6549 scalar_instr(scratch, mem); \
6550 vlvg(dst, scratch, MemOperand(r0, lane), Condition(condition)); \
6551 }
6552LOAD_LANE_LIST(LOAD_LANE)
6553#undef LOAD_LANE
6554#undef LOAD_LANE_LIST
6555
6556#define STORE_LANE_LIST(V) \
6557 V(64, vstebrg, StoreU64LE, 3) \
6558 V(32, vstebrf, StoreU32LE, 2) \
6559 V(16, vstebrh, StoreU16LE, 1) \
6560 V(8, vsteb, StoreU8, 0)
6561
6562#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
6563 void MacroAssembler::StoreLane##name##LE(Simd128Register src, \
6564 const MemOperand& mem, int lane, \
6565 Register scratch) { \
6566 if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
6567 vector_instr(src, mem, Condition(lane)); \
6568 return; \
6569 } \
6570 vlgv(scratch, src, MemOperand(r0, lane), Condition(condition)); \
6571 scalar_instr(scratch, mem); \
6572 }
6573STORE_LANE_LIST(STORE_LANE)
6574#undef STORE_LANE
6575#undef STORE_LANE_LIST
6576#undef CAN_LOAD_STORE_REVERSE
6577#undef IS_BIG_ENDIAN
6578
6579void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
6580 ASM_CODE_COMMENT(this);
6581 DCHECK(root_array_available());
6582 intptr_t offset = kind == StackLimitKind::kRealStackLimit
6583 ? IsolateData::real_jslimit_offset()
6584 : IsolateData::jslimit_offset();
6585 CHECK(is_int32(offset));
6586 LoadU64(destination, MemOperand(kRootRegister, offset));
6587}
6588
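// Switch below emits an inline jump table: every entry is a relative branch
// padded with a halfword of data to a fixed 8-byte size, so the selector is
// shifted left by entry_size_log2 (3) and added to the table base in r1 before
// the indirect branch.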
6589void MacroAssembler::Switch(Register scratch, Register value,
6590 int case_value_base, Label** labels,
6591 int num_labels) {
6592 Label fallthrough, jump_table;
6593 if (case_value_base != 0) {
6594 SubS64(value, value, Operand(case_value_base));
6595 }
6596 CmpU64(value, Operand(num_labels));
6597 bge(&fallthrough);
6598
6599 int entry_size_log2 = 3;
6600 ShiftLeftU32(value, value, Operand(entry_size_log2));
6601 larl(r1, &jump_table);
6602 lay(r1, MemOperand(value, r1));
6603 b(r1);
6604
6605 bind(&jump_table);
6606 for (int i = 0; i < num_labels; ++i) {
6607 b(labels[i]);
6608 dh(0);
6609 }
6610 bind(&fallthrough);
6611}
6612
6613void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization(
6614 Register code, Register scratch, Label* if_marked_for_deoptimization) {
6615 TestCodeIsMarkedForDeoptimization(code, scratch);
6616 bne(if_marked_for_deoptimization);
6617}
6618
6619void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch,
6620 Label* if_turbofanned) {
6621 LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
6622 TestBit(scratch, Code::kIsTurbofannedBit, scratch);
6623 bne(if_turbofanned);
6624}
6625
6626void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
6627 CodeKind min_opt_level,
6628 Register feedback_vector,
6629 FeedbackSlot slot,
6630 Label* on_result,
6631 Label::Distance) {
6632 Label fallthrough, clear_slot;
6633 LoadTaggedField(
6634 scratch_and_result,
6635 FieldMemOperand(feedback_vector,
6636 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
6637 LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
6638
6639 // Is it marked_for_deoptimization? If yes, clear the slot.
6640 {
6641 // The entry references a CodeWrapper object. Unwrap it now.
6642 LoadTaggedField(
6643 scratch_and_result,
6644 FieldMemOperand(scratch_and_result, CodeWrapper::kCodeOffset));
6645
6646 UseScratchRegisterScope temps(this);
6647 Register temp = temps.Acquire();
6648 JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temp, &clear_slot);
6649 if (min_opt_level == CodeKind::TURBOFAN_JS) {
6650 JumpIfCodeIsTurbofanned(scratch_and_result, temp, on_result);
6651 b(&fallthrough);
6652 } else {
6653 b(on_result);
6654 }
6655 }
6656
6657 bind(&clear_slot);
6658 mov(scratch_and_result, ClearedValue());
6659 StoreTaggedField(
6660 scratch_and_result,
6661 FieldMemOperand(feedback_vector,
6662 FeedbackVector::OffsetOfElementAt(slot.ToInt())));
6663
6664 bind(&fallthrough);
6665 mov(scratch_and_result, Operand::Zero());
6666}
6667
6668// Calls an API function. Allocates HandleScope, extracts returned value
6669// from handle and propagates exceptions. Clobbers C argument registers
6670// and C caller-saved registers. Restores context. On return removes
6671// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
6672// (GCed, includes the call JS arguments space and the additional space
6673// allocated for the fast call).
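// The body below proceeds in stages: it saves the current HandleScope state in
// callee-saved registers, calls the target either directly or through the
// profiling thunk, restores the HandleScope, leaves the exit frame and drops
// the argument slots, and finally dispatches to the slow paths that propagate
// a scheduled exception or delete allocated handle scope extensions when the
// scope limit changed.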
6674void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
6675 Register function_address,
6676 ExternalReference thunk_ref, Register thunk_arg,
6677 int slots_to_drop_on_return,
6678 MemOperand* argc_operand,
6679 MemOperand return_value_operand) {
6680 using ER = ExternalReference;
6681
6682 Isolate* isolate = masm->isolate();
6683 MemOperand next_mem_op = __ ExternalReferenceAsOperand(
6684 ER::handle_scope_next_address(isolate), no_reg);
6685 MemOperand limit_mem_op = __ ExternalReferenceAsOperand(
6686 ER::handle_scope_limit_address(isolate), no_reg);
6687 MemOperand level_mem_op = __ ExternalReferenceAsOperand(
6688 ER::handle_scope_level_address(isolate), no_reg);
6689
6690 Register return_value = r2;
6691#if V8_OS_ZOS
6692 Register scratch = r6;
6693#else
6694 Register scratch = ip;
6695#endif
6696 Register scratch2 = r1;
6697
6698 // Allocate HandleScope in callee-saved registers.
6699 // We will need to restore the HandleScope after the call to the API function;
6700 // by allocating it in callee-saved registers it'll be preserved by C code.
6701#if V8_OS_ZOS
6702 Register prev_next_address_reg = r14;
6703#else
6704 Register prev_next_address_reg = r6;
6705#endif
6706 Register prev_limit_reg = r7;
6707 Register prev_level_reg = r8;
6708
6709 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
6710 // this function must not corrupt them (return_value overlaps with
6711 // kCArgRegs[0] but that's ok because we start using it only after the C
6712 // call).
6713 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
6714 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
6715 // function_address and thunk_arg might overlap but this function must not
6716 // corrupt them until the call is made (i.e. overlap with return_value is
6717 // fine).
6718 DCHECK(!AreAliased(function_address, // incoming parameters
6719 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
6720 DCHECK(!AreAliased(thunk_arg, // incoming parameters
6721 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
6722 {
6724 "Allocate HandleScope in callee-save registers.");
6725 __ LoadU64(prev_next_address_reg, next_mem_op);
6726 __ LoadU64(prev_limit_reg, limit_mem_op);
6727 __ LoadU32(prev_level_reg, level_mem_op);
6728 __ AddS64(scratch, prev_level_reg, Operand(1));
6729 __ StoreU32(scratch, level_mem_op);
6730 }
6731
6732 Label profiler_or_side_effects_check_enabled, done_api_call;
6733 if (with_profiling) {
6734 __ RecordComment("Check if profiler or side effects check is enabled");
6735 __ LoadU8(scratch,
6736 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
6737 __ CmpS64(scratch, Operand::Zero());
6738 __ bne(&profiler_or_side_effects_check_enabled, Label::kNear);
6739#ifdef V8_RUNTIME_CALL_STATS
6740 __ RecordComment("Check if RCS is enabled");
6741 __ Move(scratch, ER::address_of_runtime_stats_flag());
6742 __ LoadU32(scratch, MemOperand(scratch, 0));
6743 __ CmpS64(scratch, Operand::Zero());
6744 __ bne(&profiler_or_side_effects_check_enabled, Label::kNear);
6745#endif // V8_RUNTIME_CALL_STATS
6746 }
6747
6748 __ RecordComment("Call the api function directly.");
6749#if V8_OS_ZOS
6750 __ mov(scratch, function_address);
6751 __ zosStoreReturnAddressAndCall(function_address, scratch);
6752#else
6753 __ StoreReturnAddressAndCall(function_address);
6754#endif
6755 __ bind(&done_api_call);
6756
6757 Label propagate_exception;
6758 Label delete_allocated_handles;
6759 Label leave_exit_frame;
6760
6761 __ RecordComment("Load the value from ReturnValue");
6762 __ LoadU64(r2, return_value_operand);
6763
6764 {
6765 ASM_CODE_COMMENT_STRING(
6766 masm,
6767 "No more valid handles (the result handle was the last one)."
6768 "Restore previous handle scope.");
6769 __ StoreU64(prev_next_address_reg, next_mem_op);
6770 if (v8_flags.debug_code) {
6771 __ LoadU32(scratch, level_mem_op);
6772 __ SubS64(scratch, Operand(1));
6773 __ CmpS64(scratch, prev_level_reg);
6774 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
6775 }
6776 __ StoreU32(prev_level_reg, level_mem_op);
6777 __ CmpS64(prev_limit_reg, limit_mem_op);
6778 __ bne(&delete_allocated_handles, Label::kNear);
6779 }
6780
6781 __ RecordComment("Leave the API exit frame.");
6782 __ bind(&leave_exit_frame);
6783 Register argc_reg = prev_limit_reg;
6784 if (argc_operand != nullptr) {
6785 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
6786 __ LoadU64(argc_reg, *argc_operand);
6787 }
6788 __ LeaveExitFrame(scratch);
6789
6790 // Check if the function scheduled an exception.
6791 {
6793 "Check if the function scheduled an exception.");
6795 ER::exception_address(isolate), no_reg));
6796 __ CompareRoot(scratch2, RootIndex::kTheHoleValue);
6797 __ bne(&propagate_exception, Label::kNear);
6798 }
6799
6800 __ AssertJSAny(return_value, scratch, scratch2,
6801 AbortReason::kAPICallReturnedInvalidObject);
6802
6803 if (argc_operand == nullptr) {
6804 DCHECK_NE(slots_to_drop_on_return, 0);
6805 __ AddS64(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
6806
6807 } else {
6808 // {argc_operand} was loaded into {argc_reg} above.
6809 __ AddS64(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
6810 __ ShiftLeftU64(r0, argc_reg, Operand(kSystemPointerSizeLog2));
6811 __ AddS64(sp, sp, r0);
6812 }
6813
6814 __ b(r14);
6815
6816 if (with_profiling) {
6817 ASM_CODE_COMMENT_STRING(masm, "Call the api function via thunk wrapper.");
6818 __ bind(&profiler_or_side_effects_check_enabled);
6819 // Additional parameter is the address of the actual callback function.
6820 if (thunk_arg.is_valid()) {
6821 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
6822 IsolateFieldId::kApiCallbackThunkArgument);
6823 __ StoreU64(thunk_arg, thunk_arg_mem_op);
6824 }
6825 __ Move(scratch, thunk_ref);
6826#if V8_OS_ZOS
6827 __ zosStoreReturnAddressAndCall(function_address, scratch);
6828#else
6829 __ StoreReturnAddressAndCall(scratch);
6830#endif
6831 __ b(&done_api_call);
6832 }
6833
6834 __ RecordComment("An exception was thrown. Propagate it.");
6835 __ bind(&propagate_exception);
6836 __ TailCallRuntime(Runtime::kPropagateException);
6837
6838 // HandleScope limit has changed. Delete allocated extensions.
6839 {
6841 masm, "HandleScope limit has changed. Delete allocated extensions.");
6842 __ bind(&delete_allocated_handles);
6843 __ StoreU64(prev_limit_reg, limit_mem_op);
6844 // Save the return value in a callee-save register.
6845 Register saved_result = prev_limit_reg;
6846 __ mov(saved_result, return_value);
6847 __ PrepareCallCFunction(1, scratch);
6848 __ Move(kCArgRegs[0], ER::isolate_address());
6849 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
6850 __ mov(return_value, saved_result);
6851 __ b(&leave_exit_frame, Label::kNear);
6852 }
6853}
6854
6855} // namespace internal
6856} // namespace v8
6857
6858#undef __
6859
6860#endif // V8_TARGET_ARCH_S390X
void AssertZeroExtended(Register int32_register)
void AddFloat32(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void CmpS32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void StoreU64(Register src, const MemOperand &mem, Register scratch=no_reg)
void MulFloat64(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void StoreU16(Register src, const MemOperand &mem, Register scratch)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void AddFloat64(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void StoreU8(Register src, const MemOperand &mem, Register scratch)
void LoadMultipleP(Register dst1, Register dst2, const MemOperand &mem)
void MultiPushV128(Simd128RegList dregs, Register scratch, Register location=sp)
void ExclusiveOrChar(const MemOperand &opnd1, const MemOperand &opnd2, const Operand &length)
void JumpIfLessThan(Register x, int32_t y, Label *dest)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void CeilF64(DoubleRegister dst, DoubleRegister src)
void TestBitMask(Register value, uintptr_t mask, Register scratch=r0)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Branch(Label *label, bool need_link=false)
void Not64(Register dst, Register src=no_reg)
void AddU64(Register dst, const Operand &imm)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void DivS64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void CompareInstanceTypeRange(Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void LoadU64(Register dst, const MemOperand &mem, Register scratch=no_reg)
void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, const MemOperand &src2)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void ConvertFloat32ToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void CmpF64(DoubleRegister src1, DoubleRegister src2)
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
void I8x16Splat(Simd128Register dst, Register src)
void CmpSmiLiteral(Register src1, Tagged< Smi > smi, Register scratch, CRegister cr=cr0)
void LoadF64LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void F32x4Splat(Simd128Register dst, DoubleRegister src, DoubleRegister scratch1, Register scratch2)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void LoadAndTestP(Register dst, Register src)
void StoreMultipleW(Register dst1, Register dst2, const MemOperand &mem)
void NearestIntF64(DoubleRegister dst, DoubleRegister src)
void SmiUntagField(Register dst, const MemOperand &src)
void PopAll(RegList registers)
void StubPrologue(StackFrame::Type type)
void TruncF32(DoubleRegister dst, DoubleRegister src)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void StoreU32LE(Register src, const MemOperand &mem, Register scratch)
void DecompressTagged(const Register &destination, const MemOperand &field_operand)
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void StoreRootRelative(int32_t offset, Register value) final
void DoubleMin(DoubleRegister result_reg, DoubleRegister left_reg, DoubleRegister right_reg)
void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, Simd128Register scratch2)
void CmpF32(DoubleRegister src1, DoubleRegister src2)
void CountLeadingZerosU64(Register dst, Register src, RCBit r=LeaveRC)
void LoadTaggedSignedField(const Register &destination, const MemOperand &field_operand)
void LoadMap(Register destination, Register object)
void CmpAndSwap(Register old_val, Register new_val, const MemOperand &opnd)
void TailCallRuntime(Runtime::FunctionId fid)
void MultiPopF64OrV128(DoubleRegList dregs, Register scratch, Register location=sp)
void TestBit(Register value, int bitNumber, Register scratch=r0)
void LoadNativeContextSlot(Register dst, int index)
void I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label *out_of_line)
void AtomicCmpExchangeU8(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1)
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst)
void LoadTaggedFieldWithoutDecompressing(const Register &destination, const MemOperand &field_operand)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst)
void MulFloat32(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch)
void AtomicExchangeU16(Register addr, Register value, Register output, Register scratch)
void I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch)
void LoadS16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertDoubleToInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void LoadV128(Simd128Register dst, const MemOperand &mem, Register scratch)
void Switch(Register scratch, Register value, int case_value_base, Label **labels, int num_labels)
void I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3)
void AndP(Register dst, Register src)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void MultiPushDoubles(DoubleRegList dregs, Register location=sp)
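As a quick orientation for the member declarations listed above, here is a minimal, hypothetical sketch (not code from the V8 tree) of how a code-generation routine might drive a few of these MacroAssembler helpers. The helper name EmitCounterBump and the register choices are illustrative assumptions; the V8-internal types (MacroAssembler, Register, MemOperand, Operand) are assumed to be in scope via the usual codegen headers, and only the LoadU64/AddU64/StoreU64 calls correspond to signatures shown above.

// Hypothetical illustration only -- exercises LoadU64, AddU64 and StoreU64
// as declared above; not part of macro-assembler-s390.cc.
void EmitCounterBump(MacroAssembler* masm, Register counter_addr,
                     Register scratch) {
  // Load a 64-bit counter from memory, add one, and write it back.
  masm->LoadU64(scratch, MemOperand(counter_addr, 0));
  masm->AddU64(scratch, Operand(1));
  masm->StoreU64(scratch, MemOperand(counter_addr, 0));
}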
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
static constexpr DwVfpRegister from_code(int8_t code)
static const RegisterConfiguration * Default()
static constexpr Register from_code(int code)
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
Definition reloc-info.h:206
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsFullEmbeddedObject(Mode mode)
Definition reloc-info.h:203
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
void S128Not(XMMRegister dst, XMMRegister src, XMMRegister scratch)
void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src, XMMRegister tmp)
void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src, XMMRegister scratch, Register tmp)
void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src, Register scratch)
void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src, Register scratch)
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr int kFixedFrameSizeFromFp
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_SHIFT(name)
#define LOAD_EXTEND(type)
#define EMIT_SIMD_EXT_MUL(name)
#define STORE_LANE(type, lane)
#define LOAD_SPLAT(type)
#define LOAD_LANE(type, lane)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
Definition globals.h:81
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
#define ABI_CALL_VIA_IP
int start
int end
DirectHandle< Object > new_target
Definition execution.cc:75
int32_t offset
std::optional< TNode< JSArray > > a
TNode< Object > receiver
RoundingMode rounding_mode
ZoneVector< RpoNumber > & result
LiftoffRegister reg
int x
uint32_t const mask
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_QFM_LIST(V)
#define ClearRightImm
#define SIMD_SHIFT_LIST(V)
#define SIMD_EXT_MUL_LIST(V)
SmiCheck
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
InstructionOperand destination
int s
Definition mul-fft.cc:297
int m
Definition mul-fft.cc:294
int n
Definition mul-fft.cc:296
STL namespace.
int int32_t
Definition unicode.cc:40
unsigned short uint16_t
Definition unicode.cc:39
signed short int16_t
Definition unicode.cc:38
constexpr unsigned CountLeadingZeros64(uint64_t value)
Definition bits.h:125
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr int WhichPowerOfTwo(T value)
Definition bits.h:195
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
auto Reversed(T &t)
Definition iterator.h:105
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
constexpr Register r0
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr VFPRoundingMode kRoundToNearest
RegListBase< DoubleRegister > DoubleRegList
Definition reglist-arm.h:15
constexpr VFPRoundingMode kRoundToMinusInf
constexpr int kTaggedSize
Definition globals.h:542
constexpr int kSimd128Size
Definition globals.h:706
const int kStackFrameRASlot
const int kNumRequiredStackFrameSlots
DwVfpRegister DoubleRegister
const int kSmiTagSize
Definition v8-internal.h:87
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
const Address kWeakHeapObjectMask
Definition globals.h:967
constexpr Register kJavaScriptCallArgCountRegister
Address Tagged_t
Definition globals.h:547
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
Definition globals.h:557
const int kNumCallerSavedDoubles
Definition reglist-ppc.h:53
static const int kRegisterPassedArguments
Flag flags[]
Definition flags.cc:3797
constexpr int L
QwNeonRegister Simd128Register
MemOperand FieldMemOperand(Register object, int offset)
constexpr VFPRoundingMode kRoundToPlusInf
constexpr int kSystemPointerSize
Definition globals.h:410
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kZapValue
Definition globals.h:1005
const int kStackFrameSPSlot
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
const DoubleRegList kCallerSavedDoubles
Definition reglist-ppc.h:46
const RegList kJSCallerSaved
Definition reglist-arm.h:23
Register ToRegister(int num)
constexpr bool SmiValuesAre32Bits()
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
constexpr Register kPtrComprCageBaseRegister
constexpr VFPRoundingMode kRoundToZero
return value
Definition map-inl.h:893
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
const int kSmiTag
Definition v8-internal.h:86
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register cp
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
Definition globals.h:407
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
constexpr uint32_t kMaxUInt32
Definition globals.h:387
Condition to_condition(Condition cond)
constexpr Register kJavaScriptCallNewTargetRegister
constexpr Register kJSFunctionRegister
Local< T > Handle
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define USE(...)
Definition macros.h:293
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define OFFSET_OF_DATA_START(Type)
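Several of the constants cross-referenced above (kSmiTag, kSmiTagSize, SmiValuesAre31Bits, SmiValuesAre32Bits) describe how small integers are tagged in pointers. The snippet below is a self-contained, standalone illustration of that arithmetic under the conventional layout (kSmiTag == 0, kSmiTagSize == 1); the shift-size values for the 31-bit and 32-bit Smi configurations are stated as assumptions and are not taken from this file.

#include <cassert>
#include <cstdint>

namespace {
// Conventional V8 Smi layout: the tag occupies bit 0 and has value 0.
constexpr int kSmiTag = 0;
constexpr int kSmiTagSize = 1;
// Assumed shift sizes: 0 for 31-bit Smis (e.g. with pointer compression),
// 31 for 32-bit Smis (payload in the upper half of a 64-bit word).
constexpr int kSmiShiftSize31 = 0;
constexpr int kSmiShiftSize32 = 31;

// Combine a small integer with the tag: value << (tag size + shift size) | tag.
constexpr intptr_t TagSmi(int value, int shift_size) {
  return static_cast<intptr_t>(static_cast<uintptr_t>(value)
                               << (kSmiTagSize + shift_size)) |
         kSmiTag;
}
}  // namespace

int main() {
  assert(TagSmi(5, kSmiShiftSize31) == 0xA);                  // 5 << 1
  assert(TagSmi(5, kSmiShiftSize32) == (intptr_t{5} << 32));  // upper word
  return 0;
}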