macro-assembler-ppc.cc
1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <assert.h> // For assert
6#include <limits.h> // For LONG_MIN, LONG_MAX.
7
8#if V8_TARGET_ARCH_PPC64
9
10#include <optional>
11
12#include "src/base/bits.h"
22#include "src/debug/debug.h"
28#include "src/runtime/runtime.h"
30
31// Satisfy cpplint check, but don't include platform-specific header. It is
32// included recursively via macro-assembler.h.
33#if 0
35#endif
36
37#define __ ACCESS_MASM(masm)
38
39namespace v8 {
40namespace internal {
41
42namespace {
43
44// Simd and Floating Point registers are not shared. For WebAssembly we save
45// both register sets. If we are not running Wasm, we can get away with only
46// saving the FP registers.
47#if V8_ENABLE_WEBASSEMBLY
48constexpr int kStackSavedSavedFPSizeInBytes =
51#else
52constexpr int kStackSavedSavedFPSizeInBytes =
54#endif // V8_ENABLE_WEBASSEMBLY
55
56} // namespace
57
58int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
59 Register exclusion1,
60 Register exclusion2,
61 Register exclusion3) const {
62 int bytes = 0;
63
64 RegList exclusions = {exclusion1, exclusion2, exclusion3};
65 RegList list = kJSCallerSaved - exclusions;
66 bytes += list.Count() * kSystemPointerSize;
67
68 if (fp_mode == SaveFPRegsMode::kSave) {
69 bytes += kStackSavedSavedFPSizeInBytes;
70 }
71
72 return bytes;
73}
74
75int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
76 Register scratch2, Register exclusion1,
77 Register exclusion2, Register exclusion3) {
78 int bytes = 0;
79
80 RegList exclusions = {exclusion1, exclusion2, exclusion3};
81 RegList list = kJSCallerSaved - exclusions;
82 MultiPush(list);
83 bytes += list.Count() * kSystemPointerSize;
84
85 if (fp_mode == SaveFPRegsMode::kSave) {
87 scratch2);
88 bytes += kStackSavedSavedFPSizeInBytes;
89 }
90
91 return bytes;
92}
93
94int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1,
95 Register scratch2, Register exclusion1,
96 Register exclusion2, Register exclusion3) {
97 int bytes = 0;
98 if (fp_mode == SaveFPRegsMode::kSave) {
100 scratch2);
101 bytes += kStackSavedSavedFPSizeInBytes;
102 }
103
104 RegList exclusions = {exclusion1, exclusion2, exclusion3};
105 RegList list = kJSCallerSaved - exclusions;
106 MultiPop(list);
107 bytes += list.Count() * kSystemPointerSize;
108
109 return bytes;
110}
111
112void MacroAssembler::GetLabelAddress(Register dest, Label* target) {
113 // This should be just a
114 // add(dest, pc, branch_offset(target));
115 // but the current implementation of Assembler::bind_to()/target_at_put() adds
116 // (InstructionStream::kHeaderSize - kHeapObjectTag) to the position of a label
117 // in a "linked" state, making it usable only for mov_label_offset().
118 // TODO(ishell): fix branch_offset() and re-implement
119 // RegExpMacroAssemblerARM::PushBacktrack() without mov_label_offset().
120 mov_label_offset(dest, target);
121 // mov_label_offset computes the offset of |target| relative to the "current
122 // InstructionStream object pointer", which is essentially the pc_offset() of
123 // the label plus (InstructionStream::kHeaderSize - kHeapObjectTag).
124 // Compute the "current InstructionStream object pointer" and add it to the
125 // offset in the |dest| register.
126 int current_instr_code_object_relative_offset =
129 LoadPC(r0);
130 // LoadPC emits 2 instructions; pc_offset() points to its first instruction,
131 // but the real pc will point to its second instruction, so make an
132 // adjustment so they both refer to the same offset.
133 current_instr_code_object_relative_offset -= kInstrSize;
134 AddS64(dest, r0, dest);
135 SubS64(dest, dest, Operand(current_instr_code_object_relative_offset));
136}
137
138void MacroAssembler::Jump(Register target) {
139 mtctr(target);
140 bctr();
141}
142
143void MacroAssembler::LoadFromConstantsTable(Register destination,
144 int constant_index) {
145 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
146
148 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
151 constant_index)),
152 r0);
153}
154
157}
158
159void MacroAssembler::StoreRootRelative(int32_t offset, Register value) {
161}
162
164 intptr_t offset) {
165 if (offset == 0) {
167 } else {
169 }
170}
171
173 ExternalReference reference, Register scratch) {
174 if (root_array_available()) {
175 if (reference.IsIsolateFieldId()) {
176 return MemOperand(kRootRegister, reference.offset_from_root_register());
177 }
178 if (options().enable_root_relative_access) {
179 intptr_t offset =
181 if (is_int32(offset)) {
182 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
183 }
184 }
185 if (options().isolate_independent_code) {
186 if (IsAddressableThroughRootRegister(isolate(), reference)) {
187 // Some external references can be efficiently loaded as an offset from
188 // kRootRegister.
189 intptr_t offset =
191 CHECK(is_int32(offset));
192 return MemOperand(kRootRegister, static_cast<int32_t>(offset));
193 } else {
194 // Otherwise, do a memory load from the external reference table.
195 LoadU64(scratch,
198 isolate(), reference)));
199 return MemOperand(scratch, 0);
200 }
201 }
202 }
203 Move(scratch, reference);
204 return MemOperand(scratch, 0);
205}
206
207void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
208 Condition cond, CRegister cr) {
209 Label skip;
210
211 if (cond != al) b(NegateCondition(cond), &skip, cr);
212
213 mov(ip, Operand(target, rmode));
214 mtctr(ip);
215 bctr();
216
217 bind(&skip);
218}
219
221 CRegister cr) {
223 Jump(static_cast<intptr_t>(target), rmode, cond, cr);
224}
225
227 Condition cond, CRegister cr) {
229 DCHECK_IMPLIES(options().isolate_independent_code,
231
233 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
234 TailCallBuiltin(builtin, cond, cr);
235 return;
236 }
237 int32_t target_index = AddCodeTarget(code);
238 Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
239}
240
241void MacroAssembler::Jump(const ExternalReference& reference) {
242 UseScratchRegisterScope temps(this);
243 Register scratch = temps.Acquire();
244 Move(scratch, reference);
246 // AIX uses a function descriptor. When calling C code be
247 // aware of this descriptor and pick up values from it.
250 LoadU64(scratch, MemOperand(scratch, 0));
251 }
252 Jump(scratch);
253}
254
255void MacroAssembler::Call(Register target) {
256 BlockTrampolinePoolScope block_trampoline_pool(this);
257 // branch via link register and set LK bit for return point
258 mtctr(target);
259 bctrl();
260}
261
262void MacroAssembler::CallJSEntry(Register target) {
263 CHECK(target == r5);
264 Call(target);
265}
266
268 RelocInfo::Mode rmode,
269 Condition cond) {
271}
272
274 Condition cond) {
275 BlockTrampolinePoolScope block_trampoline_pool(this);
276 DCHECK(cond == al);
277
278 // This can likely be optimized to make use of bc() with a 24-bit relative offset.
279 //
280 // RecordRelocInfo(x.rmode_, x.immediate);
281 // bc( BA, .... offset, LKset);
282 //
283
284 mov(ip, Operand(target, rmode));
285 mtctr(ip);
286 bctrl();
287}
288
290 Condition cond) {
291 BlockTrampolinePoolScope block_trampoline_pool(this);
293 DCHECK_IMPLIES(options().isolate_independent_code,
295
297 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
298 CallBuiltin(builtin, cond);
299 return;
300 }
301 int32_t target_index = AddCodeTarget(code);
302 Call(static_cast<Address>(target_index), rmode, cond);
303}
304
307 // Use ip directly instead of using UseScratchRegisterScope, as we do not
308 // preserve scratch registers across calls.
309 switch (options().builtin_call_jump_mode) {
311 Label skip;
312 mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
313 if (cond != al) b(NegateCondition(cond), &skip);
314 Call(ip);
315 bind(&skip);
316 break;
317 }
319 UNREACHABLE();
321 Label skip;
322 LoadU64(ip, EntryFromBuiltinAsOperand(builtin), r0);
323 if (cond != al) b(NegateCondition(cond), &skip);
324 Call(ip);
325 bind(&skip);
326 break;
327 }
329 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
330 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
331 int32_t code_target_index = AddCodeTarget(code);
332 Call(static_cast<Address>(code_target_index), RelocInfo::CODE_TARGET,
333 cond);
334 } else {
335 Label skip;
336 LoadU64(ip, EntryFromBuiltinAsOperand(builtin), r0);
337 if (cond != al) b(NegateCondition(cond), &skip);
338 Call(ip);
339 bind(&skip);
340 }
341 break;
342 }
343 }
344}
345
347 CRegister cr) {
349 CommentForOffHeapTrampoline("tail call", builtin));
350 // Use ip directly instead of using UseScratchRegisterScope, as we do not
351 // preserve scratch registers across calls.
352 switch (options().builtin_call_jump_mode) {
354 Label skip;
355 mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
356 if (cond != al) b(NegateCondition(cond), &skip, cr);
357 Jump(ip);
358 bind(&skip);
359 break;
360 }
362 UNREACHABLE();
364 Label skip;
365 LoadU64(ip, EntryFromBuiltinAsOperand(builtin), r0);
366 if (cond != al) b(NegateCondition(cond), &skip, cr);
367 Jump(ip);
368 bind(&skip);
369 break;
370 }
372 if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
373 Handle<Code> code = isolate()->builtins()->code_handle(builtin);
374 int32_t code_target_index = AddCodeTarget(code);
375 Jump(static_cast<intptr_t>(code_target_index), RelocInfo::CODE_TARGET,
376 cond, cr);
377 } else {
378 Label skip;
379 LoadU64(ip, EntryFromBuiltinAsOperand(builtin), r0);
380 if (cond != al) b(NegateCondition(cond), &skip, cr);
381 Jump(ip);
382 bind(&skip);
383 }
384 break;
385 }
386 }
387}
388
389void MacroAssembler::Drop(int count) {
390 if (count > 0) {
391 AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
392 }
393}
394
395void MacroAssembler::Drop(Register count, Register scratch) {
396 ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2));
397 add(sp, sp, scratch);
398}
399
400// Enforce alignment of sp.
402 int frame_alignment = ActivationFrameAlignment();
403 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
404
405 uint64_t frame_alignment_mask = ~(static_cast<uint64_t>(frame_alignment) - 1);
406 AndU64(sp, sp, Operand(frame_alignment_mask));
407}
408
410 Register scratch1,
411 Register scratch2) {
412 LoadU32(scratch1, FieldMemOperand(code, Code::kFlagsOffset), scratch2);
413 TestBit(scratch1, Code::kMarkedForDeoptimizationBit, scratch2);
414}
415
416Operand MacroAssembler::ClearedValue() const {
417 return Operand(static_cast<int32_t>(i::ClearedValue(isolate()).ptr()));
418}
419
420void MacroAssembler::Call(Label* target) { b(target, SetLK); }
421
422void MacroAssembler::Push(Handle<HeapObject> handle) {
423 mov(r0, Operand(handle));
424 push(r0);
425}
426
428 mov(r0, Operand(smi));
429 push(r0);
430}
431
432void MacroAssembler::PushArray(Register array, Register size, Register scratch,
433 Register scratch2, PushArrayOrder order) {
434 Label loop, done;
435
436 if (order == kNormal) {
437 cmpi(size, Operand::Zero());
438 beq(&done);
439 ShiftLeftU64(scratch, size, Operand(kSystemPointerSizeLog2));
440 add(scratch, array, scratch);
441 mtctr(size);
442
443 bind(&loop);
444 LoadU64WithUpdate(scratch2, MemOperand(scratch, -kSystemPointerSize));
446 bdnz(&loop);
447
448 bind(&done);
449 } else {
450 cmpi(size, Operand::Zero());
451 beq(&done);
452
453 mtctr(size);
454 subi(scratch, array, Operand(kSystemPointerSize));
455
456 bind(&loop);
459 bdnz(&loop);
460 bind(&done);
461 }
462}
463
464void MacroAssembler::Move(Register dst, Handle<HeapObject> value,
465 RelocInfo::Mode rmode) {
466 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
467 // non-isolate-independent code. In many cases it might be cheaper than
468 // embedding the relocatable value.
469 if (root_array_available_ && options().isolate_independent_code) {
470 IndirectLoadConstant(dst, value);
471 return;
472 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
474 DCHECK(is_uint32(index));
475 mov(dst, Operand(static_cast<int>(index), rmode));
476 } else {
478 mov(dst, Operand(value.address(), rmode));
479 }
480}
481
482void MacroAssembler::Move(Register dst, ExternalReference reference) {
483 if (root_array_available()) {
484 if (reference.IsIsolateFieldId()) {
486 Operand(reference.offset_from_root_register()));
487 return;
488 }
489 if (options().isolate_independent_code) {
490 IndirectLoadExternalReference(dst, reference);
491 return;
492 }
493 }
494
495 // External references should not get created with IDs if
496 // `!root_array_available()`.
497 CHECK(!reference.IsIsolateFieldId());
498 mov(dst, Operand(reference));
499}
500
503}
504
505void MacroAssembler::Move(Register dst, Register src, Condition cond) {
506 DCHECK(cond == al);
507 if (dst != src) {
508 mr(dst, src);
509 }
510}
511
513 if (dst != src) {
514 fmr(dst, src);
515 }
516}
517
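// MultiPush stores the requested registers from the highest-numbered register
// down, so the lowest-numbered register ends up at the lowest stack address;
// MultiPop below walks the set in the opposite direction when restoring.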
518void MacroAssembler::MultiPush(RegList regs, Register location) {
519 int16_t num_to_push = regs.Count();
520 int16_t stack_offset = num_to_push * kSystemPointerSize;
521
522 subi(location, location, Operand(stack_offset));
523 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
524 if ((regs.bits() & (1 << i)) != 0) {
525 stack_offset -= kSystemPointerSize;
526 StoreU64(ToRegister(i), MemOperand(location, stack_offset));
527 }
528 }
529}
530
531void MacroAssembler::MultiPop(RegList regs, Register location) {
532 int16_t stack_offset = 0;
533
534 for (int16_t i = 0; i < Register::kNumRegisters; i++) {
535 if ((regs.bits() & (1 << i)) != 0) {
536 LoadU64(ToRegister(i), MemOperand(location, stack_offset));
537 stack_offset += kSystemPointerSize;
538 }
539 }
540 addi(location, location, Operand(stack_offset));
541}
542
543void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
544 int16_t num_to_push = dregs.Count();
545 int16_t stack_offset = num_to_push * kDoubleSize;
546
547 subi(location, location, Operand(stack_offset));
548 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
549 if ((dregs.bits() & (1 << i)) != 0) {
551 stack_offset -= kDoubleSize;
552 stfd(dreg, MemOperand(location, stack_offset));
553 }
554 }
555}
556
557void MacroAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch,
558 Register location) {
559 int16_t num_to_push = simd_regs.Count();
560 int16_t stack_offset = num_to_push * kSimd128Size;
561
562 subi(location, location, Operand(stack_offset));
563 for (int16_t i = Simd128Register::kNumRegisters - 1; i >= 0; i--) {
564 if ((simd_regs.bits() & (1 << i)) != 0) {
566 stack_offset -= kSimd128Size;
567 StoreSimd128(simd_reg, MemOperand(location, stack_offset), scratch);
568 }
569 }
570}
571
572void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
573 int16_t stack_offset = 0;
574
575 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
576 if ((dregs.bits() & (1 << i)) != 0) {
578 lfd(dreg, MemOperand(location, stack_offset));
579 stack_offset += kDoubleSize;
580 }
581 }
582 addi(location, location, Operand(stack_offset));
583}
584
585void MacroAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch,
586 Register location) {
587 int16_t stack_offset = 0;
588
589 for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
590 if ((simd_regs.bits() & (1 << i)) != 0) {
592 LoadSimd128(simd_reg, MemOperand(location, stack_offset), scratch);
593 stack_offset += kSimd128Size;
594 }
595 }
596 addi(location, location, Operand(stack_offset));
597}
598
600 Simd128RegList simd_regs,
601 Register scratch1, Register scratch2,
602 Register location) {
603 MultiPushDoubles(dregs);
604#if V8_ENABLE_WEBASSEMBLY
605 bool generating_builtins =
607 if (generating_builtins) {
608 // V8 uses the same set of FP param registers as SIMD param registers.
609 // As these are two different register sets on ppc, we must make sure to
610 // also save the SIMD registers when SIMD is enabled.
611 // Check the comments under crrev.com/c/2645694 for more details.
612 Label push_empty_simd, simd_pushed;
613 Move(scratch1, ExternalReference::supports_wasm_simd_128_address());
614 LoadU8(scratch1, MemOperand(scratch1), scratch2);
615 cmpi(scratch1, Operand::Zero()); // If > 0 then simd is available.
616 ble(&push_empty_simd);
617 MultiPushV128(simd_regs, scratch1);
618 b(&simd_pushed);
619 bind(&push_empty_simd);
620 // We still need to allocate empty space on the stack even if we
621 // are not pushing Simd registers (see kFixedFrameSizeFromFp).
622 addi(sp, sp,
623 Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
624 bind(&simd_pushed);
625 } else {
627 MultiPushV128(simd_regs, scratch1);
628 } else {
629 addi(sp, sp,
630 Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
631 }
632 }
633#endif
634}
635
637 Simd128RegList simd_regs,
638 Register scratch1, Register scratch2,
639 Register location) {
640#if V8_ENABLE_WEBASSEMBLY
641 bool generating_builtins =
643 if (generating_builtins) {
644 Label pop_empty_simd, simd_popped;
645 Move(scratch1, ExternalReference::supports_wasm_simd_128_address());
646 LoadU8(scratch1, MemOperand(scratch1), scratch2);
647 cmpi(scratch1, Operand::Zero()); // If > 0 then simd is available.
648 ble(&pop_empty_simd);
649 MultiPopV128(simd_regs, scratch1);
650 b(&simd_popped);
651 bind(&pop_empty_simd);
652 addi(sp, sp,
653 Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
654 bind(&simd_popped);
655 } else {
657 MultiPopV128(simd_regs, scratch1);
658 } else {
659 addi(sp, sp,
660 Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
661 }
662 }
663#endif
664 MultiPopDoubles(dregs);
665}
666
668 ASM_CODE_COMMENT(this);
669 if (CanBeImmediate(index)) {
671 return;
672 }
673 LoadRoot(destination, index);
674}
675
677 Condition cond) {
678 DCHECK(cond == al);
679 if (CanBeImmediate(index)) {
681 return;
682 }
685}
686
688 const MemOperand& field_operand,
689 const Register& scratch) {
691 DecompressTagged(destination, field_operand);
692 } else {
693 LoadU64(destination, field_operand, scratch);
694 }
695}
696
697void MacroAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
698 Register scratch) {
699 if (SmiValuesAre31Bits()) {
700 LoadU32(dst, src, scratch);
701 } else {
702 LoadU64(dst, src, scratch);
703 }
704
705 SmiUntag(dst, rc);
706}
707
708void MacroAssembler::StoreTaggedField(const Register& value,
709 const MemOperand& dst_field_operand,
710 const Register& scratch) {
712 RecordComment("[ StoreTagged");
713 StoreU32(value, dst_field_operand, scratch);
714 RecordComment("]");
715 } else {
716 StoreU64(value, dst_field_operand, scratch);
717 }
718}
719
721 Register src) {
722 RecordComment("[ DecompressTaggedSigned");
724 RecordComment("]");
725}
726
728 MemOperand field_operand) {
729 RecordComment("[ DecompressTaggedSigned");
730 LoadU32(destination, field_operand, r0);
731 RecordComment("]");
732}
733
734void MacroAssembler::DecompressTagged(Register destination, Register source) {
735 RecordComment("[ DecompressTagged");
736 ZeroExtWord32(destination, source);
738 RecordComment("]");
739}
740
742 MemOperand field_operand) {
743 RecordComment("[ DecompressTagged");
744 LoadU32(destination, field_operand, r0);
746 RecordComment("]");
747}
748
750 Tagged_t immediate) {
751 ASM_CODE_COMMENT(this);
753 Operand(immediate, RelocInfo::Mode::NO_INFO));
754}
755
757 MemOperand field_operand,
758 Register scratch) {
760 DecompressTaggedSigned(destination, field_operand);
761 } else {
762 LoadU64(destination, field_operand, scratch);
763 }
764}
765
766void MacroAssembler::RecordWriteField(Register object, int offset,
767 Register value, Register slot_address,
768 LinkRegisterStatus lr_status,
769 SaveFPRegsMode save_fp,
770 SmiCheck smi_check, SlotDescriptor slot) {
771 // First, check if a write barrier is even needed. The tests below
772 // catch stores of Smis.
773 Label done;
774
775 // Skip barrier if writing a smi.
776 if (smi_check == SmiCheck::kInline) {
777 JumpIfSmi(value, &done);
778 }
779
780 // Although the object register is tagged, the offset is relative to the start
781 // of the object, so the offset must be a multiple of kSystemPointerSize.
783
784 AddS64(slot_address, object, Operand(offset - kHeapObjectTag), r0);
785 if (v8_flags.slow_debug_code) {
786 Label ok;
787 andi(r0, slot_address, Operand(kTaggedSize - 1));
788 beq(&ok, cr0);
789 stop();
790 bind(&ok);
791 }
792
793 RecordWrite(object, slot_address, value, lr_status, save_fp, SmiCheck::kOmit,
794 slot);
795
796 bind(&done);
797
798 // Clobber clobbered input registers when running with the debug-code flag
799 // turned on to provoke errors.
800 if (v8_flags.slow_debug_code) {
801 mov(value, Operand(base::bit_cast<intptr_t>(kZapValue + 4)));
802 mov(slot_address, Operand(base::bit_cast<intptr_t>(kZapValue + 8)));
803 }
804}
805
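// A sandboxed pointer is stored as its offset from the pointer compression
// cage base, shifted left by kSandboxedPointerShift. Decoding therefore shifts
// the raw value back down and adds kPtrComprCageBaseRegister.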
806void MacroAssembler::DecodeSandboxedPointer(Register value) {
807 ASM_CODE_COMMENT(this);
808#ifdef V8_ENABLE_SANDBOX
809 ShiftRightU64(value, value, Operand(kSandboxedPointerShift));
810 AddS64(value, value, kPtrComprCageBaseRegister);
811#else
812 UNREACHABLE();
813#endif
814}
815
817 const MemOperand& field_operand,
818 Register scratch) {
819 ASM_CODE_COMMENT(this);
820#ifdef V8_ENABLE_SANDBOX
821 LoadU64(destination, field_operand, scratch);
823#else
824 UNREACHABLE();
825#endif
826}
827
829 Register value, const MemOperand& dst_field_operand, Register scratch) {
830 ASM_CODE_COMMENT(this);
831#ifdef V8_ENABLE_SANDBOX
832 UseScratchRegisterScope temps(this);
833 Register scratch2 = temps.Acquire();
834 DCHECK(!AreAliased(scratch, scratch2));
835 SubS64(scratch2, value, kPtrComprCageBaseRegister);
836 ShiftLeftU64(scratch2, scratch2, Operand(kSandboxedPointerShift));
837 StoreU64(scratch2, dst_field_operand, scratch);
838#else
839 UNREACHABLE();
840#endif
841}
842
844 MemOperand field_operand,
846 Register isolate_root,
847 Register scratch) {
848 DCHECK(!AreAliased(destination, isolate_root));
849 ASM_CODE_COMMENT(this);
850#ifdef V8_ENABLE_SANDBOX
853 UseScratchRegisterScope temps(this);
854 Register external_table = temps.Acquire();
855 DCHECK(!AreAliased(scratch, external_table));
856 if (isolate_root == no_reg) {
858 isolate_root = kRootRegister;
859 }
860 LoadU64(external_table,
861 MemOperand(isolate_root,
862 IsolateData::external_pointer_table_offset() +
864 scratch);
865 LoadU32(destination, field_operand, scratch);
866 ShiftRightU64(destination, destination, Operand(kExternalPointerIndexShift));
868 Operand(kExternalPointerTableEntrySizeLog2));
869 LoadU64(destination, MemOperand(external_table, destination), scratch);
870 mov(scratch, Operand(~tag));
871 AndU64(destination, destination, scratch);
872#else
873 LoadU64(destination, field_operand, scratch);
874#endif // V8_ENABLE_SANDBOX
875}
876
878 MemOperand field_operand,
880 Register scratch) {
881#ifdef V8_ENABLE_SANDBOX
882 LoadIndirectPointerField(destination, field_operand, tag, scratch);
883#else
884 LoadTaggedField(destination, field_operand, scratch);
885#endif
886}
887
889 MemOperand dst_field_operand,
890 Register scratch) {
891#ifdef V8_ENABLE_SANDBOX
892 StoreIndirectPointerField(value, dst_field_operand, scratch);
893#else
894 StoreTaggedField(value, dst_field_operand, scratch);
895#endif
896}
897
898void MacroAssembler::JumpIfJSAnyIsNotPrimitive(Register heap_object,
899 Register scratch, Label* target,
900 Label::Distance distance,
901 Condition cc) {
902 CHECK(cc == Condition::kUnsignedLessThan ||
903 cc == Condition::kUnsignedGreaterThanEqual);
905#ifdef DEBUG
906 Label ok;
907 LoadMap(scratch, heap_object);
908 CompareInstanceTypeRange(scratch, scratch, r0, FIRST_JS_RECEIVER_TYPE,
909 LAST_JS_RECEIVER_TYPE);
910 ble(&ok);
911 LoadMap(scratch, heap_object);
912 CompareInstanceTypeRange(scratch, scratch, r0,
913 FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
914 LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
915 ble(&ok);
916 Abort(AbortReason::kInvalidReceiver);
917 bind(&ok);
918#endif // DEBUG
919
920 // All primitive objects' maps are allocated at the start of the read-only
921 // heap. Thus JS_RECEIVERs must have maps with larger (compressed)
922 // addresses.
923 UseScratchRegisterScope temps(this);
924 Register scratch2 = temps.Acquire();
925 DCHECK(!AreAliased(scratch2, scratch));
926 LoadCompressedMap(scratch, heap_object, scratch2);
928 CompareTagged(scratch, scratch2);
929 } else {
930 static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
931 CompareObjectType(heap_object, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
932 }
933 b(to_condition(cc), target);
934}
935
937 MemOperand field_operand,
939 Register scratch) {
940#ifdef V8_ENABLE_SANDBOX
941 ASM_CODE_COMMENT(this);
942 Register handle = scratch;
944 LoadU32(handle, field_operand, scratch);
945 ResolveIndirectPointerHandle(destination, handle, tag, scratch);
946#else
947 UNREACHABLE();
948#endif // V8_ENABLE_SANDBOX
949}
950
952 MemOperand dst_field_operand,
953 Register scratch) {
954#ifdef V8_ENABLE_SANDBOX
955 ASM_CODE_COMMENT(this);
956 UseScratchRegisterScope temps(this);
957 Register scratch2 = temps.Acquire();
958 DCHECK(!AreAliased(scratch, scratch2));
959 LoadU32(
960 scratch2,
961 FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset),
962 scratch);
963 StoreU32(scratch2, dst_field_operand, scratch);
964#else
965 UNREACHABLE();
966#endif // V8_ENABLE_SANDBOX
967}
968
969#ifdef V8_ENABLE_SANDBOX
970void MacroAssembler::ResolveIndirectPointerHandle(Register destination,
971 Register handle,
973 Register scratch) {
974 // Pointer resolution will fail in several paths if handle == r0
975 DCHECK(!AreAliased(handle, r0));
976
977 // The tag implies which pointer table to use.
978 if (tag == kUnknownIndirectPointerTag) {
979 // In this case we have to rely on the handle marking to determine which
980 // pointer table to use.
981 Label is_trusted_pointer_handle, done;
982 mov(scratch, Operand(kCodePointerHandleMarker));
983 AndU64(scratch, handle, scratch, SetRC);
984 beq(&is_trusted_pointer_handle, cr0);
985 ResolveCodePointerHandle(destination, handle, scratch);
986 b(&done);
987 bind(&is_trusted_pointer_handle);
988 ResolveTrustedPointerHandle(destination, handle, kUnknownIndirectPointerTag,
989 scratch);
990 bind(&done);
991 } else if (tag == kCodeIndirectPointerTag) {
992 ResolveCodePointerHandle(destination, handle, scratch);
993 } else {
994 ResolveTrustedPointerHandle(destination, handle, tag, scratch);
995 }
996}
997
998void MacroAssembler::ResolveTrustedPointerHandle(Register destination,
999 Register handle,
1001 Register scratch) {
1002 DCHECK_NE(tag, kCodeIndirectPointerTag);
1004
1006 Register table = destination;
1007 Move(table, ExternalReference::trusted_pointer_table_base_address(isolate()));
1010 LoadU64(destination, MemOperand(table, handle), scratch);
1011 // The LSB is used as marking bit by the trusted pointer table, so here we
1012 // have to set it using a bitwise OR as it may or may not be set.
1013 mov(handle, Operand(kHeapObjectTag));
1015}
1016
1017void MacroAssembler::ResolveCodePointerHandle(Register destination,
1018 Register handle,
1019 Register scratch) {
1021
1022 Register table = destination;
1023 LoadCodePointerTableBase(table);
1026 AddS64(handle, table, handle);
1029 // The LSB is used as marking bit by the code pointer table, so here we have
1030 // to set it using a bitwise OR as it may or may not be set.
1031 mov(handle, Operand(kHeapObjectTag));
1033}
1034
1035void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,
1036 MemOperand field_operand,
1037 Register scratch) {
1038 ASM_CODE_COMMENT(this);
1039
1040 // Due to register pressure, table is also used as a scratch register
1041 DCHECK(destination != r0);
1042 Register table = scratch;
1043 LoadU32(destination, field_operand, scratch);
1044 LoadCodePointerTableBase(table);
1045 // TODO(tpearson): can the offset computation be done more efficiently?
1050}
1051
1052void MacroAssembler::LoadCodePointerTableBase(Register destination) {
1053#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
1054 if (!options().isolate_independent_code && isolate()) {
1055 // Embed the code pointer table address into the code.
1057 ExternalReference::code_pointer_table_base_address(isolate()));
1058 } else {
1059 // Force indirect load via root register as a workaround for
1060 // isolate-independent code (for example, for Wasm).
1064 destination));
1065 }
1066#else
1067 // Embed the code pointer table address into the code.
1069 ExternalReference::global_code_pointer_table_base_address());
1070#endif // V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
1071}
1072#endif // V8_ENABLE_SANDBOX
1073
1074void MacroAssembler::Zero(const MemOperand& dest) {
1075 ASM_CODE_COMMENT(this);
1076 Register scratch = r0;
1077
1078 mov(scratch, Operand::Zero());
1079 StoreU64(scratch, dest);
1080}
1081void MacroAssembler::Zero(const MemOperand& dest1, const MemOperand& dest2) {
1082 ASM_CODE_COMMENT(this);
1083 Register scratch = r0;
1084
1085 mov(scratch, Operand::Zero());
1086 StoreU64(scratch, dest1);
1087 StoreU64(scratch, dest2);
1088}
1089
1091 if (registers.is_empty()) return;
1093}
1094
1096 if (registers.is_empty()) return;
1098}
1099
1100void MacroAssembler::CallEphemeronKeyBarrier(Register object,
1101 Register slot_address,
1102 SaveFPRegsMode fp_mode) {
1103 DCHECK(!AreAliased(object, slot_address));
1107
1109 Register slot_address_parameter =
1111
1112 // TODO(tpearson): The following is equivalent to
1113 // MovePair(slot_address_parameter, slot_address, object_parameter, object);
1114 // Implement with MoveObjectAndSlot()
1115 push(object);
1116 push(slot_address);
1117 pop(slot_address_parameter);
1118 pop(object_parameter);
1119
1122}
1123
1125 Register slot_address,
1126 SaveFPRegsMode fp_mode,
1127 IndirectPointerTag tag) {
1128 ASM_CODE_COMMENT(this);
1129 DCHECK(!AreAliased(object, slot_address));
1132 object, slot_address);
1134
1135 Register object_parameter =
1137 Register slot_address_parameter =
1139 Register tag_parameter =
1141 DCHECK(!AreAliased(object_parameter, slot_address_parameter, tag_parameter));
1142
1143 // TODO(tpearson): The following is equivalent to
1144 // MovePair(slot_address_parameter, slot_address, object_parameter, object);
1145 // Implement with MoveObjectAndSlot()
1146 push(object);
1147 push(slot_address);
1148 pop(slot_address_parameter);
1149 pop(object_parameter);
1150
1151 mov(tag_parameter, Operand(tag));
1152
1155}
1156
1158 Register slot_address,
1159 SaveFPRegsMode fp_mode,
1160 StubCallMode mode) {
1161 DCHECK(!AreAliased(object, slot_address));
1165
1167 Register slot_address_parameter =
1169
1170 push(object);
1171 push(slot_address);
1172 pop(slot_address_parameter);
1173 pop(object_parameter);
1174
1175 CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode);
1176
1178}
1179
1180void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
1181 SaveFPRegsMode fp_mode,
1182 StubCallMode mode) {
1183 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
1184 // need to be caller saved.
1187#if V8_ENABLE_WEBASSEMBLY
1188 if (mode == StubCallMode::kCallWasmRuntimeStub) {
1189 // Use {near_call} for direct Wasm call within a module.
1190 auto wasm_target =
1191 static_cast<Address>(wasm::WasmCode::GetRecordWriteBuiltin(fp_mode));
1192 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
1193#else
1194 if (false) {
1195#endif
1196 } else {
1198 }
1199}
1200
1201// Will clobber 4 registers: object, address, scratch, ip. The
1202// register 'object' contains a heap object pointer. The heap object
1203// tag is shifted away.
1204void MacroAssembler::RecordWrite(Register object, Register slot_address,
1205 Register value, LinkRegisterStatus lr_status,
1206 SaveFPRegsMode fp_mode, SmiCheck smi_check,
1207 SlotDescriptor slot) {
1208 ASM_CODE_COMMENT(this);
1209 DCHECK(!AreAliased(object, value, slot_address));
1210 if (v8_flags.slow_debug_code) {
1211 Register value_check = r0;
1212 // TODO(tpearson): Figure out why ScratchRegisterScope returns a
1213 // register that is aliased with one of our other in-use registers
1214 // For now, use r11 (kScratchReg in the code generator)
1215 Register scratch = r11;
1216 ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
1217 DCHECK(!AreAliased(object, value, value_check, scratch));
1218 if (slot.contains_indirect_pointer()) {
1219 LoadIndirectPointerField(value_check, MemOperand(slot_address),
1220 slot.indirect_pointer_tag(), scratch);
1221 } else {
1222 DCHECK(slot.contains_direct_pointer());
1223 LoadTaggedField(value_check, MemOperand(slot_address));
1224 }
1225 CmpS64(value_check, value);
1226 Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
1227 }
1228
1229 if (v8_flags.disable_write_barriers) {
1230 return;
1231 }
1232
1233 // First, check if a write barrier is even needed. The tests below
1234 // catch stores of smis and stores into the young generation.
1235 Label done;
1236
1237 if (smi_check == SmiCheck::kInline) {
1238 JumpIfSmi(value, &done);
1239 }
1240
1241 CheckPageFlag(value,
1242 value, // Used as scratch.
1244 CheckPageFlag(object,
1245 value, // Used as scratch.
1247
1248 // Record the actual write.
1249 if (lr_status == kLRHasNotBeenSaved) {
1250 mflr(r0);
1251 push(r0);
1252 }
1253 if (slot.contains_direct_pointer()) {
1254 CallRecordWriteStubSaveRegisters(object, slot_address, fp_mode,
1256 } else {
1257 DCHECK(slot.contains_indirect_pointer());
1258 CallIndirectPointerBarrier(object, slot_address, fp_mode,
1259 slot.indirect_pointer_tag());
1260 }
1261 if (lr_status == kLRHasNotBeenSaved) {
1262 pop(r0);
1263 mtlr(r0);
1264 }
1265
1266 if (v8_flags.slow_debug_code) mov(slot_address, Operand(kZapValue));
1267
1268 bind(&done);
1269
1270 // Clobber clobbered registers when running with the debug-code flag
1271 // turned on to provoke errors.
1272 if (v8_flags.slow_debug_code) {
1273 mov(slot_address, Operand(base::bit_cast<intptr_t>(kZapValue + 12)));
1274 mov(value, Operand(base::bit_cast<intptr_t>(kZapValue + 16)));
1275 }
1276}
1277
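// PushCommonFrame saves the return address (via r0) and the caller's fp, plus
// the constant pool pointer when embedded constant pools are enabled and an
// optional frame marker, then points fp at the saved fp slot.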
1278void MacroAssembler::PushCommonFrame(Register marker_reg) {
1279 int fp_delta = 0;
1280 mflr(r0);
1282 if (marker_reg.is_valid()) {
1283 Push(r0, fp, kConstantPoolRegister, marker_reg);
1284 fp_delta = 2;
1285 } else {
1286 Push(r0, fp, kConstantPoolRegister);
1287 fp_delta = 1;
1288 }
1289 } else {
1290 if (marker_reg.is_valid()) {
1291 Push(r0, fp, marker_reg);
1292 fp_delta = 1;
1293 } else {
1294 Push(r0, fp);
1295 fp_delta = 0;
1296 }
1297 }
1298 addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
1299}
1300
1301void MacroAssembler::PushStandardFrame(Register function_reg) {
1302 int fp_delta = 0;
1303 mflr(r0);
1305 if (function_reg.is_valid()) {
1306 Push(r0, fp, kConstantPoolRegister, cp, function_reg);
1307 fp_delta = 3;
1308 } else {
1309 Push(r0, fp, kConstantPoolRegister, cp);
1310 fp_delta = 2;
1311 }
1312 } else {
1313 if (function_reg.is_valid()) {
1314 Push(r0, fp, cp, function_reg);
1315 fp_delta = 2;
1316 } else {
1317 Push(r0, fp, cp);
1318 fp_delta = 1;
1319 }
1320 }
1321 addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
1323}
1324
1330 }
1333 mtlr(r0);
1334}
1335
1337 const DoubleRegister src) {
1338 // Turn potential sNaN into qNaN.
1339 fsub(dst, src, kDoubleRegZero);
1340}
1341
1342void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
1343 MovIntToDouble(dst, src, r0);
1344 fcfid(dst, dst);
1345}
1346
1348 DoubleRegister dst) {
1349 MovUnsignedIntToDouble(dst, src, r0);
1350 fcfid(dst, dst);
1351}
1352
1353void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
1354 MovIntToDouble(dst, src, r0);
1355 fcfids(dst, dst);
1356}
1357
1359 DoubleRegister dst) {
1360 MovUnsignedIntToDouble(dst, src, r0);
1361 fcfids(dst, dst);
1362}
1363
1364void MacroAssembler::ConvertInt64ToDouble(Register src,
1365 DoubleRegister double_dst) {
1366 MovInt64ToDouble(double_dst, src);
1367 fcfid(double_dst, double_dst);
1368}
1369
1371 DoubleRegister double_dst) {
1372 MovInt64ToDouble(double_dst, src);
1373 fcfidus(double_dst, double_dst);
1374}
1375
1377 DoubleRegister double_dst) {
1378 MovInt64ToDouble(double_dst, src);
1379 fcfidu(double_dst, double_dst);
1380}
1381
1382void MacroAssembler::ConvertInt64ToFloat(Register src,
1383 DoubleRegister double_dst) {
1384 MovInt64ToDouble(double_dst, src);
1385 fcfids(double_dst, double_dst);
1386}
1387
1389 const Register dst,
1390 const DoubleRegister double_dst,
1392 if (rounding_mode == kRoundToZero) {
1393 fctidz(double_dst, double_input);
1394 } else {
1396 fctid(double_dst, double_input);
1398 }
1399
1401 dst, double_dst);
1402}
1403
1405 const DoubleRegister double_input, const Register dst,
1406 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
1407 if (rounding_mode == kRoundToZero) {
1408 fctiduz(double_dst, double_input);
1409 } else {
1411 fctidu(double_dst, double_input);
1413 }
1414
1415 MovDoubleToInt64(dst, double_dst);
1416}
1417
1419 Register code_target_address, Register scratch1, Register scratch2) {
1420 // Builtins do not use the constant pool (see is_constant_pool_available).
1422
1423#ifdef V8_ENABLE_SANDBOX
1424 LoadCodeEntrypointViaCodePointer(
1425 scratch2,
1426 FieldMemOperand(code_target_address, Code::kSelfIndirectPointerOffset),
1427 scratch1);
1428#else
1429 LoadU64(scratch2,
1430 FieldMemOperand(code_target_address, Code::kInstructionStartOffset),
1431 scratch1);
1432#endif
1433 LoadU32(scratch1,
1434 FieldMemOperand(code_target_address, Code::kInstructionSizeOffset),
1435 scratch1);
1436 add(scratch2, scratch1, scratch2);
1438 FieldMemOperand(code_target_address, Code::kConstantPoolOffsetOffset),
1439 scratch1);
1441}
1442
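// LoadPC materializes the current program counter by branching to the
// immediately following instruction with the link bit set and then copying
// the link register into |dst|.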
1443void MacroAssembler::LoadPC(Register dst) {
1444 b(4, SetLK);
1445 mflr(dst);
1446}
1447
1448void MacroAssembler::ComputeCodeStartAddress(Register dst) {
1449 mflr(r0);
1450 LoadPC(dst);
1451 subi(dst, dst, Operand(pc_offset() - kInstrSize));
1452 mtlr(r0);
1453}
1454
1456 //
1457 // Builtins do not use the constant pool (see is_constant_pool_available).
1459
1461 int32_t delta = -pc_offset() + 4;
1463 ConstantPoolPosition(), delta);
1464}
1465
1467 {
1468 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1469 mov(r11, Operand(StackFrame::TypeToMarker(type)));
1471 }
1475 }
1476}
1477
1481 // base contains prologue address
1484 }
1485}
1486
1487void MacroAssembler::DropArguments(Register count) {
1488 ShiftLeftU64(ip, count, Operand(kSystemPointerSizeLog2));
1489 add(sp, sp, ip);
1490}
1491
1493 Register receiver) {
1494 DCHECK(!AreAliased(argc, receiver));
1495 DropArguments(argc);
1496 push(receiver);
1497}
1498
1500 bool load_constant_pool_pointer_reg) {
1501 if (V8_EMBEDDED_CONSTANT_POOL_BOOL && load_constant_pool_pointer_reg) {
1502 // Push type explicitly so we can leverage the constant pool.
1503 // This path cannot rely on ip containing code entry.
1506 if (!StackFrame::IsJavaScript(type)) {
1507 mov(ip, Operand(StackFrame::TypeToMarker(type)));
1508 push(ip);
1509 }
1510 } else {
1511 Register scratch = no_reg;
1512 if (!StackFrame::IsJavaScript(type)) {
1513 scratch = ip;
1514 mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1515 }
1516 PushCommonFrame(scratch);
1517 }
1518#if V8_ENABLE_WEBASSEMBLY
1519 if (type == StackFrame::WASM) Push(kWasmImplicitArgRegister);
1520#endif // V8_ENABLE_WEBASSEMBLY
1521}
1522
1523int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
1524 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1525 // r3: preserved
1526 // r4: preserved
1527 // r5: preserved
1528
1529 // Drop the execution stack down to the frame pointer and restore
1530 // the caller's state.
1531 int frame_ends;
1537 }
1538 mtlr(r0);
1539 frame_ends = pc_offset();
1540 AddS64(sp, fp,
1541 Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment),
1542 r0);
1543 mr(fp, ip);
1544 return frame_ends;
1545}
1546
1547// ExitFrame layout (probably wrongish.. needs updating)
1548//
1549// SP -> previousSP
1550// LK reserved
1551// sp_on_exit (for debug?)
1552// oldSP->prev SP
1553// LK
1554// <parameters on stack>
1555
1556// Prior to calling EnterExitFrame, we've got a bunch of parameters
1557// on the stack that we need to wrap a real frame around.. so first
1558// we reserve a slot for LK and push the previous SP which is captured
1559// in the fp register (r31)
1560// Then - we buy a new frame
1561
1562void MacroAssembler::EnterExitFrame(Register scratch, int stack_space,
1563 StackFrame::Type frame_type) {
1564 DCHECK(frame_type == StackFrame::EXIT ||
1565 frame_type == StackFrame::BUILTIN_EXIT ||
1566 frame_type == StackFrame::API_ACCESSOR_EXIT ||
1567 frame_type == StackFrame::API_CALLBACK_EXIT);
1568
1569 using ER = ExternalReference;
1570
1571 // Set up the frame structure on the stack.
1575
1576 // This is an opportunity to build a frame to wrap
1577 // all of the pushes that have happened inside of V8
1578 // since we were called from C code
1579
1580 mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
1581 PushCommonFrame(ip);
1582 // Reserve room for saved entry sp.
1584
1585 if (v8_flags.debug_code) {
1586 li(r8, Operand::Zero());
1588 }
1592 }
1593
1594 // Save the frame pointer and the context in top.
1595 ER c_entry_fp_address =
1596 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
1597 StoreU64(fp, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
1598
1599 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
1600 StoreU64(cp, ExternalReferenceAsOperand(context_address, no_reg));
1601
1602 AddS64(sp, sp, Operand(-(stack_space + 1) * kSystemPointerSize));
1603
1604 // Allocate and align the frame preparing for calling the runtime
1605 // function.
1606 const int frame_alignment = ActivationFrameAlignment();
1607 if (frame_alignment > kSystemPointerSize) {
1608 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1609 ClearRightImm(sp, sp,
1610 Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
1611 }
1612 li(r0, Operand::Zero());
1615
1616 // Set the exit frame sp value to point just before the return address
1617 // location.
1618 AddS64(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize),
1619 r0);
1621}
1622
1624#if !defined(USE_SIMULATOR)
1625 // Running on the real platform. Use the alignment as mandated by the local
1626 // environment.
1627 // Note: This will break if we ever start generating snapshots on one PPC
1628 // platform for another PPC platform with a different alignment.
1630#else // Simulated
1631 // If we are using the simulator then we should always align to the expected
1632 // alignment. As the simulator is used to generate snapshots we do not know
1633 // if the target platform will need alignment, so this is controlled from a
1634 // flag.
1635 return v8_flags.sim_stack_alignment;
1636#endif
1637}
1638
1639void MacroAssembler::LeaveExitFrame(Register scratch) {
1640 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1641
1642 using ER = ExternalReference;
1643
1644 // Restore current context from top and clear it in debug mode.
1645 ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
1646 LoadU64(cp, ExternalReferenceAsOperand(context_address, no_reg));
1647
1648#ifdef DEBUG
1649 mov(scratch, Operand(Context::kInvalidContext));
1650 StoreU64(scratch, ExternalReferenceAsOperand(context_address, no_reg));
1651#endif
1652
1653 // Clear the top frame.
1654 ER c_entry_fp_address =
1655 ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
1656 mov(scratch, Operand::Zero());
1657 StoreU64(scratch, ExternalReferenceAsOperand(c_entry_fp_address, no_reg));
1658
1659 // Tear down the exit frame, pop the arguments, and return.
1660 LeaveFrame(StackFrame::EXIT);
1661}
1662
1664 Move(dst, d1);
1665}
1666
1668 Move(dst, d1);
1669}
1670
1672 Register scratch) {
1676 : IsolateData::jslimit_offset();
1677 CHECK(is_int32(offset));
1679}
1680
1681void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
1682 Label* stack_overflow) {
1683 // Check the stack for overflow. We are not trying to catch
1684 // interruptions (e.g. debug break and preemption) here, so the "real stack
1685 // limit" is checked.
1687 // Make scratch the space we have left. The stack might already be overflowed
1688 // here which will cause scratch to become negative.
1689 sub(scratch, sp, scratch);
1690 // Check if the arguments will overflow the stack.
1691 ShiftLeftU64(r0, num_args, Operand(kSystemPointerSizeLog2));
1692 CmpS64(scratch, r0);
1693 ble(stack_overflow); // Signed comparison.
1694}
1695
1696void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1697 Register actual_parameter_count,
1698 InvokeType type) {
1699 Label regular_invoke;
1700
1701 // r3: actual arguments count
1702 // r4: function (passed through to callee)
1703 // r5: expected arguments count
1704
1705 DCHECK_EQ(actual_parameter_count, r3);
1706 DCHECK_EQ(expected_parameter_count, r5);
1707
1708 // If overapplication or if the actual argument count is equal to the
1709 // formal parameter count, no need to push extra undefined values.
1710 sub(expected_parameter_count, expected_parameter_count,
1711 actual_parameter_count, LeaveOE, SetRC);
1712 ble(&regular_invoke, cr0);
1713
1714 Label stack_overflow;
1715 Register scratch = r7;
1716 StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
1717
1718 // Underapplication. Move the arguments already in the stack, including the
1719 // receiver and the return address.
1720 {
1721 Label copy, skip;
1722 Register src = r9, dest = r8;
1723 addi(src, sp, Operand(-kSystemPointerSize));
1724 ShiftLeftU64(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
1725 sub(sp, sp, r0);
1726 // Update stack pointer.
1727 addi(dest, sp, Operand(-kSystemPointerSize));
1728 mr(r0, actual_parameter_count);
1729 cmpi(r0, Operand::Zero());
1730 ble(&skip);
1731 mtctr(r0);
1732
1733 bind(&copy);
1736 bdnz(&copy);
1737 bind(&skip);
1738 }
1739
1740 // Fill remaining expected arguments with undefined values.
1741 LoadRoot(scratch, RootIndex::kUndefinedValue);
1742 {
1743 mtctr(expected_parameter_count);
1744
1745 Label loop;
1746 bind(&loop);
1748 bdnz(&loop);
1749 }
1750 b(&regular_invoke);
1751
1752 bind(&stack_overflow);
1753 {
1754 FrameScope frame(
1755 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1756 CallRuntime(Runtime::kThrowStackOverflow);
1757 bkpt(0);
1758 }
1759
1760 bind(&regular_invoke);
1761}
1762
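// CheckDebugHook calls Runtime::kDebugOnFunctionCall when the debug hook for
// function calls is armed, preserving the function, new.target, and the
// (Smi-tagged) parameter counts across the runtime call.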
1763void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1764 Register expected_parameter_count,
1765 Register actual_parameter_count) {
1766 Label skip_hook;
1767
1768 ExternalReference debug_hook_active =
1769 ExternalReference::debug_hook_on_function_call_address(isolate());
1770 Move(r7, debug_hook_active);
1771 LoadU8(r7, MemOperand(r7), r0);
1772 extsb(r7, r7);
1773 CmpSmiLiteral(r7, Smi::zero(), r0);
1774 beq(&skip_hook);
1775
1776 {
1777 // Load receiver to pass it later to DebugOnFunctionCall hook.
1778 LoadReceiver(r7);
1779 FrameScope frame(
1780 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1781
1782 SmiTag(expected_parameter_count);
1783 Push(expected_parameter_count);
1784
1785 SmiTag(actual_parameter_count);
1786 Push(actual_parameter_count);
1787
1788 if (new_target.is_valid()) {
1790 }
1791 Push(fun, fun, r7);
1792 CallRuntime(Runtime::kDebugOnFunctionCall);
1793 Pop(fun);
1794 if (new_target.is_valid()) {
1795 Pop(new_target);
1796 }
1797
1798 Pop(actual_parameter_count);
1799 SmiUntag(actual_parameter_count);
1800
1801 Pop(expected_parameter_count);
1802 SmiUntag(expected_parameter_count);
1803 }
1804 bind(&skip_hook);
1805}
1806
1807void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1808 Register expected_parameter_count,
1809 Register actual_parameter_count,
1810 InvokeType type) {
1811 // You can't call a function without a valid frame.
1813 DCHECK_EQ(function, r4);
1814 DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1815
1816 // On function call, call into the debugger if necessary.
1817 CheckDebugHook(function, new_target, expected_parameter_count,
1818 actual_parameter_count);
1819
1820 // Clear the new.target register if not given.
1821 if (!new_target.is_valid()) {
1822 LoadRoot(r6, RootIndex::kUndefinedValue);
1823 }
1824
1825 InvokePrologue(expected_parameter_count, actual_parameter_count, type);
1826 // We call indirectly through the code field in the function to
1827 // allow recompilation to take effect without changing any of the
1828 // call sites.
1829 constexpr int unused_argument_count = 0;
1830 switch (type) {
1831 case InvokeType::kCall:
1832 CallJSFunction(function, unused_argument_count, r0);
1833 break;
1834 case InvokeType::kJump:
1835 JumpJSFunction(function, r0);
1836 break;
1837 }
1838}
1839
1841 Register fun, Register new_target, Register actual_parameter_count,
1842 InvokeType type) {
1843 // You can't call a function without a valid frame.
1845
1846 // Contract with called JS functions requires that function is passed in r4.
1847 DCHECK_EQ(fun, r4);
1848
1849 Register expected_reg = r5;
1850 Register temp_reg = r7;
1851
1853 temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
1854 LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
1855 LoadU16(expected_reg,
1856 FieldMemOperand(temp_reg,
1857 SharedFunctionInfo::kFormalParameterCountOffset));
1858
1859 InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1860 type);
1861}
1862
1863void MacroAssembler::InvokeFunction(Register function,
1864 Register expected_parameter_count,
1865 Register actual_parameter_count,
1866 InvokeType type) {
1867 // You can't call a function without a valid frame.
1869
1870 // Contract with called JS functions requires that function is passed in r4.
1871 DCHECK_EQ(function, r4);
1872
1873 // Get the function and setup the context.
1874 LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
1875
1876 InvokeFunctionCode(r4, no_reg, expected_parameter_count,
1877 actual_parameter_count, type);
1878}
1879
1881 // Adjust this code if not the case.
1884
1885 Push(Smi::zero()); // Padding.
1886
1887 // Link the current handler as the next handler.
1888 // Preserve r4-r8.
1889 Move(r3,
1890 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1891 LoadU64(r0, MemOperand(r3));
1892 push(r0);
1893
1894 // Set this new handler as the current one.
1895 StoreU64(sp, MemOperand(r3));
1896}
1897
1900 static_assert(StackHandlerConstants::kNextOffset == 0);
1901
1902 pop(r4);
1903 Move(ip,
1904 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1905 StoreU64(r4, MemOperand(ip));
1906
1907 Drop(1); // Drop padding.
1908}
1909
1910#if V8_STATIC_ROOTS_BOOL
1911void MacroAssembler::CompareInstanceTypeWithUniqueCompressedMap(
1912 Register map, Register scratch, InstanceType type) {
1913 std::optional<RootIndex> expected =
1915 CHECK(expected);
1916 Tagged_t expected_ptr = ReadOnlyRootPtr(*expected);
1917 DCHECK_NE(map, scratch);
1918 UseScratchRegisterScope temps(this);
1919 CHECK(scratch != Register::no_reg() || temps.CanAcquire());
1920 if (scratch == Register::no_reg()) {
1921 // TODO(tpearson): Figure out why ScratchRegisterScope returns a
1922 // register that is aliased with one of our other in-use registers
1923 // For now, use r11 (kScratchReg in the code generator)
1924 scratch = r11;
1925 DCHECK_NE(map, scratch);
1926 }
1927 mov(scratch, Operand(expected_ptr));
1928 CompareTagged(map, scratch);
1929}
1930
1931void MacroAssembler::IsObjectTypeFast(Register object,
1932 Register compressed_map_scratch,
1933 InstanceType type, Register scratch) {
1934 ASM_CODE_COMMENT(this);
1936 LoadCompressedMap(compressed_map_scratch, object, scratch);
1937 CompareInstanceTypeWithUniqueCompressedMap(compressed_map_scratch,
1938 Register::no_reg(), type);
1939}
1940#endif // V8_STATIC_ROOTS_BOOL
1941
1942// Sets equality condition flags.
1943void MacroAssembler::IsObjectType(Register object, Register scratch1,
1944 Register scratch2, InstanceType type) {
1945 ASM_CODE_COMMENT(this);
1946
1947#if V8_STATIC_ROOTS_BOOL
1949 DCHECK((scratch1 != scratch2) || (scratch1 != r0));
1950 LoadCompressedMap(scratch1, object, scratch1 != scratch2 ? scratch2 : r0);
1951 CompareInstanceTypeWithUniqueCompressedMap(
1952 scratch1, scratch1 != scratch2 ? scratch2 : r0, type);
1953 return;
1954 }
1955#endif // V8_STATIC_ROOTS_BOOL
1956
1957 CompareObjectType(object, scratch1, scratch2, type);
1958}
1959
1960void MacroAssembler::CompareObjectType(Register object, Register map,
1961 Register type_reg, InstanceType type) {
1962 const Register temp = type_reg == no_reg ? r0 : type_reg;
1963
1964 LoadMap(map, object);
1965 CompareInstanceType(map, temp, type);
1966}
1967
1968void MacroAssembler::CompareObjectTypeRange(Register object, Register map,
1969 Register type_reg, Register scratch,
1970 InstanceType lower_limit,
1971 InstanceType upper_limit) {
1972 ASM_CODE_COMMENT(this);
1973 LoadMap(map, object);
1974 CompareInstanceTypeRange(map, type_reg, scratch, lower_limit, upper_limit);
1975}
1976
1977void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1978 InstanceType type) {
1979 static_assert(Map::kInstanceTypeOffset < 4096);
1980 static_assert(LAST_TYPE <= 0xFFFF);
1981 lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1982 cmpi(type_reg, Operand(type));
1983}
1984
1985void MacroAssembler::CompareRange(Register value, Register scratch,
1986 unsigned lower_limit, unsigned higher_limit) {
1987 ASM_CODE_COMMENT(this);
1988 DCHECK_LT(lower_limit, higher_limit);
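  // When lower_limit is non-zero, bias the value by lower_limit so that a
  // single unsigned comparison against the range width covers both bounds.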
1989 if (lower_limit != 0) {
1990 mov(scratch, Operand(lower_limit));
1991 sub(scratch, value, scratch);
1992 cmpli(scratch, Operand(higher_limit - lower_limit));
1993 } else {
1994 mov(scratch, Operand(higher_limit));
1995 CmpU64(value, scratch);
1996 }
1997}
1998
1999void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
2000 Register scratch,
2001 InstanceType lower_limit,
2002 InstanceType higher_limit) {
2003 DCHECK_LT(lower_limit, higher_limit);
2004 LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2005 CompareRange(type_reg, scratch, lower_limit, higher_limit);
2006}
2007
2008void MacroAssembler::CompareTaggedRoot(const Register& obj, RootIndex index) {
2009 ASM_CODE_COMMENT(this);
2010 // Use r0 as a safe scratch register here, since temps.Acquire() tends
2011 // to spit back the register being passed as an argument in obj...
2012 Register temp = r0;
2013 DCHECK(!AreAliased(obj, temp));
2014
2016 mov(temp, Operand(ReadOnlyRootPtr(index)));
2017 CompareTagged(obj, temp);
2018 return;
2019 }
2020 // Some smi roots contain system pointer size values like stack limits.
2023 LoadRoot(temp, index);
2024 CompareTagged(obj, temp);
2025}
2026
2027void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
2028 ASM_CODE_COMMENT(this);
2029 // Use r0 as a safe scratch register here, since temps.Acquire() tends
2030 // to spit back the register being passed as an argument in obj...
2031 Register temp = r0;
2034 // Some smi roots contain system pointer size values like stack limits.
2035 DCHECK(!AreAliased(obj, temp));
2036 LoadRoot(temp, index);
2037 CmpU64(obj, temp);
2038 return;
2039 }
2040 CompareTaggedRoot(obj, index);
2041}
2042
2043void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2044 Register right,
2045 Register overflow_dst,
2046 Register scratch) {
2047 DCHECK(dst != overflow_dst);
2048 DCHECK(dst != scratch);
2049 DCHECK(overflow_dst != scratch);
2050 DCHECK(overflow_dst != left);
2051 DCHECK(overflow_dst != right);
2052
2053 bool left_is_right = left == right;
2054 RCBit xorRC = left_is_right ? SetRC : LeaveRC;
2055
2056 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
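  // Illustrative 8-bit example: 0x70 + 0x70 = 0xE0; dst^left and dst^right
  // both have the sign bit set, so their AND is negative and SetRC makes the
  // overflow visible in cr0.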
2057 if (dst == left) {
2058 mr(scratch, left); // Preserve left.
2059 add(dst, left, right); // Left is overwritten.
2060 xor_(overflow_dst, dst, scratch, xorRC); // Original left.
2061 if (!left_is_right) xor_(scratch, dst, right);
2062 } else if (dst == right) {
2063 mr(scratch, right); // Preserve right.
2064 add(dst, left, right); // Right is overwritten.
2065 xor_(overflow_dst, dst, left, xorRC);
2066 if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
2067 } else {
2068 add(dst, left, right);
2069 xor_(overflow_dst, dst, left, xorRC);
2070 if (!left_is_right) xor_(scratch, dst, right);
2071 }
2072 if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
2073}
2074
2075void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2076 intptr_t right,
2077 Register overflow_dst,
2078 Register scratch) {
2079 Register original_left = left;
2080 DCHECK(dst != overflow_dst);
2081 DCHECK(dst != scratch);
2082 DCHECK(overflow_dst != scratch);
2083 DCHECK(overflow_dst != left);
2084
2085 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
2086 if (dst == left) {
2087 // Preserve left.
2088 original_left = overflow_dst;
2089 mr(original_left, left);
2090 }
2091 AddS64(dst, left, Operand(right), scratch);
2092 xor_(overflow_dst, dst, original_left);
2093 if (right >= 0) {
2094 and_(overflow_dst, overflow_dst, dst, SetRC);
2095 } else {
2096 andc(overflow_dst, overflow_dst, dst, SetRC);
2097 }
2098}
2099
2100void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
2101 Register right,
2102 Register overflow_dst,
2103 Register scratch) {
2104 DCHECK(dst != overflow_dst);
2105 DCHECK(dst != scratch);
2106 DCHECK(overflow_dst != scratch);
2107 DCHECK(overflow_dst != left);
2108 DCHECK(overflow_dst != right);
2109
2110 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
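  // Branch-free detection: (dst ^ left) & (left ^ right) has its sign bit set
  // exactly when the operands have different signs and the difference's sign
  // differs from left's, i.e. on signed overflow.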
2111 if (dst == left) {
2112 mr(scratch, left); // Preserve left.
2113 sub(dst, left, right); // Left is overwritten.
2114 xor_(overflow_dst, dst, scratch);
2115 xor_(scratch, scratch, right);
2116 and_(overflow_dst, overflow_dst, scratch, SetRC);
2117 } else if (dst == right) {
2118 mr(scratch, right); // Preserve right.
2119 sub(dst, left, right); // Right is overwritten.
2120 xor_(overflow_dst, dst, left);
2121 xor_(scratch, left, scratch);
2122 and_(overflow_dst, overflow_dst, scratch, SetRC);
2123 } else {
2124 sub(dst, left, right);
2125 xor_(overflow_dst, dst, left);
2126 xor_(scratch, left, right);
2127 and_(overflow_dst, scratch, overflow_dst, SetRC);
2128 }
2129}
2130
2132 DoubleRegister rhs, DoubleRegister scratch) {
2133 Label return_nan, done;
2134 fcmpu(lhs, rhs);
2135 bunordered(&return_nan);
2136 xsmindp(dst, lhs, rhs);
2137 b(&done);
2138 bind(&return_nan);
2139 /* If left or right are NaN, fadd propagates the appropriate one.*/
2140 fadd(dst, lhs, rhs);
2141 bind(&done);
2142}
2143
2145 DoubleRegister rhs, DoubleRegister scratch) {
2146 Label return_nan, done;
2147 fcmpu(lhs, rhs);
2148 bunordered(&return_nan);
2149 xsmaxdp(dst, lhs, rhs);
2150 b(&done);
2151 bind(&return_nan);
2152 /* If left or right are NaN, fadd propagates the appropriate one.*/
2153 fadd(dst, lhs, rhs);
2154 bind(&done);
2155}
2156
2157void MacroAssembler::JumpIfIsInRange(Register value, Register scratch,
2158 unsigned lower_limit,
2159 unsigned higher_limit,
2160 Label* on_in_range) {
2161 CompareRange(value, scratch, lower_limit, higher_limit);
2162 ble(on_in_range);
2163}
2164
2165void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2166 Register result,
2167 DoubleRegister double_input,
2168 StubCallMode stub_mode) {
2169 Label done;
2170
2171 TryInlineTruncateDoubleToI(result, double_input, &done);
2172
2173 // If we fell through then inline version didn't succeed - call stub instead.
2174 mflr(r0);
2175 push(r0);
2176 // Put input on stack.
2177 stfdu(double_input, MemOperand(sp, -kDoubleSize));
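  // The DoubleToI builtin takes its argument from this stack slot and leaves
  // the truncated result in the same slot; it is read back and popped below.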
2178
2179#if V8_ENABLE_WEBASSEMBLY
2180 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2181 Call(static_cast<Address>(Builtin::kDoubleToI), RelocInfo::WASM_STUB_CALL);
2182#else
2183 // For balance.
2184 if (false) {
2185#endif // V8_ENABLE_WEBASSEMBLY
2186 } else {
2187 CallBuiltin(Builtin::kDoubleToI);
2188 }
2189
2190 LoadU64(result, MemOperand(sp));
2191 addi(sp, sp, Operand(kDoubleSize));
2192 pop(r0);
2193 mtlr(r0);
2194
2195 bind(&done);
2196}
2197
2198void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2199 DoubleRegister double_input,
2200 Label* done) {
2201 DoubleRegister double_scratch = kScratchDoubleReg;
2202 ConvertDoubleToInt64(double_input,
2203 result, double_scratch);
2204
2205  // Test for overflow: the inline path succeeded only if the 64-bit result fits in an int32.
2206 TestIfInt32(result, r0);
2207 beq(done);
2208}
2209
2210namespace {
2211
2212#ifndef V8_ENABLE_LEAPTIERING
2213
2214void TailCallOptimizedCodeSlot(MacroAssembler* masm,
2215 Register optimized_code_entry,
2216 Register scratch) {
2217 // ----------- S t a t e -------------
2218 // -- r3 : actual argument count
2219 // -- r6 : new target (preserved for callee if needed, and caller)
2220 // -- r4 : target function (preserved for callee if needed, and caller)
2221 // -----------------------------------
2222 DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));
2223
2224 Register closure = r4;
2225 Label heal_optimized_code_slot;
2226
2227 // If the optimized code is cleared, go to runtime to update the optimization
2228 // marker field.
2229 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
2230 &heal_optimized_code_slot);
2231
2232 // The entry references a CodeWrapper object. Unwrap it now.
2233 __ LoadCodePointerField(
2234 optimized_code_entry,
2235 FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset), scratch);
2236
2237 // Check if the optimized code is marked for deopt. If it is, call the
2238 // runtime to clear it.
2239 {
2240 UseScratchRegisterScope temps(masm);
2241 __ TestCodeIsMarkedForDeoptimization(optimized_code_entry, temps.Acquire(),
2242 scratch);
2243 __ bne(&heal_optimized_code_slot, cr0);
2244 }
2245
2246 // Optimized code is good, get it into the closure and link the closure
2247 // into the optimized functions list, then tail call the optimized code.
2248 __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure, scratch,
2249 r8);
2250 static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
2251 __ LoadCodeInstructionStart(r5, optimized_code_entry);
2252 __ Jump(r5);
2253
2254 // Optimized code slot contains deoptimized code or code is cleared and
2255 // optimized code marker isn't updated. Evict the code, update the marker
2256 // and re-enter the closure's code.
2257 __ bind(&heal_optimized_code_slot);
2258 __ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot);
2259}
2260
2261#endif // V8_ENABLE_LEAPTIERING
2262
2263} // namespace
2264
2265#ifdef V8_ENABLE_DEBUG_CODE
2266void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) {
2267 if (v8_flags.debug_code) {
2268 CompareObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE);
2269 Assert(eq, AbortReason::kExpectedFeedbackCell);
2270 }
2271}
2272void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) {
2273 if (v8_flags.debug_code) {
2274 CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
2275 Assert(eq, AbortReason::kExpectedFeedbackVector);
2276 }
2277}
2278#endif // V8_ENABLE_DEBUG_CODE
2279
2280// Optimized code is good, get it into the closure and link the closure
2281// into the optimized functions list, then tail call the optimized code.
2282void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(
2283 Register optimized_code, Register closure, Register scratch1,
2284 Register slot_address) {
2285#ifdef V8_ENABLE_LEAPTIERING
2286 UNREACHABLE();
2287#else
2288 DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
2290 DCHECK(!AreAliased(optimized_code, closure));
2291 // Store code entry in the closure.
2292 StoreCodePointerField(optimized_code,
2293 FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
2294 // Write barrier clobbers scratch1 below.
2295 Register value = scratch1;
2296 mr(value, optimized_code);
2297
2298 RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
2301#endif // V8_ENABLE_LEAPTIERING
2302}
2303
2304void MacroAssembler::GenerateTailCallToReturnedCode(
2305 Runtime::FunctionId function_id) {
2306 // ----------- S t a t e -------------
2307 // -- r3 : actual argument count
2308 // -- r4 : target function (preserved for callee)
2309 // -- r6 : new target (preserved for callee)
2310 // -----------------------------------
2311 {
2312 FrameAndConstantPoolScope scope(this, StackFrame::INTERNAL);
2313 // Push a copy of the target function, the new target and the actual
2314 // argument count.
2315 // Push function as parameter to the runtime call.
2319
2320 CallRuntime(function_id, 1);
2321 mr(r5, r3);
2322
2323 // Restore target function, new target and actual argument count.
2327 }
2328 static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
2329 JumpCodeObject(r5);
2330}
2331
2332#ifndef V8_ENABLE_LEAPTIERING
2333
2334// Read off the flags in the feedback vector and check if there
2335// is optimized code or a tiering state that needs to be processed.
2336void MacroAssembler::CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
2337 Register flags, Register feedback_vector, CodeKind current_code_kind,
2338 Label* flags_need_processing) {
2339 ASM_CODE_COMMENT(this);
2340 DCHECK(!AreAliased(flags, feedback_vector));
2341 DCHECK(CodeKindCanTierUp(current_code_kind));
2342 LoadU16(flags,
2343 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
2347 if (current_code_kind != CodeKind::MAGLEV) {
2349 }
2350 CHECK(is_uint16(kFlagsMask));
2351 mov(r0, Operand(kFlagsMask));
2352 AndU32(r0, flags, r0, SetRC);
2353 bne(flags_need_processing, cr0);
2354}
2355
2356void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
2357 Register flags, Register feedback_vector) {
2358 DCHECK(!AreAliased(flags, feedback_vector));
2359 Label maybe_has_optimized_code, maybe_needs_logging;
2360 // Check if optimized code is available
2362 beq(&maybe_needs_logging, cr0);
2363
2364 GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
2365
2366 bind(&maybe_needs_logging);
2367 TestBitMask(flags, FeedbackVector::LogNextExecutionBit::kMask, r0);
2368 beq(&maybe_has_optimized_code, cr0);
2369 GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
2370
2371 bind(&maybe_has_optimized_code);
2372 Register optimized_code_entry = flags;
2373 LoadTaggedField(optimized_code_entry,
2374 FieldMemOperand(feedback_vector,
2375 FeedbackVector::kMaybeOptimizedCodeOffset),
2376 r0);
2377 TailCallOptimizedCodeSlot(this, optimized_code_entry, r9);
2378}
2379
2380#endif // !V8_ENABLE_LEAPTIERING
2381
2382void MacroAssembler::CallRuntime(const Runtime::Function* f,
2383 int num_arguments) {
2384 // All parameters are on the stack. r3 has the return value after call.
2385
2386 // If the expected number of arguments of the runtime function is
2387 // constant, we check that the actual number of arguments match the
2388 // expectation.
2389 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2390
2391 // TODO(1236192): Most runtime routines don't need the number of
2392 // arguments passed in because it is constant. At some point we
2393 // should remove this need and make the runtime routine entry code
2394 // smarter.
2395 mov(r3, Operand(num_arguments));
2397 bool switch_to_central_stack = options().is_wasm;
2398 CallBuiltin(Builtins::RuntimeCEntry(f->result_size, switch_to_central_stack));
2399}
2400
2401void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2402 const Runtime::Function* function = Runtime::FunctionForId(fid);
2403 DCHECK_EQ(1, function->result_size);
2404 if (function->nargs >= 0) {
2405 mov(r3, Operand(function->nargs));
2406 }
2407 JumpToExternalReference(ExternalReference::Create(fid));
2408}
2409
2410void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2411 bool builtin_exit_frame) {
2412 Move(r4, builtin);
2413 TailCallBuiltin(Builtins::CEntry(1, ArgvMode::kStack, builtin_exit_frame));
2414}
2415
2416void MacroAssembler::LoadWeakValue(Register out, Register in,
2417 Label* target_if_cleared) {
2418 CmpS32(in, Operand(kClearedWeakHeapObjectLower32), r0);
2419 beq(target_if_cleared);
2420
2421 mov(r0, Operand(~kWeakHeapObjectMask));
2422 and_(out, in, r0);
2423}
2424
2425void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
2426 Register scratch1,
2427 Register scratch2) {
2428 DCHECK_GT(value, 0);
2429 if (v8_flags.native_code_counters && counter->Enabled()) {
2430 // This operation has to be exactly 32-bit wide in case the external
2431 // reference table redirects the counter to a uint32_t dummy_stats_counter_
2432 // field.
2433 Move(scratch2, ExternalReference::Create(counter));
2434 lwz(scratch1, MemOperand(scratch2));
2435 addi(scratch1, scratch1, Operand(value));
2436 stw(scratch1, MemOperand(scratch2));
2437 }
2438}
2439
2440void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
2441 Register scratch1,
2442 Register scratch2) {
2443 DCHECK_GT(value, 0);
2444 if (v8_flags.native_code_counters && counter->Enabled()) {
2445 // This operation has to be exactly 32-bit wide in case the external
2446 // reference table redirects the counter to a uint32_t dummy_stats_counter_
2447 // field.
2448 Move(scratch2, ExternalReference::Create(counter));
2449 lwz(scratch1, MemOperand(scratch2));
2450 subi(scratch1, scratch1, Operand(value));
2451 stw(scratch1, MemOperand(scratch2));
2452 }
2453}
2454
2455void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
2456 Label L;
2457 b(cond, &L, cr);
2458 Abort(reason);
2459 // will not return here
2460 bind(&L);
2461}
2462
2464 ASM_CODE_COMMENT(this);
2465 if (v8_flags.code_comments) {
2466 RecordComment("Abort message:", SourceLocation{});
2467 RecordComment(GetAbortReason(reason), SourceLocation{});
2468 }
2469
2470 // Without debug code, save the code size and just trap.
2471 if (!v8_flags.debug_code || v8_flags.trap_on_abort) {
2472 stop();
2473 return;
2474 }
2475
2476 if (should_abort_hard()) {
2477 // We don't care if we constructed a frame. Just pretend we did.
2478 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
2479 mov(r3, Operand(static_cast<int>(reason)));
2480 PrepareCallCFunction(1, 0, r4);
2481 Register dst = ip;
2482 if (!ABI_CALL_VIA_IP) {
2483 dst = r4;
2484 }
2485 Move(dst, ExternalReference::abort_with_reason());
2486 // Use Call directly to avoid any unneeded overhead. The function won't
2487 // return anyway.
2488 Call(dst);
2489 return;
2490 }
2491
2492 LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
2493
2494 {
2495 // We don't actually want to generate a pile of code for this, so just
2496 // claim there is a stack frame, without generating one.
2497 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
2498 if (root_array_available()) {
2499 // Generate an indirect call via builtins entry table here in order to
2500 // ensure that the interpreter_entry_return_pc_offset is the same for
2501 // InterpreterEntryTrampoline and InterpreterEntryTrampolineForProfiling
2502 // when v8_flags.debug_code is enabled.
2503 LoadEntryFromBuiltin(Builtin::kAbort, ip);
2504 Call(ip);
2505 } else {
2506 CallBuiltin(Builtin::kAbort);
2507 }
2508 }
2509 // will not return here
2510}
2511
2512void MacroAssembler::LoadMap(Register destination, Register object) {
2513 LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset),
2514 r0);
2515}
2516
2517void MacroAssembler::LoadFeedbackVector(Register dst, Register closure,
2518 Register scratch, Label* fbv_undef) {
2519 Label done;
2520
2521 // Load the feedback vector from the closure.
2522 LoadTaggedField(
2523 dst, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
2524 LoadTaggedField(dst, FieldMemOperand(dst, FeedbackCell::kValueOffset), r0);
2525
2526 // Check if feedback vector is valid.
2527 LoadMap(scratch, dst);
2528 LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2529 CmpS32(scratch, Operand(FEEDBACK_VECTOR_TYPE), r0);
2530 b(eq, &done);
2531
2532 // Not valid, load undefined.
2533 LoadRoot(dst, RootIndex::kUndefinedValue);
2534 b(fbv_undef);
2535
2536 bind(&done);
2537}
2538
2539void MacroAssembler::LoadCompressedMap(Register dst, Register object,
2540 Register scratch) {
2541 ASM_CODE_COMMENT(this);
2542 LoadU32(dst, FieldMemOperand(object, HeapObject::kMapOffset), scratch);
2543}
2544
2545void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2546 LoadMap(dst, cp);
2547 LoadTaggedField(
2548 dst,
2549 FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
2550 r0);
2551 LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
2552}
2553
2554#ifdef V8_ENABLE_DEBUG_CODE
2555void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
2556 if (v8_flags.debug_code) Check(cond, reason, cr);
2557}
2558
2559void MacroAssembler::AssertNotSmi(Register object) {
2560 if (v8_flags.debug_code) {
2561 static_assert(kSmiTag == 0);
2562 TestIfSmi(object, r0);
2563 Check(ne, AbortReason::kOperandIsASmi, cr0);
2564 }
2565}
2566
2567void MacroAssembler::AssertSmi(Register object) {
2568 if (v8_flags.debug_code) {
2569 static_assert(kSmiTag == 0);
2570 TestIfSmi(object, r0);
2571 Check(eq, AbortReason::kOperandIsNotASmi, cr0);
2572 }
2573}
2574
2575void MacroAssembler::AssertConstructor(Register object) {
2576 if (v8_flags.debug_code) {
2577 static_assert(kSmiTag == 0);
2578 TestIfSmi(object, r0);
2579 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
2580 push(object);
2581 LoadMap(object, object);
2582 lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
2583 andi(object, object, Operand(Map::Bits1::IsConstructorBit::kMask));
2584 pop(object);
2585 Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
2586 }
2587}
2588
2589void MacroAssembler::AssertFunction(Register object) {
2590 if (v8_flags.debug_code) {
2591 static_assert(kSmiTag == 0);
2592 TestIfSmi(object, r0);
2593 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
2594 push(object);
2595 LoadMap(object, object);
2596 CompareInstanceTypeRange(object, object, r0, FIRST_JS_FUNCTION_TYPE,
2597 LAST_JS_FUNCTION_TYPE);
2598 pop(object);
2599 Check(le, AbortReason::kOperandIsNotAFunction);
2600 }
2601}
2602
2603void MacroAssembler::AssertCallableFunction(Register object) {
2604 if (!v8_flags.debug_code) return;
2605 ASM_CODE_COMMENT(this);
2606 static_assert(kSmiTag == 0);
2607 TestIfSmi(object, r0);
2608 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
2609 push(object);
2610 LoadMap(object, object);
2611 CompareInstanceTypeRange(object, object, r0, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2612 LAST_CALLABLE_JS_FUNCTION_TYPE);
2613 pop(object);
2614 Check(le, AbortReason::kOperandIsNotACallableFunction);
2615}
2616
2617void MacroAssembler::AssertBoundFunction(Register object) {
2618 if (v8_flags.debug_code) {
2619 static_assert(kSmiTag == 0);
2620 TestIfSmi(object, r0);
2621 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
2622 push(object);
2623 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2624 pop(object);
2625 Check(eq, AbortReason::kOperandIsNotABoundFunction);
2626 }
2627}
2628
2629void MacroAssembler::AssertGeneratorObject(Register object) {
2630 if (!v8_flags.debug_code) return;
2631 TestIfSmi(object, r0);
2632 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
2633
2634 // Load map
2635 Register map = object;
2636 push(object);
2637 LoadMap(map, object);
2638
2639 // Check if JSGeneratorObject
2640 Register instance_type = object;
2641 CompareInstanceTypeRange(map, instance_type, r0,
2642 FIRST_JS_GENERATOR_OBJECT_TYPE,
2643 LAST_JS_GENERATOR_OBJECT_TYPE);
2644 // Restore generator object to register and perform assertion
2645 pop(object);
2646 Check(le, AbortReason::kOperandIsNotAGeneratorObject);
2647}
2648
2649void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2650 Register scratch) {
2651 if (v8_flags.debug_code) {
2652 Label done_checking;
2653 AssertNotSmi(object);
2654 CompareRoot(object, RootIndex::kUndefinedValue);
2655 beq(&done_checking);
2656 LoadMap(scratch, object);
2657 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2658 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2659 bind(&done_checking);
2660 }
2661}
2662
2663void MacroAssembler::AssertJSAny(Register object, Register map_tmp,
2664 Register tmp, AbortReason abort_reason) {
2665 if (!v8_flags.debug_code) return;
2666
2667 ASM_CODE_COMMENT(this);
2668 DCHECK(!AreAliased(object, map_tmp, tmp));
2669 Label ok;
2670
2671 JumpIfSmi(object, &ok);
2672
2673 LoadMap(map_tmp, object);
2674 CompareInstanceType(map_tmp, tmp, LAST_NAME_TYPE);
2675 ble(&ok);
2676
2677 CompareInstanceType(map_tmp, tmp, FIRST_JS_RECEIVER_TYPE);
2678 bge(&ok);
2679
2680 CompareRoot(map_tmp, RootIndex::kHeapNumberMap);
2681 beq(&ok);
2682
2683 CompareRoot(map_tmp, RootIndex::kBigIntMap);
2684 beq(&ok);
2685
2686 CompareRoot(object, RootIndex::kUndefinedValue);
2687 beq(&ok);
2688
2689 CompareRoot(object, RootIndex::kTrueValue);
2690 beq(&ok);
2691
2692 CompareRoot(object, RootIndex::kFalseValue);
2693 beq(&ok);
2694
2695 CompareRoot(object, RootIndex::kNullValue);
2696 beq(&ok);
2697
2698 Abort(abort_reason);
2699
2700 bind(&ok);
2701}
2702
2703#endif // V8_ENABLE_DEBUG_CODE
2704
2705int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2706 int num_double_arguments) {
2707 int stack_passed_words = 0;
2708 if (num_double_arguments > DoubleRegister::kNumRegisters) {
2709 stack_passed_words +=
2710 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2711 }
2712 // Up to 8 simple arguments are passed in registers r3..r10.
2713 if (num_reg_arguments > kRegisterPassedArguments) {
2714 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2715 }
2716 return stack_passed_words;
2717}
2718
2719void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2720 int num_double_arguments,
2721 Register scratch) {
2722 int frame_alignment = ActivationFrameAlignment();
2723 int stack_passed_arguments =
2724 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2725 int stack_space = kNumRequiredStackFrameSlots;
2726
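  // When the ABI needs more than pointer-size stack alignment, the incoming
  // sp is saved in the slot just above the outgoing stack arguments so that
  // CallCFunction can restore it after the call.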
2727 if (frame_alignment > kSystemPointerSize) {
2728 // Make stack end at alignment and make room for stack arguments
2729 // -- preserving original value of sp.
2730 mr(scratch, sp);
2731 AddS64(sp, sp, Operand(-(stack_passed_arguments + 1) * kSystemPointerSize),
2732 scratch);
2733 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2734 ClearRightImm(sp, sp,
2735 Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
2736 StoreU64(scratch,
2737 MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
2738 } else {
2739 // Make room for stack arguments
2740 stack_space += stack_passed_arguments;
2741 }
2742
2743 // Allocate frame with required slots to make ABI work.
2744 li(r0, Operand::Zero());
2745 StoreU64WithUpdate(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
2746}
2747
2748void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2749 Register scratch) {
2750 PrepareCallCFunction(num_reg_arguments, 0, scratch);
2751}
2752
2754
2756
2757void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
2758 DoubleRegister src2) {
2759 if (src2 == d1) {
2760 DCHECK(src1 != d2);
2761 Move(d2, src2);
2762 Move(d1, src1);
2763 } else {
2764 Move(d1, src1);
2765 Move(d2, src2);
2766 }
2767}
2768
2769int MacroAssembler::CallCFunction(ExternalReference function,
2770 int num_reg_arguments,
2771 int num_double_arguments,
2772 SetIsolateDataSlots set_isolate_data_slots,
2773 bool has_function_descriptor) {
2774 Move(ip, function);
2775 return CallCFunction(ip, num_reg_arguments, num_double_arguments,
2776 set_isolate_data_slots, has_function_descriptor);
2777}
2778
2779int MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
2780 int num_double_arguments,
2781 SetIsolateDataSlots set_isolate_data_slots,
2782 bool has_function_descriptor) {
2783 ASM_CODE_COMMENT(this);
2784 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2785 DCHECK(has_frame());
2786
2787 Label start_call;
2788 Register pc_scratch = r11;
2789 DCHECK(!AreAliased(pc_scratch, function));
2790 LoadPC(pc_scratch);
2791 bind(&start_call);
2792 int start_pc_offset = pc_offset();
2793 // We are going to patch this instruction after emitting
2794 // Call, using a zero offset here as placeholder for now.
2795 // patch_pc_address assumes `addi` is used here to
2796 // add the offset to pc.
2797 addi(pc_scratch, pc_scratch, Operand::Zero());
2798
2799 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2800 // Save the frame pointer and PC so that the stack layout remains iterable,
2801 // even without an ExitFrame which normally exists between JS and C frames.
2802 Register scratch = r8;
2803 Push(scratch);
2804 mflr(scratch);
2806 StoreU64(pc_scratch,
2807 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerPC));
2808 StoreU64(fp,
2809 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2810 mtlr(scratch);
2811 Pop(scratch);
2812 }
2813
2814 // Just call directly. The function called cannot cause a GC, or
2815 // allow preemption, so the return address in the link register
2816 // stays correct.
2817 Register dest = function;
2818 if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
2819 // AIX/PPC64BE Linux uses a function descriptor. When calling C code be
2820 // aware of this descriptor and pick up values from it
2821 LoadU64(ToRegister(ABI_TOC_REGISTER),
2822 MemOperand(function, kSystemPointerSize));
2823 LoadU64(ip, MemOperand(function, 0));
2824 dest = ip;
2825 } else if (ABI_CALL_VIA_IP) {
2826    // pLinux and Simulator, not AIX
2827 Move(ip, function);
2828 dest = ip;
2829 }
2830
2831 Call(dest);
2832 int call_pc_offset = pc_offset();
2833 int offset_since_start_call = SizeOfCodeGeneratedSince(&start_call);
2834 // Here we are going to patch the `addi` instruction above to use the
2835 // correct offset.
2836 // LoadPC emits two instructions and pc is the address of its second emitted
2837 // instruction. Add one more to the offset to point to after the Call.
2838 offset_since_start_call += kInstrSize;
2839 patch_pc_address(pc_scratch, start_pc_offset, offset_since_start_call);
2840
2841 if (set_isolate_data_slots == SetIsolateDataSlots::kYes) {
2842 // We don't unset the PC; the FP is the source of truth.
2843 Register zero_scratch = r0;
2844 mov(zero_scratch, Operand::Zero());
2845
2846 StoreU64(zero_scratch,
2847 ExternalReferenceAsOperand(IsolateFieldId::kFastCCallCallerFP));
2848 }
2849
2850  // Remove the frame created in PrepareCallCFunction
2851 int stack_passed_arguments =
2852 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2853 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2854 if (ActivationFrameAlignment() > kSystemPointerSize) {
2855 LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize), r0);
2856 } else {
2857 AddS64(sp, sp, Operand(stack_space * kSystemPointerSize), r0);
2858 }
2859
2860 return call_pc_offset;
2861}
2862
2863int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments,
2864 SetIsolateDataSlots set_isolate_data_slots,
2865 bool has_function_descriptor) {
2866 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
2867 has_function_descriptor);
2868}
2869
2870int MacroAssembler::CallCFunction(Register function, int num_arguments,
2871 SetIsolateDataSlots set_isolate_data_slots,
2872 bool has_function_descriptor) {
2873 return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
2874 has_function_descriptor);
2875}
2876
2877void MacroAssembler::CheckPageFlag(
2878 Register object,
2879 Register scratch, // scratch may be same register as object
2880 int mask, Condition cc, Label* condition_met) {
2881 DCHECK(cc == ne || cc == eq);
2882 DCHECK(scratch != r0);
2883 ClearRightImm(scratch, object, Operand(kPageSizeBits));
2884 LoadU64(scratch, MemOperand(scratch, MemoryChunk::FlagsOffset()), r0);
2885
2886 mov(r0, Operand(mask));
2887 and_(r0, scratch, r0, SetRC);
2888
2889 if (cc == ne) {
2890 bne(condition_met, cr0);
2891 }
2892 if (cc == eq) {
2893 beq(condition_met, cr0);
2894 }
2895}
2896
2898
2900 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
2901}
2902
2904//
2905// New MacroAssembler Interfaces added for PPC
2906//
2908void MacroAssembler::LoadIntLiteral(Register dst, int value) {
2909 mov(dst, Operand(value));
2910}
2911
2912void MacroAssembler::LoadSmiLiteral(Register dst, Tagged<Smi> smi) {
2913 mov(dst, Operand(smi));
2914}
2915
2916void MacroAssembler::LoadDoubleLiteral(DoubleRegister result,
2917 base::Double value, Register scratch) {
2919 !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
2921 if (access == ConstantPoolEntry::OVERFLOWED) {
2923 lfd(result, MemOperand(scratch, 0));
2924 } else {
2926 }
2927 return;
2928 }
2929
2930 // avoid gcc strict aliasing error using union cast
2931 union {
2932 uint64_t dval;
2933 intptr_t ival;
2934 } litVal;
2935
2936 litVal.dval = value.AsUint64();
2937
2938 mov(scratch, Operand(litVal.ival));
2939 mtfprd(result, scratch);
2940}
2941
2942void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
2943 Register scratch) {
2944 // sign-extend src to 64-bit
2945 mtfprwa(dst, src);
2946}
2947
2948void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
2949 Register scratch) {
2950 // zero-extend src to 64-bit
2951 mtfprwz(dst, src);
2952}
2953
2954void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
2955 Register src) {
2956 mtfprd(dst, src);
2957}
2958
2959void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
2960 Register src_hi,
2961 Register src_lo,
2962 Register scratch) {
2963 ShiftLeftU64(scratch, src_hi, Operand(32));
2964 rldimi(scratch, src_lo, 0, 32);
2965 mtfprd(dst, scratch);
2966}
2967
2968void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
2969 Register scratch) {
2970 mffprd(scratch, dst);
2971 rldimi(scratch, src, 0, 32);
2972 mtfprd(dst, scratch);
2973}
2974
2975void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
2976 Register scratch) {
2977 mffprd(scratch, dst);
2978 rldimi(scratch, src, 32, 0);
2979 mtfprd(dst, scratch);
2980}
2981
2982void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
2983 mffprwz(dst, src);
2984}
2985
2986void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
2987 mffprd(dst, src);
2988 srdi(dst, dst, Operand(32));
2989}
2990
2991void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
2992 mffprd(dst, src);
2993}
2994
2995void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src,
2996 Register scratch) {
2997 ShiftLeftU64(scratch, src, Operand(32));
2998 mtfprd(dst, scratch);
2999 xscvspdpn(dst, dst);
3000}
3001
3002void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src,
3003 DoubleRegister scratch) {
3004 xscvdpspn(scratch, src);
3005 mffprwz(dst, scratch);
3006}
3007
3008void MacroAssembler::AddS64(Register dst, Register src, Register value, OEBit s,
3009 RCBit r) {
3010 add(dst, src, value, s, r);
3011}
3012
3013void MacroAssembler::AddS64(Register dst, Register src, const Operand& value,
3014 Register scratch, OEBit s, RCBit r) {
3015 if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
3016 addi(dst, src, value);
3017 } else {
3018 mov(scratch, value);
3019 add(dst, src, scratch, s, r);
3020 }
3021}
3022
3023void MacroAssembler::SubS64(Register dst, Register src, Register value, OEBit s,
3024 RCBit r) {
3025 sub(dst, src, value, s, r);
3026}
3027
3028void MacroAssembler::SubS64(Register dst, Register src, const Operand& value,
3029 Register scratch, OEBit s, RCBit r) {
3030 if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
3031 subi(dst, src, value);
3032 } else {
3033 mov(scratch, value);
3034 sub(dst, src, scratch, s, r);
3035 }
3036}
3037
3038void MacroAssembler::AddS32(Register dst, Register src, Register value,
3039 RCBit r) {
3040 AddS64(dst, src, value, LeaveOE, r);
3041 extsw(dst, dst, r);
3042}
3043
3044void MacroAssembler::AddS32(Register dst, Register src, const Operand& value,
3045 Register scratch, RCBit r) {
3046 AddS64(dst, src, value, scratch, LeaveOE, r);
3047 extsw(dst, dst, r);
3048}
3049
3050void MacroAssembler::SubS32(Register dst, Register src, Register value,
3051 RCBit r) {
3052 SubS64(dst, src, value, LeaveOE, r);
3053 extsw(dst, dst, r);
3054}
3055
3056void MacroAssembler::SubS32(Register dst, Register src, const Operand& value,
3057 Register scratch, RCBit r) {
3058 SubS64(dst, src, value, scratch, LeaveOE, r);
3059 extsw(dst, dst, r);
3060}
3061
3062void MacroAssembler::MulS64(Register dst, Register src, const Operand& value,
3063 Register scratch, OEBit s, RCBit r) {
3064 if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
3065 mulli(dst, src, value);
3066 } else {
3067 mov(scratch, value);
3068 mulld(dst, src, scratch, s, r);
3069 }
3070}
3071
3072void MacroAssembler::MulS64(Register dst, Register src, Register value, OEBit s,
3073 RCBit r) {
3074 mulld(dst, src, value, s, r);
3075}
3076
3077void MacroAssembler::MulS32(Register dst, Register src, const Operand& value,
3078 Register scratch, OEBit s, RCBit r) {
3079 MulS64(dst, src, value, scratch, s, r);
3080 extsw(dst, dst, r);
3081}
3082
3083void MacroAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
3084 RCBit r) {
3085 MulS64(dst, src, value, s, r);
3086 extsw(dst, dst, r);
3087}
3088
3089void MacroAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
3090 RCBit r) {
3091 divd(dst, src, value, s, r);
3092}
3093
3094void MacroAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
3095 RCBit r) {
3096 divdu(dst, src, value, s, r);
3097}
3098
3099void MacroAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
3100 RCBit r) {
3101 divw(dst, src, value, s, r);
3102 extsw(dst, dst);
3103}
3104void MacroAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
3105 RCBit r) {
3106 divwu(dst, src, value, s, r);
3107 ZeroExtWord32(dst, dst);
3108}
3109
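// Power9 (ISA 3.0) added hardware modulo instructions. On older cores the
// remainder below is computed as src - (src / value) * value, using a
// temporary register that aliases none of the operands.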
3110void MacroAssembler::ModS64(Register dst, Register src, Register value) {
3111 if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
3112 modsd(dst, src, value);
3113 } else {
3114 Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
3115 Push(scratch);
3116 divd(scratch, src, value);
3117 mulld(scratch, scratch, value);
3118 sub(dst, src, scratch);
3119 Pop(scratch);
3120 }
3121}
3122
3123void MacroAssembler::ModU64(Register dst, Register src, Register value) {
3124 if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
3125 modud(dst, src, value);
3126 } else {
3127 Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
3128 Push(scratch);
3129 divdu(scratch, src, value);
3130 mulld(scratch, scratch, value);
3131 sub(dst, src, scratch);
3132 Pop(scratch);
3133 }
3134}
3135
3136void MacroAssembler::ModS32(Register dst, Register src, Register value) {
3137 if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
3138 modsw(dst, src, value);
3139 } else {
3140 Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
3141 Push(scratch);
3142 divw(scratch, src, value);
3143 mullw(scratch, scratch, value);
3144 sub(dst, src, scratch);
3145 Pop(scratch);
3146 }
3147 extsw(dst, dst);
3148}
3149void MacroAssembler::ModU32(Register dst, Register src, Register value) {
3150 if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
3151 moduw(dst, src, value);
3152 } else {
3153 Register scratch = GetRegisterThatIsNotOneOf(dst, src, value);
3154 Push(scratch);
3155 divwu(scratch, src, value);
3156 mullw(scratch, scratch, value);
3157 sub(dst, src, scratch);
3158 Pop(scratch);
3159 }
3160 ZeroExtWord32(dst, dst);
3161}
3162
3163void MacroAssembler::AndU64(Register dst, Register src, const Operand& value,
3164 Register scratch, RCBit r) {
3165 if (is_uint16(value.immediate()) && r == SetRC) {
3166 andi(dst, src, value);
3167 } else {
3168 mov(scratch, value);
3169 and_(dst, src, scratch, r);
3170 }
3171}
3172
3173void MacroAssembler::AndU64(Register dst, Register src, Register value,
3174 RCBit r) {
3175 and_(dst, src, value, r);
3176}
3177
3178void MacroAssembler::OrU64(Register dst, Register src, const Operand& value,
3179 Register scratch, RCBit r) {
3180 if (is_int16(value.immediate()) && r == LeaveRC) {
3181 ori(dst, src, value);
3182 } else {
3183 mov(scratch, value);
3184 orx(dst, src, scratch, r);
3185 }
3186}
3187
3188void MacroAssembler::OrU64(Register dst, Register src, Register value,
3189 RCBit r) {
3190 orx(dst, src, value, r);
3191}
3192
3193void MacroAssembler::XorU64(Register dst, Register src, const Operand& value,
3194 Register scratch, RCBit r) {
3195 if (is_int16(value.immediate()) && r == LeaveRC) {
3196 xori(dst, src, value);
3197 } else {
3198 mov(scratch, value);
3199 xor_(dst, src, scratch, r);
3200 }
3201}
3202
3203void MacroAssembler::XorU64(Register dst, Register src, Register value,
3204 RCBit r) {
3205 xor_(dst, src, value, r);
3206}
3207
3208void MacroAssembler::AndU32(Register dst, Register src, const Operand& value,
3209 Register scratch, RCBit r) {
3210 AndU64(dst, src, value, scratch, r);
3211 extsw(dst, dst, r);
3212}
3213
3214void MacroAssembler::AndU32(Register dst, Register src, Register value,
3215 RCBit r) {
3216 AndU64(dst, src, value, r);
3217 extsw(dst, dst, r);
3218}
3219
3220void MacroAssembler::OrU32(Register dst, Register src, const Operand& value,
3221 Register scratch, RCBit r) {
3222 OrU64(dst, src, value, scratch, r);
3223 extsw(dst, dst, r);
3224}
3225
3226void MacroAssembler::OrU32(Register dst, Register src, Register value,
3227 RCBit r) {
3228 OrU64(dst, src, value, r);
3229 extsw(dst, dst, r);
3230}
3231
3232void MacroAssembler::XorU32(Register dst, Register src, const Operand& value,
3233 Register scratch, RCBit r) {
3234 XorU64(dst, src, value, scratch, r);
3235 extsw(dst, dst, r);
3236}
3237
3238void MacroAssembler::XorU32(Register dst, Register src, Register value,
3239 RCBit r) {
3240 XorU64(dst, src, value, r);
3241 extsw(dst, dst, r);
3242}
3243
3244void MacroAssembler::ShiftLeftU64(Register dst, Register src,
3245 const Operand& value, RCBit r) {
3246 sldi(dst, src, value, r);
3247}
3248
3249void MacroAssembler::ShiftRightU64(Register dst, Register src,
3250 const Operand& value, RCBit r) {
3251 srdi(dst, src, value, r);
3252}
3253
3254void MacroAssembler::ShiftRightS64(Register dst, Register src,
3255 const Operand& value, RCBit r) {
3256 sradi(dst, src, value.immediate(), r);
3257}
3258
3259void MacroAssembler::ShiftLeftU32(Register dst, Register src,
3260 const Operand& value, RCBit r) {
3261 slwi(dst, src, value, r);
3262}
3263
3264void MacroAssembler::ShiftRightU32(Register dst, Register src,
3265 const Operand& value, RCBit r) {
3266 srwi(dst, src, value, r);
3267}
3268
3269void MacroAssembler::ShiftRightS32(Register dst, Register src,
3270 const Operand& value, RCBit r) {
3271 srawi(dst, src, value.immediate(), r);
3272}
3273
3274void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register value,
3275 RCBit r) {
3276 sld(dst, src, value, r);
3277}
3278
3279void MacroAssembler::ShiftRightU64(Register dst, Register src, Register value,
3280 RCBit r) {
3281 srd(dst, src, value, r);
3282}
3283
3284void MacroAssembler::ShiftRightS64(Register dst, Register src, Register value,
3285 RCBit r) {
3286 srad(dst, src, value, r);
3287}
3288
3289void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register value,
3290 RCBit r) {
3291 slw(dst, src, value, r);
3292}
3293
3294void MacroAssembler::ShiftRightU32(Register dst, Register src, Register value,
3295 RCBit r) {
3296 srw(dst, src, value, r);
3297}
3298
3299void MacroAssembler::ShiftRightS32(Register dst, Register src, Register value,
3300 RCBit r) {
3301 sraw(dst, src, value, r);
3302}
3303
3304void MacroAssembler::CmpS64(Register src1, Register src2, CRegister cr) {
3305 cmp(src1, src2, cr);
3306}
3307
3308void MacroAssembler::CmpS64(Register src1, const Operand& src2,
3309 Register scratch, CRegister cr) {
3310 intptr_t value = src2.immediate();
3311 if (is_int16(value)) {
3312 cmpi(src1, src2, cr);
3313 } else {
3314 mov(scratch, src2);
3315 CmpS64(src1, scratch, cr);
3316 }
3317}
3318
3319void MacroAssembler::CmpU64(Register src1, const Operand& src2,
3320 Register scratch, CRegister cr) {
3321 intptr_t value = src2.immediate();
3322 if (is_uint16(value)) {
3323 cmpli(src1, src2, cr);
3324 } else {
3325 mov(scratch, src2);
3326 CmpU64(src1, scratch, cr);
3327 }
3328}
3329
3330void MacroAssembler::CmpU64(Register src1, Register src2, CRegister cr) {
3331 cmpl(src1, src2, cr);
3332}
3333
3334void MacroAssembler::CmpS32(Register src1, const Operand& src2,
3335 Register scratch, CRegister cr) {
3336 intptr_t value = src2.immediate();
3337 if (is_int16(value)) {
3338 cmpwi(src1, src2, cr);
3339 } else {
3340 mov(scratch, src2);
3341 CmpS32(src1, scratch, cr);
3342 }
3343}
3344
3345void MacroAssembler::CmpS32(Register src1, Register src2, CRegister cr) {
3346 cmpw(src1, src2, cr);
3347}
3348
3349void MacroAssembler::CmpU32(Register src1, const Operand& src2,
3350 Register scratch, CRegister cr) {
3351 intptr_t value = src2.immediate();
3352 if (is_uint16(value)) {
3353 cmplwi(src1, src2, cr);
3354 } else {
3355 mov(scratch, src2);
3356 cmplw(src1, scratch, cr);
3357 }
3358}
3359
3360void MacroAssembler::CmpU32(Register src1, Register src2, CRegister cr) {
3361 cmplw(src1, src2, cr);
3362}
3363
3364void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs,
3365 DoubleRegister rhs, RCBit r) {
3366 fadd(dst, lhs, rhs, r);
3367}
3368
3369void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs,
3370 DoubleRegister rhs, RCBit r) {
3371 fsub(dst, lhs, rhs, r);
3372}
3373
3374void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs,
3375 DoubleRegister rhs, RCBit r) {
3376 fmul(dst, lhs, rhs, r);
3377}
3378
3379void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs,
3380 DoubleRegister rhs, RCBit r) {
3381 fdiv(dst, lhs, rhs, r);
3382}
3383
3384void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs,
3385 DoubleRegister rhs, RCBit r) {
3386 fadd(dst, lhs, rhs, r);
3387 frsp(dst, dst, r);
3388}
3389
3390void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs,
3391 DoubleRegister rhs, RCBit r) {
3392 fsub(dst, lhs, rhs, r);
3393 frsp(dst, dst, r);
3394}
3395
3396void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs,
3397 DoubleRegister rhs, RCBit r) {
3398 fmul(dst, lhs, rhs, r);
3399 frsp(dst, dst, r);
3400}
3401
3402void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
3403 DoubleRegister rhs, RCBit r) {
3404 fdiv(dst, lhs, rhs, r);
3405 frsp(dst, dst, r);
3406}
3407
3408void MacroAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs,
3409 DoubleRegister rhs, RCBit r) {
3410 fcpsgn(dst, rhs, lhs, r);
3411}
3412
3413void MacroAssembler::CmpSmiLiteral(Register src1, Tagged<Smi> smi,
3414 Register scratch, CRegister cr) {
3415#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3416 CmpS32(src1, Operand(smi), scratch, cr);
3417#else
3418 LoadSmiLiteral(scratch, smi);
3419 CmpS64(src1, scratch, cr);
3420#endif
3421}
3422
3423void MacroAssembler::CmplSmiLiteral(Register src1, Tagged<Smi> smi,
3424 Register scratch, CRegister cr) {
3425#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3426 CmpU64(src1, Operand(smi), scratch, cr);
3427#else
3428 LoadSmiLiteral(scratch, smi);
3429 CmpU64(src1, scratch, cr);
3430#endif
3431}
3432
3433void MacroAssembler::AddSmiLiteral(Register dst, Register src, Tagged<Smi> smi,
3434 Register scratch) {
3435#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3436 AddS64(dst, src, Operand(smi.ptr()), scratch);
3437#else
3438 LoadSmiLiteral(scratch, smi);
3439 add(dst, src, scratch);
3440#endif
3441}
3442
3443void MacroAssembler::SubSmiLiteral(Register dst, Register src, Tagged<Smi> smi,
3444 Register scratch) {
3445#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3446 AddS64(dst, src, Operand(-(static_cast<intptr_t>(smi.ptr()))), scratch);
3447#else
3448 LoadSmiLiteral(scratch, smi);
3449 sub(dst, src, scratch);
3450#endif
3451}
3452
3453void MacroAssembler::AndSmiLiteral(Register dst, Register src, Tagged<Smi> smi,
3454 Register scratch, RCBit rc) {
3455#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
3456 AndU64(dst, src, Operand(smi), scratch, rc);
3457#else
3458 LoadSmiLiteral(scratch, smi);
3459 and_(dst, src, scratch, rc);
3460#endif
3461}
3462
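// The Generate* macros below select between the D-form (base register plus a
// signed 16-bit displacement), the Power10 prefixed form (34-bit
// displacement) and the X-form (base plus index register) encoding of a
// load/store, materializing the offset in the scratch register whenever it
// does not fit the chosen form.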
3463#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
3464 { \
3465 int64_t offset = mem.offset(); \
3466 \
3467 if (mem.rb() == no_reg) { \
3468 if (!is_int16(offset)) { \
3469 /* cannot use d-form */ \
3470 CHECK_NE(scratch, no_reg); \
3471 mov(scratch, Operand(offset)); \
3472 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3473 } else { \
3474 ri_op(reg, mem); \
3475 } \
3476 } else { \
3477 if (offset == 0) { \
3478 rr_op(reg, mem); \
3479 } else if (is_int16(offset)) { \
3480 CHECK_NE(scratch, no_reg); \
3481 addi(scratch, mem.rb(), Operand(offset)); \
3482 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3483 } else { \
3484 CHECK_NE(scratch, no_reg); \
3485 mov(scratch, Operand(offset)); \
3486 add(scratch, scratch, mem.rb()); \
3487 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3488 } \
3489 } \
3490 }
3491
3492#define GenerateMemoryOperationRR(reg, mem, op) \
3493 { \
3494 if (mem.offset() == 0) { \
3495 if (mem.rb() != no_reg) \
3496 op(reg, mem); \
3497 else \
3498 op(reg, MemOperand(r0, mem.ra())); \
3499 } else if (is_int16(mem.offset())) { \
3500 if (mem.rb() != no_reg) \
3501 addi(scratch, mem.rb(), Operand(mem.offset())); \
3502 else \
3503 mov(scratch, Operand(mem.offset())); \
3504 op(reg, MemOperand(mem.ra(), scratch)); \
3505 } else { \
3506 mov(scratch, Operand(mem.offset())); \
3507 if (mem.rb() != no_reg) add(scratch, scratch, mem.rb()); \
3508 op(reg, MemOperand(mem.ra(), scratch)); \
3509 } \
3510 }
3511
3512#define GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op) \
3513 { \
3514 int64_t offset = mem.offset(); \
3515 \
3516 if (mem.rb() == no_reg) { \
3517 if (is_int16(offset)) { \
3518 ri_op(reg, mem); \
3519 } else if (is_int34(offset) && CpuFeatures::IsSupported(PPC_10_PLUS)) { \
3520 rip_op(reg, mem); \
3521 } else { \
3522 /* cannot use d-form */ \
3523 CHECK_NE(scratch, no_reg); \
3524 mov(scratch, Operand(offset)); \
3525 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3526 } \
3527 } else { \
3528 if (offset == 0) { \
3529 rr_op(reg, mem); \
3530 } else if (is_int16(offset)) { \
3531 CHECK_NE(scratch, no_reg); \
3532 addi(scratch, mem.rb(), Operand(offset)); \
3533 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3534 } else { \
3535 CHECK_NE(scratch, no_reg); \
3536 mov(scratch, Operand(offset)); \
3537 add(scratch, scratch, mem.rb()); \
3538 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3539 } \
3540 } \
3541 }
3542
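// The "WithAlign" variants additionally refuse displacements that are not a
// multiple of 4, because DS-form instructions such as ld/std reserve the two
// low displacement bits.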
3543#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
3544 { \
3545 int64_t offset = mem.offset(); \
3546 int misaligned = (offset & 3); \
3547 \
3548 if (mem.rb() == no_reg) { \
3549 if (!is_int16(offset) || misaligned) { \
3550 /* cannot use d-form */ \
3551 CHECK_NE(scratch, no_reg); \
3552 mov(scratch, Operand(offset)); \
3553 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3554 } else { \
3555 ri_op(reg, mem); \
3556 } \
3557 } else { \
3558 if (offset == 0) { \
3559 rr_op(reg, mem); \
3560 } else if (is_int16(offset)) { \
3561 CHECK_NE(scratch, no_reg); \
3562 addi(scratch, mem.rb(), Operand(offset)); \
3563 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3564 } else { \
3565 CHECK_NE(scratch, no_reg); \
3566 mov(scratch, Operand(offset)); \
3567 add(scratch, scratch, mem.rb()); \
3568 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3569 } \
3570 } \
3571 }
3572
3573#define GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, \
3574 rr_op) \
3575 { \
3576 int64_t offset = mem.offset(); \
3577 int misaligned = (offset & 3); \
3578 \
3579 if (mem.rb() == no_reg) { \
3580 if (is_int16(offset) && !misaligned) { \
3581 ri_op(reg, mem); \
3582 } else if (is_int34(offset) && CpuFeatures::IsSupported(PPC_10_PLUS)) { \
3583 rip_op(reg, mem); \
3584 } else { \
3585 /* cannot use d-form */ \
3586 CHECK_NE(scratch, no_reg); \
3587 mov(scratch, Operand(offset)); \
3588 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3589 } \
3590 } else { \
3591 if (offset == 0) { \
3592 rr_op(reg, mem); \
3593 } else if (is_int16(offset)) { \
3594 CHECK_NE(scratch, no_reg); \
3595 addi(scratch, mem.rb(), Operand(offset)); \
3596 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3597 } else { \
3598 CHECK_NE(scratch, no_reg); \
3599 mov(scratch, Operand(offset)); \
3600 add(scratch, scratch, mem.rb()); \
3601 rr_op(reg, MemOperand(mem.ra(), scratch)); \
3602 } \
3603 } \
3604 }
3605
3606#define MEM_OP_WITH_ALIGN_LIST(V) \
3607 V(LoadU64WithUpdate, ldu, ldux) \
3608 V(StoreU64WithUpdate, stdu, stdux)
3609
3610#define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op) \
3611 void MacroAssembler::name(Register reg, const MemOperand& mem, \
3612 Register scratch) { \
3613 GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op); \
3614 }
3615MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION)
3616#undef MEM_OP_WITH_ALIGN_LIST
3617#undef MEM_OP_WITH_ALIGN_FUNCTION
3618
3619#define MEM_OP_WITH_ALIGN_PREFIXED_LIST(V) \
3620 V(LoadS32, lwa, plwa, lwax) \
3621 V(LoadU64, ld, pld, ldx) \
3622 V(StoreU64, std, pstd, stdx)
3623
3624#define MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION(name, ri_op, rip_op, rr_op) \
3625 void MacroAssembler::name(Register reg, const MemOperand& mem, \
3626 Register scratch) { \
3627 GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, rr_op); \
3628 }
3629MEM_OP_WITH_ALIGN_PREFIXED_LIST(MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION)
3630#undef MEM_OP_WITH_ALIGN_PREFIXED_LIST
3631#undef MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION
3632
3633#define MEM_OP_LIST(V) \
3634 V(LoadF64WithUpdate, DoubleRegister, lfdu, lfdux) \
3635 V(LoadF32WithUpdate, DoubleRegister, lfsu, lfsux) \
3636 V(StoreF64WithUpdate, DoubleRegister, stfdu, stfdux) \
3637 V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux)
3638
3639#define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op) \
3640 void MacroAssembler::name(result_t reg, const MemOperand& mem, \
3641 Register scratch) { \
3642 GenerateMemoryOperation(reg, mem, ri_op, rr_op); \
3643 }
3644MEM_OP_LIST(MEM_OP_FUNCTION)
3645#undef MEM_OP_LIST
3646#undef MEM_OP_FUNCTION
3647
3648#define MEM_OP_PREFIXED_LIST(V) \
3649 V(LoadU32, Register, lwz, plwz, lwzx) \
3650 V(LoadS16, Register, lha, plha, lhax) \
3651 V(LoadU16, Register, lhz, plhz, lhzx) \
3652 V(LoadU8, Register, lbz, plbz, lbzx) \
3653 V(StoreU32, Register, stw, pstw, stwx) \
3654 V(StoreU16, Register, sth, psth, sthx) \
3655 V(StoreU8, Register, stb, pstb, stbx) \
3656 V(LoadF64, DoubleRegister, lfd, plfd, lfdx) \
3657 V(LoadF32, DoubleRegister, lfs, plfs, lfsx) \
3658 V(StoreF64, DoubleRegister, stfd, pstfd, stfdx) \
3659 V(StoreF32, DoubleRegister, stfs, pstfs, stfsx)
3660
3661#define MEM_OP_PREFIXED_FUNCTION(name, result_t, ri_op, rip_op, rr_op) \
3662 void MacroAssembler::name(result_t reg, const MemOperand& mem, \
3663 Register scratch) { \
3664 GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op); \
3665 }
3666MEM_OP_PREFIXED_LIST(MEM_OP_PREFIXED_FUNCTION)
3667#undef MEM_OP_PREFIXED_LIST
3668#undef MEM_OP_PREFIXED_FUNCTION
3669
3670#define MEM_OP_SIMD_LIST(V) \
3671 V(LoadSimd128, lxvx) \
3672 V(StoreSimd128, stxvx) \
3673 V(LoadSimd128Uint64, lxsdx) \
3674 V(LoadSimd128Uint32, lxsiwzx) \
3675 V(LoadSimd128Uint16, lxsihzx) \
3676 V(LoadSimd128Uint8, lxsibzx) \
3677 V(StoreSimd128Uint64, stxsdx) \
3678 V(StoreSimd128Uint32, stxsiwx) \
3679 V(StoreSimd128Uint16, stxsihx) \
3680 V(StoreSimd128Uint8, stxsibx)
3681
3682#define MEM_OP_SIMD_FUNCTION(name, rr_op) \
3683 void MacroAssembler::name(Simd128Register reg, const MemOperand& mem, \
3684 Register scratch) { \
3685 GenerateMemoryOperationRR(reg, mem, rr_op); \
3686 }
3687MEM_OP_SIMD_LIST(MEM_OP_SIMD_FUNCTION)
3688#undef MEM_OP_SIMD_LIST
3689#undef MEM_OP_SIMD_FUNCTION
3690
3691void MacroAssembler::LoadS8(Register dst, const MemOperand& mem,
3692 Register scratch) {
3693 LoadU8(dst, mem, scratch);
3694 extsb(dst, dst);
3695}
3696
3697#define MEM_LE_OP_LIST(V) \
3698 V(LoadU64, ldbrx) \
3699 V(LoadU32, lwbrx) \
3700 V(LoadU16, lhbrx) \
3701 V(StoreU64, stdbrx) \
3702 V(StoreU32, stwbrx) \
3703 V(StoreU16, sthbrx)
3704
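// On big-endian targets the *LE accessors use the byte-reversing indexed
// loads and stores listed above; on little-endian targets they simply
// forward to the plain accessors.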
3705#ifdef V8_TARGET_BIG_ENDIAN
3706#define MEM_LE_OP_FUNCTION(name, op) \
3707 void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \
3708 Register scratch) { \
3709 GenerateMemoryOperationRR(reg, mem, op); \
3710 }
3711#else
3712#define MEM_LE_OP_FUNCTION(name, op) \
3713 void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \
3714 Register scratch) { \
3715 name(reg, mem, scratch); \
3716 }
3717#endif
3718
3719MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION)
3720#undef MEM_LE_OP_FUNCTION
3721#undef MEM_LE_OP_LIST
3722
3723void MacroAssembler::LoadS32LE(Register dst, const MemOperand& mem,
3724 Register scratch) {
3725#ifdef V8_TARGET_BIG_ENDIAN
3726 LoadU32LE(dst, mem, scratch);
3727 extsw(dst, dst);
3728#else
3729 LoadS32(dst, mem, scratch);
3730#endif
3731}
3732
3733void MacroAssembler::LoadS16LE(Register dst, const MemOperand& mem,
3734 Register scratch) {
3735#ifdef V8_TARGET_BIG_ENDIAN
3736 LoadU16LE(dst, mem, scratch);
3737 extsh(dst, dst);
3738#else
3739 LoadS16(dst, mem, scratch);
3740#endif
3741}
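// There is no byte-reversing floating-point load or store, so on big-endian
// targets the floating-point *LE helpers round-trip the value through a GPR
// or a stack slot using the integer byte-reversed accessors.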
3742
3743void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem,
3744 Register scratch, Register scratch2) {
3745#ifdef V8_TARGET_BIG_ENDIAN
3746 LoadU64LE(scratch, mem, scratch2);
3747 push(scratch);
3748 LoadF64(dst, MemOperand(sp), scratch2);
3749 pop(scratch);
3750#else
3751 LoadF64(dst, mem, scratch);
3752#endif
3753}
3754
3755void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem,
3756 Register scratch, Register scratch2) {
3757#ifdef V8_TARGET_BIG_ENDIAN
3758 LoadU32LE(scratch, mem, scratch2);
3759 push(scratch);
3760 LoadF32(dst, MemOperand(sp, 4), scratch2);
3761 pop(scratch);
3762#else
3763 LoadF32(dst, mem, scratch);
3764#endif
3765}
3766
3767void MacroAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
3768 Register scratch, Register scratch2) {
3769#ifdef V8_TARGET_BIG_ENDIAN
3770 StoreF64(dst, mem, scratch2);
3771 LoadU64(scratch, mem, scratch2);
3772 StoreU64LE(scratch, mem, scratch2);
3773#else
3774 StoreF64(dst, mem, scratch);
3775#endif
3776}
3777
3778void MacroAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
3779 Register scratch, Register scratch2) {
3780#ifdef V8_TARGET_BIG_ENDIAN
3781 StoreF32(dst, mem, scratch2);
3782 LoadU32(scratch, mem, scratch2);
3783 StoreU32LE(scratch, mem, scratch2);
3784#else
3785 StoreF32(dst, mem, scratch);
3786#endif
3787}
3788
3789// Simd Support.
3790#define SIMD_BINOP_LIST(V) \
3791 V(F64x2Add, xvadddp) \
3792 V(F64x2Sub, xvsubdp) \
3793 V(F64x2Mul, xvmuldp) \
3794 V(F64x2Div, xvdivdp) \
3795 V(F64x2Eq, xvcmpeqdp) \
3796 V(F32x4Add, vaddfp) \
3797 V(F32x4Sub, vsubfp) \
3798 V(F32x4Mul, xvmulsp) \
3799 V(F32x4Div, xvdivsp) \
3800 V(F32x4Min, vminfp) \
3801 V(F32x4Max, vmaxfp) \
3802 V(F32x4Eq, xvcmpeqsp) \
3803 V(I64x2Add, vaddudm) \
3804 V(I64x2Sub, vsubudm) \
3805 V(I64x2Eq, vcmpequd) \
3806 V(I64x2GtS, vcmpgtsd) \
3807 V(I32x4Add, vadduwm) \
3808 V(I32x4Sub, vsubuwm) \
3809 V(I32x4Mul, vmuluwm) \
3810 V(I32x4MinS, vminsw) \
3811 V(I32x4MinU, vminuw) \
3812 V(I32x4MaxS, vmaxsw) \
3813 V(I32x4MaxU, vmaxuw) \
3814 V(I32x4Eq, vcmpequw) \
3815 V(I32x4GtS, vcmpgtsw) \
3816 V(I32x4GtU, vcmpgtuw) \
3817 V(I16x8Add, vadduhm) \
3818 V(I16x8Sub, vsubuhm) \
3819 V(I16x8MinS, vminsh) \
3820 V(I16x8MinU, vminuh) \
3821 V(I16x8MaxS, vmaxsh) \
3822 V(I16x8MaxU, vmaxuh) \
3823 V(I16x8Eq, vcmpequh) \
3824 V(I16x8GtS, vcmpgtsh) \
3825 V(I16x8GtU, vcmpgtuh) \
3826 V(I16x8AddSatS, vaddshs) \
3827 V(I16x8SubSatS, vsubshs) \
3828 V(I16x8AddSatU, vadduhs) \
3829 V(I16x8SubSatU, vsubuhs) \
3830 V(I16x8RoundingAverageU, vavguh) \
3831 V(I8x16Add, vaddubm) \
3832 V(I8x16Sub, vsububm) \
3833 V(I8x16MinS, vminsb) \
3834 V(I8x16MinU, vminub) \
3835 V(I8x16MaxS, vmaxsb) \
3836 V(I8x16MaxU, vmaxub) \
3837 V(I8x16Eq, vcmpequb) \
3838 V(I8x16GtS, vcmpgtsb) \
3839 V(I8x16GtU, vcmpgtub) \
3840 V(I8x16AddSatS, vaddsbs) \
3841 V(I8x16SubSatS, vsubsbs) \
3842 V(I8x16AddSatU, vaddubs) \
3843 V(I8x16SubSatU, vsububs) \
3844 V(I8x16RoundingAverageU, vavgub) \
3845 V(S128And, vand) \
3846 V(S128Or, vor) \
3847 V(S128Xor, vxor) \
3848 V(S128AndNot, vandc)
3849
3850#define EMIT_SIMD_BINOP(name, op) \
3851 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
3852 Simd128Register src2) { \
3853 op(dst, src1, src2); \
3854 }
3855SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
3856#undef EMIT_SIMD_BINOP
3857#undef SIMD_BINOP_LIST
3858
3859#define SIMD_SHIFT_LIST(V) \
3860 V(I64x2Shl, vsld) \
3861 V(I64x2ShrS, vsrad) \
3862 V(I64x2ShrU, vsrd) \
3863 V(I32x4Shl, vslw) \
3864 V(I32x4ShrS, vsraw) \
3865 V(I32x4ShrU, vsrw) \
3866 V(I16x8Shl, vslh) \
3867 V(I16x8ShrS, vsrah) \
3868 V(I16x8ShrU, vsrh) \
3869 V(I8x16Shl, vslb) \
3870 V(I8x16ShrS, vsrab) \
3871 V(I8x16ShrU, vsrb)
3872
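// Vector shifts take their per-lane shift amount from a vector register, so
// the scalar amount is first moved into a VSR and its low byte splatted
// across every lane; each shift instruction then uses only the low-order
// bits it needs from each lane.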
3873#define EMIT_SIMD_SHIFT(name, op) \
3874 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
3875 Register src2, Simd128Register scratch) { \
3876 mtvsrd(scratch, src2); \
3877 vspltb(scratch, scratch, Operand(7)); \
3878 op(dst, src1, scratch); \
3879 } \
3880 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
3881 const Operand& src2, Register scratch1, \
3882 Simd128Register scratch2) { \
3883 mov(scratch1, src2); \
3884 name(dst, src1, scratch1, scratch2); \
3885 }
3886SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
3887#undef EMIT_SIMD_SHIFT
3888#undef SIMD_SHIFT_LIST
3889
3890#define SIMD_UNOP_LIST(V) \
3891 V(F64x2Abs, xvabsdp) \
3892 V(F64x2Neg, xvnegdp) \
3893 V(F64x2Sqrt, xvsqrtdp) \
3894 V(F64x2Ceil, xvrdpip) \
3895 V(F64x2Floor, xvrdpim) \
3896 V(F64x2Trunc, xvrdpiz) \
3897 V(F32x4Abs, xvabssp) \
3898 V(F32x4Neg, xvnegsp) \
3899 V(F32x4Sqrt, xvsqrtsp) \
3900 V(F32x4Ceil, xvrspip) \
3901 V(F32x4Floor, xvrspim) \
3902 V(F32x4Trunc, xvrspiz) \
3903 V(F32x4SConvertI32x4, xvcvsxwsp) \
3904 V(F32x4UConvertI32x4, xvcvuxwsp) \
3905 V(I64x2Neg, vnegd) \
3906 V(I64x2SConvertI32x4Low, vupklsw) \
3907 V(I64x2SConvertI32x4High, vupkhsw) \
3908 V(I32x4Neg, vnegw) \
3909 V(I32x4SConvertI16x8Low, vupklsh) \
3910 V(I32x4SConvertI16x8High, vupkhsh) \
3911 V(I32x4UConvertF32x4, xvcvspuxws) \
3912 V(I16x8SConvertI8x16Low, vupklsb) \
3913 V(I16x8SConvertI8x16High, vupkhsb) \
3914 V(I8x16Popcnt, vpopcntb)
3915
3916#define EMIT_SIMD_UNOP(name, op) \
3917 void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
3918 op(dst, src); \
3919 }
3920SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
3921#undef EMIT_SIMD_UNOP
3922#undef SIMD_UNOP_LIST
3923
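// Extended multiplies are assembled from the even/odd widening multiplies:
// vmule*/vmulo* produce double-width products of the even- and odd-numbered
// lanes, and the merge instruction interleaves them into the requested low
// or high half of the result.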
3924#define EXT_MUL(dst_even, dst_odd, mul_even, mul_odd) \
3925 mul_even(dst_even, src1, src2); \
3926 mul_odd(dst_odd, src1, src2);
3927#define SIMD_EXT_MUL_LIST(V) \
3928 V(I32x4ExtMulLowI16x8S, vmulesh, vmulosh, vmrglw) \
3929 V(I32x4ExtMulHighI16x8S, vmulesh, vmulosh, vmrghw) \
3930 V(I32x4ExtMulLowI16x8U, vmuleuh, vmulouh, vmrglw) \
3931 V(I32x4ExtMulHighI16x8U, vmuleuh, vmulouh, vmrghw) \
3932 V(I16x8ExtMulLowI8x16S, vmulesb, vmulosb, vmrglh) \
3933 V(I16x8ExtMulHighI8x16S, vmulesb, vmulosb, vmrghh) \
3934 V(I16x8ExtMulLowI8x16U, vmuleub, vmuloub, vmrglh) \
3935 V(I16x8ExtMulHighI8x16U, vmuleub, vmuloub, vmrghh)
3936
3937#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge) \
3938 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
3939 Simd128Register src2, Simd128Register scratch) { \
3940 EXT_MUL(scratch, dst, mul_even, mul_odd) \
3941 merge(dst, scratch, dst); \
3942 }
3943SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
3944#undef EMIT_SIMD_EXT_MUL
3945#undef SIMD_EXT_MUL_LIST
3946
3947#define SIMD_ALL_TRUE_LIST(V) \
3948 V(I64x2AllTrue, vcmpgtud) \
3949 V(I32x4AllTrue, vcmpgtuw) \
3950 V(I16x8AllTrue, vcmpgtuh) \
3951 V(I8x16AllTrue, vcmpgtub)
3952
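// Note on the all-true helpers below: the dot form of the vector compare
// (SetRC) records "all lanes true" in CR6. Comparing src against a zero
// vector with an unsigned greater-than therefore sets CR bit 24 exactly when
// every lane is non-zero, and isel materializes 1 or 0 from that bit.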
3953#define EMIT_SIMD_ALL_TRUE(name, op) \
3954 void MacroAssembler::name(Register dst, Simd128Register src, \
3955 Register scratch1, Register scratch2, \
3956 Simd128Register scratch3) { \
3957 constexpr uint8_t fxm = 0x2; /* field mask. */ \
3958 constexpr int bit_number = 24; \
3959 li(scratch1, Operand(0)); \
3960 li(scratch2, Operand(1)); \
3961 /* Check if all lanes > 0, if not then return false.*/ \
3962 vxor(scratch3, scratch3, scratch3); \
3963 mtcrf(scratch1, fxm); /* Clear cr6.*/ \
3964 op(scratch3, src, scratch3, SetRC); \
3965 isel(dst, scratch2, scratch1, bit_number); \
3966 }
3967SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
3968#undef EMIT_SIMD_ALL_TRUE
3969#undef SIMD_ALL_TRUE_LIST
3970
3971#define SIMD_BITMASK_LIST(V) \
3972 V(I64x2BitMask, vextractdm, 0x8080808080800040) \
3973 V(I32x4BitMask, vextractwm, 0x8080808000204060) \
3974 V(I16x8BitMask, vextracthm, 0x10203040506070)
3975
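// Note on the bitmask helpers below: Power10 extracts the lane sign bits
// directly with vextract*m. On older cores, vbpermq gathers the sign bit of
// each lane using the bit indices encoded in the 64-bit constant, and the
// packed result is then pulled out of the vector register (byte 6 holds the
// first eight gathered bits).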
3976#define EMIT_SIMD_BITMASK(name, op, indicies) \
3977 void MacroAssembler::name(Register dst, Simd128Register src, \
3978 Register scratch1, Simd128Register scratch2) { \
3979 if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
3980 op(dst, src); \
3981 } else { \
3982 mov(scratch1, Operand(indicies)); /* Select 0 for the high bits. */ \
3983 mtvsrd(scratch2, scratch1); \
3984 vbpermq(scratch2, src, scratch2); \
3985 vextractub(scratch2, scratch2, Operand(6)); \
3986 mfvsrd(dst, scratch2); \
3987 } \
3988 }
3989SIMD_BITMASK_LIST(EMIT_SIMD_BITMASK)
3990#undef EMIT_SIMD_BITMASK
3991#undef SIMD_BITMASK_LIST
3992
3993#define SIMD_QFM_LIST(V) \
3994 V(F64x2Qfma, xvmaddmdp) \
3995 V(F64x2Qfms, xvnmsubmdp) \
3996 V(F32x4Qfma, xvmaddmsp) \
3997 V(F32x4Qfms, xvnmsubmsp)
3998
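// Note on the Qfma/Qfms helpers below: the "m" form VSX multiply-add
// instructions use the destination register as one of the inputs as well as
// the result, so src1 must first be copied into the destination. When dst
// does not already hold src1, the operation is staged in the scratch register
// so the copy cannot clobber src2 or src3, and the result is moved into dst
// afterwards.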
3999#define EMIT_SIMD_QFM(name, op) \
4000 void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
4001 Simd128Register src2, Simd128Register src3, \
4002 Simd128Register scratch) { \
4003 Simd128Register dest = dst; \
4004 if (dst != src1) { \
4005 vor(scratch, src1, src1); \
4006 dest = scratch; \
4007 } \
4008 op(dest, src2, src3); \
4009 if (dest != dst) { \
4010 vor(dst, dest, dest); \
4011 } \
4012 }
4013SIMD_QFM_LIST(EMIT_SIMD_QFM)
4014#undef EMIT_SIMD_QFM
4015#undef SIMD_QFM_LIST
4016
4017void MacroAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst,
4018 Simd128Register src1,
4019 Simd128Register src2,
4020 Simd128Register scratch) {
4021 constexpr int lane_width_in_bytes = 8;
4022 EXT_MUL(scratch, dst, vmulesw, vmulosw)
4023 vextractd(scratch, scratch, Operand(1 * lane_width_in_bytes));
4024 vinsertd(dst, scratch, Operand(0));
4025}
4026
4027void MacroAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst,
4028 Simd128Register src1,
4029 Simd128Register src2,
4030 Simd128Register scratch) {
4031 constexpr int lane_width_in_bytes = 8;
4032 EXT_MUL(scratch, dst, vmulesw, vmulosw)
4033 vinsertd(scratch, dst, Operand(1 * lane_width_in_bytes));
4034 vor(dst, scratch, scratch);
4035}
4036
4037void MacroAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst,
4038 Simd128Register src1,
4039 Simd128Register src2,
4040 Simd128Register scratch) {
4041 constexpr int lane_width_in_bytes = 8;
4042 EXT_MUL(scratch, dst, vmuleuw, vmulouw)
4043 vextractd(scratch, scratch, Operand(1 * lane_width_in_bytes));
4044 vinsertd(dst, scratch, Operand(0));
4045}
4046
4047void MacroAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst,
4048 Simd128Register src1,
4049 Simd128Register src2,
4050 Simd128Register scratch) {
4051 constexpr int lane_width_in_bytes = 8;
4052 EXT_MUL(scratch, dst, vmuleuw, vmulouw)
4053 vinsertd(scratch, dst, Operand(1 * lane_width_in_bytes));
4054 vor(dst, scratch, scratch);
4055}
4056#undef EXT_MUL
4057
4059 Register scratch) {
4060#ifdef V8_TARGET_BIG_ENDIAN
4061 LoadSimd128(dst, mem, scratch);
4062 xxbrq(dst, dst);
4063#else
4064 LoadSimd128(dst, mem, scratch);
4065#endif
4066}
4067
4069 Register scratch1,
4070 Simd128Register scratch2) {
4071#ifdef V8_TARGET_BIG_ENDIAN
4072 xxbrq(scratch2, src);
4073 StoreSimd128(scratch2, mem, scratch1);
4074#else
4075 StoreSimd128(src, mem, scratch1);
4076#endif
4077}
4078
4080 Register scratch) {
4081 constexpr int lane_width_in_bytes = 8;
4082 MovDoubleToInt64(scratch, src);
4083 mtvsrd(dst, scratch);
4084 vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
4085}
4086
4088 DoubleRegister scratch1, Register scratch2) {
4089 MovFloatToInt(scratch2, src, scratch1);
4090 mtvsrd(dst, scratch2);
4091 vspltw(dst, dst, Operand(1));
4092}
4093
4094void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) {
4095 constexpr int lane_width_in_bytes = 8;
4096 mtvsrd(dst, src);
4097 vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
4098}
4099
4100void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) {
4101 mtvsrd(dst, src);
4102 vspltw(dst, dst, Operand(1));
4103}
4104
4105void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) {
4106 mtvsrd(dst, src);
4107 vsplth(dst, dst, Operand(3));
4108}
4109
4110void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) {
4111 mtvsrd(dst, src);
4112 vspltb(dst, dst, Operand(7));
4113}
4114
4116 uint8_t imm_lane_idx,
4117 Simd128Register scratch1,
4118 Register scratch2) {
4119 constexpr int lane_width_in_bytes = 8;
4120 vextractd(scratch1, src, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
4121 mfvsrd(scratch2, scratch1);
4122 MovInt64ToDouble(dst, scratch2);
4123}
4124
4126 uint8_t imm_lane_idx,
4127 Simd128Register scratch1,
4128 Register scratch2, Register scratch3) {
4129 constexpr int lane_width_in_bytes = 4;
4130 vextractuw(scratch1, src, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
4131 mfvsrd(scratch2, scratch1);
4132 MovIntToFloat(dst, scratch2, scratch3);
4133}
4134
4136 uint8_t imm_lane_idx,
4137 Simd128Register scratch) {
4138 constexpr int lane_width_in_bytes = 8;
4139 vextractd(scratch, src, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
4140 mfvsrd(dst, scratch);
4141}
4142
4144 uint8_t imm_lane_idx,
4145 Simd128Register scratch) {
4146 constexpr int lane_width_in_bytes = 4;
4147 vextractuw(scratch, src, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
4148 mfvsrd(dst, scratch);
4149}
4150
4152 uint8_t imm_lane_idx,
4153 Simd128Register scratch) {
4154 constexpr int lane_width_in_bytes = 2;
4155 vextractuh(scratch, src, Operand((7 - imm_lane_idx) * lane_width_in_bytes));
4156 mfvsrd(dst, scratch);
4157}
4158
4160 uint8_t imm_lane_idx,
4161 Simd128Register scratch) {
4162 I16x8ExtractLaneU(dst, src, imm_lane_idx, scratch);
4163 extsh(dst, dst);
4164}
4165
4167 uint8_t imm_lane_idx,
4168 Simd128Register scratch) {
4169 vextractub(scratch, src, Operand(15 - imm_lane_idx));
4170 mfvsrd(dst, scratch);
4171}
4172
4174 uint8_t imm_lane_idx,
4175 Simd128Register scratch) {
4176 I8x16ExtractLaneU(dst, src, imm_lane_idx, scratch);
4177 extsb(dst, dst);
4178}
4179
4181 DoubleRegister src2, uint8_t imm_lane_idx,
4182 Register scratch1,
4183 Simd128Register scratch2) {
4184 constexpr int lane_width_in_bytes = 8;
4185 if (src1 != dst) {
4186 vor(dst, src1, src1);
4187 }
4188 MovDoubleToInt64(scratch1, src2);
4189 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
4190 vinsd(dst, scratch1, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
4191 } else {
4192 mtvsrd(scratch2, scratch1);
4193 vinsertd(dst, scratch2, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
4194 }
4195}
4196
4198 DoubleRegister src2, uint8_t imm_lane_idx,
4199 Register scratch1,
4200 DoubleRegister scratch2,
4201 Simd128Register scratch3) {
4202 constexpr int lane_width_in_bytes = 4;
4203 if (src1 != dst) {
4204 vor(dst, src1, src1);
4205 }
4206 MovFloatToInt(scratch1, src2, scratch2);
4207 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
4208 vinsw(dst, scratch1, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
4209 } else {
4210 mtvsrd(scratch3, scratch1);
4211 vinsertw(dst, scratch3, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
4212 }
4213}
4214
4216 Register src2, uint8_t imm_lane_idx,
4217 Simd128Register scratch) {
4218 constexpr int lane_width_in_bytes = 8;
4219 if (src1 != dst) {
4220 vor(dst, src1, src1);
4221 }
4222 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
4223 vinsd(dst, src2, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
4224 } else {
4225 mtvsrd(scratch, src2);
4226 vinsertd(dst, scratch, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
4227 }
4228}
4229
4231 Register src2, uint8_t imm_lane_idx,
4232 Simd128Register scratch) {
4233 constexpr int lane_width_in_bytes = 4;
4234 if (src1 != dst) {
4235 vor(dst, src1, src1);
4236 }
4237 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
4238 vinsw(dst, src2, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
4239 } else {
4240 mtvsrd(scratch, src2);
4241 vinsertw(dst, scratch, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
4242 }
4243}
4244
4246 Register src2, uint8_t imm_lane_idx,
4247 Simd128Register scratch) {
4248 constexpr int lane_width_in_bytes = 2;
4249 if (src1 != dst) {
4250 vor(dst, src1, src1);
4251 }
4252 mtvsrd(scratch, src2);
4253 vinserth(dst, scratch, Operand((7 - imm_lane_idx) * lane_width_in_bytes));
4254}
4255
4257 Register src2, uint8_t imm_lane_idx,
4258 Simd128Register scratch) {
4259 if (src1 != dst) {
4260 vor(dst, src1, src1);
4261 }
4262 mtvsrd(scratch, src2);
4263 vinsertb(dst, scratch, Operand(15 - imm_lane_idx));
4264}
4265
4267 Simd128Register src2, Register scratch1,
4268 Register scratch2, Register scratch3,
4269 Simd128Register scratch4) {
4270 constexpr int lane_width_in_bytes = 8;
4271 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
4272 vmulld(dst, src1, src2);
4273 } else {
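// No single 64-bit vector multiply below Power10: pull each pair of 64-bit
// lanes into GPRs, multiply them with mulld, and reassemble the two products
// with mtvsrdd.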
4274 Register scratch_1 = scratch1;
4275 Register scratch_2 = scratch2;
4276 for (int i = 0; i < 2; i++) {
4277 if (i > 0) {
4278 vextractd(scratch4, src1, Operand(1 * lane_width_in_bytes));
4279 vextractd(dst, src2, Operand(1 * lane_width_in_bytes));
4280 src1 = scratch4;
4281 src2 = dst;
4282 }
4283 mfvsrd(scratch_1, src1);
4284 mfvsrd(scratch_2, src2);
4285 mulld(scratch_1, scratch_1, scratch_2);
4286 scratch_1 = scratch2;
4287 scratch_2 = scratch3;
4288 }
4289 mtvsrdd(dst, scratch1, scratch2);
4290 }
4291}
4292
4293void MacroAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1,
4294 Simd128Register src2) {
4296 vmladduhm(dst, src1, src2, kSimd128RegZero);
4297}
4298
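// The helper below makes F64x2Min/Max propagate NaN inputs: a lane whose
// input compares unequal to itself (i.e. is NaN) is overwritten with that
// input, and the trailing xvmindp pass quiets any signalling NaNs.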
4299#define F64X2_MIN_MAX_NAN(result) \
4300 xvcmpeqdp(scratch2, src1, src1); \
4301 vsel(result, src1, result, scratch2); \
4302 xvcmpeqdp(scratch2, src2, src2); \
4303 vsel(dst, src2, result, scratch2); \
4304 /* Use xvmindp to turn any selected SNANs to QNANs. */ \
4305 xvmindp(dst, dst, dst);
4307 Simd128Register src2, Simd128Register scratch1,
4308 Simd128Register scratch2) {
4309 xvmindp(scratch1, src1, src2);
4310 // We need to check if an input is NaN and preserve it.
4311 F64X2_MIN_MAX_NAN(scratch1)
4312}
4313
4315 Simd128Register src2, Simd128Register scratch1,
4316 Simd128Register scratch2) {
4317 xvmaxdp(scratch1, src1, src2);
4318 // We need to check if an input is NaN and preserve it.
4319 F64X2_MIN_MAX_NAN(scratch1)
4320}
4321#undef F64X2_MIN_MAX_NAN
4322
4323void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
4324 Simd128Register src2) {
4325 xvcmpgtdp(dst, src2, src1);
4326}
4327
4328void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
4329 Simd128Register src2) {
4330 xvcmpgedp(dst, src2, src1);
4331}
4332
4333void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
4334 Simd128Register src2, Simd128Register scratch) {
4335 xvcmpeqdp(scratch, src1, src2);
4336 vnor(dst, scratch, scratch);
4337}
4338
4339void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
4340 Simd128Register src2) {
4341 xvcmpgtsp(dst, src2, src1);
4342}
4343
4344void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
4345 Simd128Register src2) {
4346 xvcmpgesp(dst, src2, src1);
4347}
4348
4349void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
4350 Simd128Register src2, Simd128Register scratch) {
4351 xvcmpeqsp(scratch, src1, src2);
4352 vnor(dst, scratch, scratch);
4353}
4354
4356 Simd128Register src2, Simd128Register scratch) {
4357 vcmpequd(scratch, src1, src2);
4358 vnor(dst, scratch, scratch);
4359}
4360
4362 Simd128Register src2, Simd128Register scratch) {
4363 vcmpgtsd(scratch, src2, src1);
4364 vnor(dst, scratch, scratch);
4365}
4366
4367void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
4368 Simd128Register src2, Simd128Register scratch) {
4369 vcmpequw(scratch, src1, src2);
4370 vnor(dst, scratch, scratch);
4371}
4372
4373void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
4374 Simd128Register src2, Simd128Register scratch) {
4375 vcmpgtsw(scratch, src2, src1);
4376 vnor(dst, scratch, scratch);
4377}
4378
4380 Simd128Register src2, Simd128Register scratch) {
4381 vcmpequw(scratch, src1, src2);
4382 vcmpgtuw(dst, src1, src2);
4383 vor(dst, dst, scratch);
4384}
4385
4386void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
4387 Simd128Register src2, Simd128Register scratch) {
4388 vcmpequh(scratch, src1, src2);
4389 vnor(dst, scratch, scratch);
4390}
4391
4392void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
4393 Simd128Register src2, Simd128Register scratch) {
4394 vcmpgtsh(scratch, src2, src1);
4395 vnor(dst, scratch, scratch);
4396}
4397
4399 Simd128Register src2, Simd128Register scratch) {
4400 vcmpequh(scratch, src1, src2);
4401 vcmpgtuh(dst, src1, src2);
4402 vor(dst, dst, scratch);
4403}
4404
4405void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
4406 Simd128Register src2, Simd128Register scratch) {
4407 vcmpequb(scratch, src1, src2);
4408 vnor(dst, scratch, scratch);
4409}
4410
4411void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
4412 Simd128Register src2, Simd128Register scratch) {
4413 vcmpgtsb(scratch, src2, src1);
4414 vnor(dst, scratch, scratch);
4415}
4416
4418 Simd128Register src2, Simd128Register scratch) {
4419 vcmpequb(scratch, src1, src2);
4420 vcmpgtub(dst, src1, src2);
4421 vor(dst, dst, scratch);
4422}
4423
4425 Simd128Register scratch) {
4426 constexpr int shift_bits = 63;
4427 xxspltib(scratch, Operand(shift_bits));
4428 vsrad(scratch, src, scratch);
4429 vxor(dst, src, scratch);
4430 vsubudm(dst, dst, scratch);
4431}
4432void MacroAssembler::I32x4Abs(Simd128Register dst, Simd128Register src,
4433 Simd128Register scratch) {
4434 constexpr int shift_bits = 31;
4435 xxspltib(scratch, Operand(shift_bits));
4436 vsraw(scratch, src, scratch);
4437 vxor(dst, src, scratch);
4438 vsubuwm(dst, dst, scratch);
4439}
4440void MacroAssembler::I16x8Abs(Simd128Register dst, Simd128Register src,
4441 Simd128Register scratch) {
4442 constexpr int shift_bits = 15;
4443 xxspltib(scratch, Operand(shift_bits));
4444 vsrah(scratch, src, scratch);
4445 vxor(dst, src, scratch);
4446 vsubuhm(dst, dst, scratch);
4447}
4448void MacroAssembler::I16x8Neg(Simd128Register dst, Simd128Register src,
4449 Simd128Register scratch) {
4450 vspltish(scratch, Operand(1));
4451 vnor(dst, src, src);
4452 vadduhm(dst, scratch, dst);
4453}
4454void MacroAssembler::I8x16Abs(Simd128Register dst, Simd128Register src,
4455 Simd128Register scratch) {
4456 constexpr int shift_bits = 7;
4457 xxspltib(scratch, Operand(shift_bits));
4458 vsrab(scratch, src, scratch);
4459 vxor(dst, src, scratch);
4460 vsububm(dst, dst, scratch);
4461}
4462void MacroAssembler::I8x16Neg(Simd128Register dst, Simd128Register src,
4463 Simd128Register scratch) {
4464 xxspltib(scratch, Operand(1));
4465 vnor(dst, src, src);
4466 vaddubm(dst, scratch, dst);
4467}
4468
4469void MacroAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1,
4470 Simd128Register src2, Simd128Register scratch) {
4471 xvcmpgtdp(kScratchSimd128Reg, src1, src2);
4472 vsel(dst, src1, src2, kScratchSimd128Reg);
4473}
4474
4475void MacroAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1,
4476 Simd128Register src2, Simd128Register scratch) {
4477 xvcmpgtdp(kScratchSimd128Reg, src2, src1);
4478 vsel(dst, src1, src2, kScratchSimd128Reg);
4479}
4480
4481void MacroAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1,
4482 Simd128Register src2, Simd128Register scratch) {
4483 xvcmpgtsp(kScratchSimd128Reg, src1, src2);
4484 vsel(dst, src1, src2, kScratchSimd128Reg);
4485}
4486
4487void MacroAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1,
4488 Simd128Register src2, Simd128Register scratch) {
4489 xvcmpgtsp(kScratchSimd128Reg, src2, src1);
4490 vsel(dst, src1, src2, kScratchSimd128Reg);
4491}
4492
4494 Simd128Register src,
4495 Simd128Register scratch) {
4496 // NaN to 0
4497 xvcmpeqsp(scratch, src, src);
4498 vand(scratch, src, scratch);
4499 xvcvspsxws(dst, scratch);
4500}
4501
4503 Simd128Register src1,
4504 Simd128Register src2) {
4505 vpkswss(dst, src2, src1);
4506}
4507
4509 Simd128Register src1,
4510 Simd128Register src2) {
4511 vpkswus(dst, src2, src1);
4512}
4513
4515 Simd128Register src1,
4516 Simd128Register src2) {
4517 vpkshss(dst, src2, src1);
4518}
4519
4521 Simd128Register src1,
4522 Simd128Register src2) {
4523 vpkshus(dst, src2, src1);
4524}
4525
4527 Simd128Register src) {
4528 vupklsw(dst, src);
4529 xvcvsxddp(dst, dst);
4530}
4531
4533 Simd128Register src,
4534 Register scratch1,
4535 Simd128Register scratch2) {
4536 constexpr int lane_width_in_bytes = 8;
4537 vupklsw(dst, src);
4538 // Zero extend.
4539 mov(scratch1, Operand(0xFFFFFFFF));
4540 mtvsrd(scratch2, scratch1);
4541 vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
4542 vand(dst, scratch2, dst);
4543 xvcvuxddp(dst, dst);
4544}
4545
4547 Simd128Register src,
4548 Register scratch1,
4549 Simd128Register scratch2) {
4550 constexpr int lane_width_in_bytes = 8;
4551 vupklsw(dst, src);
4552 // Zero extend.
4553 mov(scratch1, Operand(0xFFFFFFFF));
4554 mtvsrd(scratch2, scratch1);
4555 vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
4556 vand(dst, scratch2, dst);
4557}
4558
4560 Simd128Register src,
4561 Register scratch1,
4562 Simd128Register scratch2) {
4563 constexpr int lane_width_in_bytes = 8;
4564 vupkhsw(dst, src);
4565 // Zero extend.
4566 mov(scratch1, Operand(0xFFFFFFFF));
4567 mtvsrd(scratch2, scratch1);
4568 vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
4569 vand(dst, scratch2, dst);
4570}
4571
4573 Simd128Register src,
4574 Register scratch1,
4575 Simd128Register scratch2) {
4576 vupklsh(dst, src);
4577 // Zero extend.
4578 mov(scratch1, Operand(0xFFFF));
4579 mtvsrd(scratch2, scratch1);
4580 vspltw(scratch2, scratch2, Operand(1));
4581 vand(dst, scratch2, dst);
4582}
4583
4585 Simd128Register src,
4586 Register scratch1,
4587 Simd128Register scratch2) {
4588 vupkhsh(dst, src);
4589 // Zero extend.
4590 mov(scratch1, Operand(0xFFFF));
4591 mtvsrd(scratch2, scratch1);
4592 vspltw(scratch2, scratch2, Operand(1));
4593 vand(dst, scratch2, dst);
4594}
4595
4597 Simd128Register src,
4598 Register scratch1,
4599 Simd128Register scratch2) {
4600 vupklsb(dst, src);
4601 // Zero extend.
4602 li(scratch1, Operand(0xFF));
4603 mtvsrd(scratch2, scratch1);
4604 vsplth(scratch2, scratch2, Operand(3));
4605 vand(dst, scratch2, dst);
4606}
4607
4609 Simd128Register src,
4610 Register scratch1,
4611 Simd128Register scratch2) {
4612 vupkhsb(dst, src);
4613 // Zero extend.
4614 li(scratch1, Operand(0xFF));
4615 mtvsrd(scratch2, scratch1);
4616 vsplth(scratch2, scratch2, Operand(3));
4617 vand(dst, scratch2, dst);
4618}
4619
4620void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src,
4621 Register scratch1, Register scratch2,
4622 Simd128Register scratch3) {
4623 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
4624 vextractbm(dst, src);
4625 } else {
4626 mov(scratch1, Operand(0x8101820283038));
4627 mov(scratch2, Operand(0x4048505860687078));
4628 mtvsrdd(scratch3, scratch1, scratch2);
4629 vbpermq(scratch3, src, scratch3);
4630 mfvsrd(dst, scratch3);
4631 }
4632}
4633
4635 Simd128Register src2) {
4637 vmsumshm(dst, src1, src2, kSimd128RegZero);
4638}
4639
4641 Simd128Register src1,
4642 Simd128Register src2,
4643 Simd128Register src3) {
4644 vmsummbm(dst, src1, src2, src3);
4645}
4646
4648 Simd128Register src2,
4649 Simd128Register scratch) {
4650 vmulesb(scratch, src1, src2);
4651 vmulosb(dst, src1, src2);
4652 vadduhm(dst, scratch, dst);
4653}
4654
4656 Simd128Register src2) {
4658 vmhraddshs(dst, src1, src2, kSimd128RegZero);
4659}
4660
4662 Simd128Register src2,
4663 Simd128Register scratch) {
4664 // Saturate the indices to 5 bits. Input indices more than 31 should
4665 // return 0.
4666 xxspltib(scratch, Operand(31));
4667 vminub(scratch, src2, scratch);
4668 // Input needs to be reversed.
4669 xxbrq(dst, src1);
4671 vperm(dst, dst, kSimd128RegZero, scratch);
4672}
4673
4675 Simd128Register src2, uint64_t high,
4676 uint64_t low, Register scratch1,
4677 Register scratch2, Simd128Register scratch3) {
4678 mov(scratch1, Operand(low));
4679 mov(scratch2, Operand(high));
4680 mtvsrdd(scratch3, scratch2, scratch1);
4681 vperm(dst, src1, src2, scratch3);
4682}
4683
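// The pairwise-add helpers below multiply by a splatted 1 with the even/odd
// widening multiplies, which yields the sign- or zero-extended even and odd
// lanes in two registers, and then add the two halves together.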
4684#define EXT_ADD_PAIRWISE(splat, mul_even, mul_odd, add) \
4685 splat(scratch1, Operand(1)); \
4686 mul_even(scratch2, src, scratch1); \
4687 mul_odd(scratch1, src, scratch1); \
4688 add(dst, scratch2, scratch1);
4690 Simd128Register src,
4691 Simd128Register scratch1,
4692 Simd128Register scratch2) {
4693 EXT_ADD_PAIRWISE(vspltish, vmulesh, vmulosh, vadduwm)
4694}
4696 Simd128Register src,
4697 Simd128Register scratch1,
4698 Simd128Register scratch2) {
4699 EXT_ADD_PAIRWISE(vspltish, vmuleuh, vmulouh, vadduwm)
4700}
4702 Simd128Register src,
4703 Simd128Register scratch1,
4704 Simd128Register scratch2) {
4705 EXT_ADD_PAIRWISE(xxspltib, vmulesb, vmulosb, vadduhm)
4706}
4708 Simd128Register src,
4709 Simd128Register scratch1,
4710 Simd128Register scratch2) {
4711 EXT_ADD_PAIRWISE(xxspltib, vmuleub, vmuloub, vadduhm)
4712}
4713#undef EXT_ADD_PAIRWISE
4714
4716 Simd128Register src) {
4717 constexpr int lane_number = 8;
4718 vextractd(dst, src, Operand(lane_number));
4719 vinsertw(dst, dst, Operand(lane_number));
4720 xvcvspdp(dst, dst);
4721}
4722
4724 Simd128Register src,
4725 Simd128Register scratch) {
4726 constexpr int lane_number = 8;
4727 xvcvdpsp(scratch, src);
4728 vextractuw(dst, scratch, Operand(lane_number));
4729 vinsertw(scratch, dst, Operand(4));
4730 vxor(dst, dst, dst);
4731 vinsertd(dst, scratch, Operand(lane_number));
4732}
4733
4735 Simd128Register src,
4736 Simd128Register scratch) {
4737 constexpr int lane_number = 8;
4738 // NaN to 0.
4739 xvcmpeqdp(scratch, src, src);
4740 vand(scratch, src, scratch);
4741 xvcvdpsxws(scratch, scratch);
4742 vextractuw(dst, scratch, Operand(lane_number));
4743 vinsertw(scratch, dst, Operand(4));
4744 vxor(dst, dst, dst);
4745 vinsertd(dst, scratch, Operand(lane_number));
4746}
4747
4749 Simd128Register src,
4750 Simd128Register scratch) {
4751 constexpr int lane_number = 8;
4752 xvcvdpuxws(scratch, src);
4753 vextractuw(dst, scratch, Operand(lane_number));
4754 vinsertw(scratch, dst, Operand(4));
4755 vxor(dst, dst, dst);
4756 vinsertd(dst, scratch, Operand(lane_number));
4757}
4758
4759#if V8_TARGET_BIG_ENDIAN
4760#define MAYBE_REVERSE_BYTES(reg, instr) instr(reg, reg);
4761#else
4762#define MAYBE_REVERSE_BYTES(reg, instr)
4763#endif
4765 int lane, Register scratch1,
4766 Simd128Register scratch2) {
4767 constexpr int lane_width_in_bytes = 8;
4768 LoadSimd128Uint64(scratch2, mem, scratch1);
4769 MAYBE_REVERSE_BYTES(scratch2, xxbrd)
4770 vinsertd(dst, scratch2, Operand((1 - lane) * lane_width_in_bytes));
4771}
4772
4774 int lane, Register scratch1,
4775 Simd128Register scratch2) {
4776 constexpr int lane_width_in_bytes = 4;
4777 LoadSimd128Uint32(scratch2, mem, scratch1);
4778 MAYBE_REVERSE_BYTES(scratch2, xxbrw)
4779 vinsertw(dst, scratch2, Operand((3 - lane) * lane_width_in_bytes));
4780}
4781
4783 int lane, Register scratch1,
4784 Simd128Register scratch2) {
4785 constexpr int lane_width_in_bytes = 2;
4786 LoadSimd128Uint16(scratch2, mem, scratch1);
4787 MAYBE_REVERSE_BYTES(scratch2, xxbrh)
4788 vinserth(dst, scratch2, Operand((7 - lane) * lane_width_in_bytes));
4789}
4790
4792 int lane, Register scratch1,
4793 Simd128Register scratch2) {
4794 LoadSimd128Uint8(scratch2, mem, scratch1);
4795 vinsertb(dst, scratch2, Operand((15 - lane)));
4796}
4797
4799 int lane, Register scratch1,
4800 Simd128Register scratch2) {
4801 constexpr int lane_width_in_bytes = 8;
4802 vextractd(scratch2, src, Operand((1 - lane) * lane_width_in_bytes));
4803 MAYBE_REVERSE_BYTES(scratch2, xxbrd)
4804 StoreSimd128Uint64(scratch2, mem, scratch1);
4805}
4806
4808 int lane, Register scratch1,
4809 Simd128Register scratch2) {
4810 constexpr int lane_width_in_bytes = 4;
4811 vextractuw(scratch2, src, Operand((3 - lane) * lane_width_in_bytes));
4812 MAYBE_REVERSE_BYTES(scratch2, xxbrw)
4813 StoreSimd128Uint32(scratch2, mem, scratch1);
4814}
4815
4817 int lane, Register scratch1,
4818 Simd128Register scratch2) {
4819 constexpr int lane_width_in_bytes = 2;
4820 vextractuh(scratch2, src, Operand((7 - lane) * lane_width_in_bytes));
4821 MAYBE_REVERSE_BYTES(scratch2, xxbrh)
4822 StoreSimd128Uint16(scratch2, mem, scratch1);
4823}
4824
4826 int lane, Register scratch1,
4827 Simd128Register scratch2) {
4828 vextractub(scratch2, src, Operand(15 - lane));
4829 StoreSimd128Uint8(scratch2, mem, scratch1);
4830}
4831
4833 const MemOperand& mem,
4834 Register scratch) {
4835 constexpr int lane_width_in_bytes = 8;
4836 LoadSimd128Uint64(dst, mem, scratch);
4837 MAYBE_REVERSE_BYTES(dst, xxbrd)
4838 vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
4839}
4840
4842 const MemOperand& mem,
4843 Register scratch) {
4844 LoadSimd128Uint32(dst, mem, scratch);
4845 MAYBE_REVERSE_BYTES(dst, xxbrw)
4846 vspltw(dst, dst, Operand(1));
4847}
4848
4850 const MemOperand& mem,
4851 Register scratch) {
4852 LoadSimd128Uint16(dst, mem, scratch);
4853 MAYBE_REVERSE_BYTES(dst, xxbrh)
4854 vsplth(dst, dst, Operand(3));
4855}
4856
4858 const MemOperand& mem,
4859 Register scratch) {
4860 LoadSimd128Uint8(dst, mem, scratch);
4861 vspltb(dst, dst, Operand(7));
4862}
4863
4865 const MemOperand& mem,
4866 Register scratch) {
4867 LoadSimd128Uint64(dst, mem, scratch);
4868 MAYBE_REVERSE_BYTES(dst, xxbrd)
4869 vupkhsw(dst, dst);
4870}
4871
4873 const MemOperand& mem,
4874 Register scratch1,
4875 Simd128Register scratch2) {
4876 constexpr int lane_width_in_bytes = 8;
4877 LoadAndExtend32x2SLE(dst, mem, scratch1);
4878 // Zero extend.
4879 mov(scratch1, Operand(0xFFFFFFFF));
4880 mtvsrd(scratch2, scratch1);
4881 vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
4882 vand(dst, scratch2, dst);
4883}
4884
4886 const MemOperand& mem,
4887 Register scratch) {
4888 LoadSimd128Uint64(dst, mem, scratch);
4889 MAYBE_REVERSE_BYTES(dst, xxbrd)
4890 vupkhsh(dst, dst);
4891}
4892
4894 const MemOperand& mem,
4895 Register scratch1,
4896 Simd128Register scratch2) {
4897 LoadAndExtend16x4SLE(dst, mem, scratch1);
4898 // Zero extend.
4899 mov(scratch1, Operand(0xFFFF));
4900 mtvsrd(scratch2, scratch1);
4901 vspltw(scratch2, scratch2, Operand(1));
4902 vand(dst, scratch2, dst);
4903}
4904
4906 const MemOperand& mem,
4907 Register scratch) {
4908 LoadSimd128Uint64(dst, mem, scratch);
4909 MAYBE_REVERSE_BYTES(dst, xxbrd)
4910 vupkhsb(dst, dst);
4911}
4912
4914 const MemOperand& mem,
4915 Register scratch1,
4916 Simd128Register scratch2) {
4917 LoadAndExtend8x8SLE(dst, mem, scratch1);
4918 // Zero extend.
4919 li(scratch1, Operand(0xFF));
4920 mtvsrd(scratch2, scratch1);
4921 vsplth(scratch2, scratch2, Operand(3));
4922 vand(dst, scratch2, dst);
4923}
4924
4926 Register scratch1,
4927 Simd128Register scratch2) {
4928 constexpr int lane_width_in_bytes = 8;
4929 LoadSimd128Uint64(scratch2, mem, scratch1);
4930 MAYBE_REVERSE_BYTES(scratch2, xxbrd)
4931 vxor(dst, dst, dst);
4932 vinsertd(dst, scratch2, Operand(1 * lane_width_in_bytes));
4933}
4934
4936 Register scratch1,
4937 Simd128Register scratch2) {
4938 constexpr int lane_width_in_bytes = 4;
4939 LoadSimd128Uint32(scratch2, mem, scratch1);
4940 MAYBE_REVERSE_BYTES(scratch2, xxbrw)
4941 vxor(dst, dst, dst);
4942 vinsertw(dst, scratch2, Operand(3 * lane_width_in_bytes));
4943}
4944#undef MAYBE_REVERSE_BYTES
4945
4946void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src,
4947 Register scratch1, Register scratch2,
4948 Simd128Register scratch3) {
4949 constexpr uint8_t fxm = 0x2; // field mask.
4950 constexpr int bit_number = 24;
4951 li(scratch1, Operand(0));
4952 li(scratch2, Operand(1));
4953 // Check if both lanes are 0, if so then return false.
4954 vxor(scratch3, scratch3, scratch3);
4955 mtcrf(scratch1, fxm); // Clear cr6.
4956 vcmpequd(scratch3, src, scratch3, SetRC);
4957 isel(dst, scratch1, scratch2, bit_number);
4958}
4959
4961 vnor(dst, src, src);
4962}
4963
4964void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low,
4965 Register scratch1, Register scratch2) {
4966 mov(scratch1, Operand(low));
4967 mov(scratch2, Operand(high));
4968 mtvsrdd(dst, scratch2, scratch1);
4969}
4970
4973 vsel(dst, src2, src1, mask);
4974}
4975
4976Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4977 Register reg4, Register reg5,
4978 Register reg6) {
4979 RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
4980
4981 const RegisterConfiguration* config = RegisterConfiguration::Default();
4982 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
4983 int code = config->GetAllocatableGeneralCode(i);
4984 Register candidate = Register::from_code(code);
4985 if (regs.has(candidate)) continue;
4986 return candidate;
4987 }
4988 UNREACHABLE();
4989}
4990
4991void MacroAssembler::SwapP(Register src, Register dst, Register scratch) {
4992 if (src == dst) return;
4993 DCHECK(!AreAliased(src, dst, scratch));
4994 mr(scratch, src);
4995 mr(src, dst);
4996 mr(dst, scratch);
4997}
4998
4999void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
5000 if (dst.ra() != r0 && dst.ra().is_valid())
5001 DCHECK(!AreAliased(src, dst.ra(), scratch));
5002 if (dst.rb() != r0 && dst.rb().is_valid())
5003 DCHECK(!AreAliased(src, dst.rb(), scratch));
5004 DCHECK(!AreAliased(src, scratch));
5005 mr(scratch, src);
5006 LoadU64(src, dst, r0);
5007 StoreU64(scratch, dst, r0);
5008}
5009
5010void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
5011 Register scratch_1) {
5012 if (src.ra() != r0 && src.ra().is_valid())
5013 DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
5014 if (src.rb() != r0 && src.rb().is_valid())
5015 DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
5016 if (dst.ra() != r0 && dst.ra().is_valid())
5017 DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
5018 if (dst.rb() != r0 && dst.rb().is_valid())
5019 DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
5020 DCHECK(!AreAliased(scratch_0, scratch_1));
5021 if (is_int16(src.offset()) || is_int16(dst.offset())) {
5022 if (!is_int16(src.offset())) {
5023 // Swap operands so that src, which is accessed below without a scratch
5023 // register, has the offset that fits in 16 bits.
5024 MemOperand temp = src;
5025 src = dst;
5026 dst = temp;
5027 }
5028 LoadU64(scratch_1, dst, scratch_0);
5029 LoadU64(scratch_0, src);
5030 StoreU64(scratch_1, src);
5031 StoreU64(scratch_0, dst, scratch_1);
5032 } else {
5033 LoadU64(scratch_1, dst, scratch_0);
5034 push(scratch_1);
5035 LoadU64(scratch_0, src, scratch_1);
5036 StoreU64(scratch_0, dst, scratch_1);
5037 pop(scratch_1);
5038 StoreU64(scratch_1, src, scratch_0);
5039 }
5040}
5041
5043 DoubleRegister scratch) {
5044 if (src == dst) return;
5045 DCHECK(!AreAliased(src, dst, scratch));
5046 fmr(scratch, src);
5047 fmr(src, dst);
5048 fmr(dst, scratch);
5049}
5050
5052 DoubleRegister scratch) {
5053 DCHECK(!AreAliased(src, scratch));
5054 fmr(scratch, src);
5055 LoadF32(src, dst, r0);
5056 StoreF32(scratch, dst, r0);
5057}
5058
5060 DoubleRegister scratch_0,
5061 DoubleRegister scratch_1) {
5062 DCHECK(!AreAliased(scratch_0, scratch_1));
5063 LoadF32(scratch_0, src, r0);
5064 LoadF32(scratch_1, dst, r0);
5065 StoreF32(scratch_0, dst, r0);
5066 StoreF32(scratch_1, src, r0);
5067}
5068
5070 DoubleRegister scratch) {
5071 if (src == dst) return;
5072 DCHECK(!AreAliased(src, dst, scratch));
5073 fmr(scratch, src);
5074 fmr(src, dst);
5075 fmr(dst, scratch);
5076}
5077
5079 DoubleRegister scratch) {
5080 DCHECK(!AreAliased(src, scratch));
5081 fmr(scratch, src);
5082 LoadF64(src, dst, r0);
5083 StoreF64(scratch, dst, r0);
5084}
5085
5087 DoubleRegister scratch_0,
5088 DoubleRegister scratch_1) {
5089 DCHECK(!AreAliased(scratch_0, scratch_1));
5090 LoadF64(scratch_0, src, r0);
5091 LoadF64(scratch_1, dst, r0);
5092 StoreF64(scratch_0, dst, r0);
5093 StoreF64(scratch_1, src, r0);
5094}
5095
5097 Simd128Register scratch) {
5098 if (src == dst) return;
5099 vor(scratch, src, src);
5100 vor(src, dst, dst);
5101 vor(dst, scratch, scratch);
5102}
5103
5105 Simd128Register scratch1, Register scratch2) {
5106 DCHECK(src != scratch1);
5107 LoadSimd128(scratch1, dst, scratch2);
5108 StoreSimd128(src, dst, scratch2);
5109 vor(src, scratch1, scratch1);
5110}
5111
5113 Simd128Register scratch1,
5114 Simd128Register scratch2, Register scratch3) {
5115 LoadSimd128(scratch1, src, scratch3);
5116 LoadSimd128(scratch2, dst, scratch3);
5117
5118 StoreSimd128(scratch1, dst, scratch3);
5119 StoreSimd128(scratch2, src, scratch3);
5120}
5121
5122void MacroAssembler::ByteReverseU16(Register dst, Register val,
5123 Register scratch) {
5124 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
5125 brh(dst, val);
5126 ZeroExtHalfWord(dst, dst);
5127 return;
5128 }
5129 rlwinm(scratch, val, 8, 16, 23);
5130 rlwinm(dst, val, 24, 24, 31);
5131 orx(dst, scratch, dst);
5132 ZeroExtHalfWord(dst, dst);
5133}
5134
5135void MacroAssembler::ByteReverseU32(Register dst, Register val,
5136 Register scratch) {
5137 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
5138 brw(dst, val);
5139 ZeroExtWord32(dst, dst);
5140 return;
5141 }
5142 rotlwi(scratch, val, 8);
5143 rlwimi(scratch, val, 24, 0, 7);
5144 rlwimi(scratch, val, 24, 16, 23);
5145 ZeroExtWord32(dst, scratch);
5146}
5147
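// Pre-Power10 fallback in ByteReverseU64: without brd, the value is spilled
// to the stack and reloaded with the byte-reversing load ldbrx.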
5148void MacroAssembler::ByteReverseU64(Register dst, Register val, Register) {
5149 if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
5150 brd(dst, val);
5151 return;
5152 }
5153 subi(sp, sp, Operand(kSystemPointerSize));
5154 std(val, MemOperand(sp));
5155 ldbrx(dst, MemOperand(r0, sp));
5156 addi(sp, sp, Operand(kSystemPointerSize));
5157}
5158
5159void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
5160 CmpS32(x, Operand(y), r0);
5161 beq(dest);
5162}
5163
5164void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
5165 CmpS32(x, Operand(y), r0);
5166 blt(dest);
5167}
5168
5169void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index,
5170 Register target) {
5171 static_assert(kSystemPointerSize == 8);
5172 static_assert(kSmiTagSize == 1);
5173 static_assert(kSmiTag == 0);
5174
5175 // The builtin_index register contains the builtin index as a Smi.
5176 if (SmiValuesAre32Bits()) {
5177 ShiftRightS64(target, builtin_index,
5179 } else {
5181 ShiftLeftU64(target, builtin_index,
5183 }
5184 AddS64(target, target, Operand(IsolateData::builtin_entry_table_offset()));
5185 LoadU64(target, MemOperand(kRootRegister, target));
5186}
5187
5188void MacroAssembler::CallBuiltinByIndex(Register builtin_index,
5189 Register target) {
5190 LoadEntryFromBuiltinIndex(builtin_index, target);
5191 Call(target);
5192}
5193
5195 Register destination) {
5196 ASM_CODE_COMMENT(this);
5198}
5199
5201 ASM_CODE_COMMENT(this);
5205}
5206
5207#ifdef V8_ENABLE_LEAPTIERING
5208
5209void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
5210 Register dispatch_handle,
5211 Register scratch) {
5212 DCHECK(!AreAliased(destination, dispatch_handle, scratch));
5213 ASM_CODE_COMMENT(this);
5214
5215 Register index = destination;
5216 Move(scratch, ExternalReference::js_dispatch_table_address());
5217 ShiftRightU64(index, dispatch_handle, Operand(kJSDispatchHandleShift));
5218 ShiftLeftU64(index, index, Operand(kJSDispatchTableEntrySizeLog2));
5219 AddS64(scratch, scratch, index);
5220 LoadU64(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
5221}
5222
5223#endif // V8_ENABLE_LEAPTIERING
5224
5226 Register code_object,
5227 CodeEntrypointTag tag) {
5228 ASM_CODE_COMMENT(this);
5229#ifdef V8_ENABLE_SANDBOX
5230 LoadCodeEntrypointViaCodePointer(
5232 FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), r0);
5233#else
5235 FieldMemOperand(code_object, Code::kInstructionStartOffset), r0);
5236#endif
5237}
5238
5239void MacroAssembler::CallCodeObject(Register code_object) {
5240 ASM_CODE_COMMENT(this);
5241 LoadCodeInstructionStart(code_object, code_object);
5242 Call(code_object);
5243}
5244
5245void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
5246 ASM_CODE_COMMENT(this);
5247 DCHECK_EQ(JumpMode::kJump, jump_mode);
5248 LoadCodeInstructionStart(code_object, code_object);
5249 Jump(code_object);
5250}
5251
5252void MacroAssembler::CallJSFunction(Register function_object,
5253 uint16_t argument_count, Register scratch) {
5255#if V8_ENABLE_LEAPTIERING
5256 Register dispatch_handle = r0;
5257 LoadU32(dispatch_handle,
5258 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
5259 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, ip);
5260 Call(code);
5261#elif V8_ENABLE_SANDBOX
5262 // When the sandbox is enabled, we can directly fetch the entrypoint pointer
5263 // from the code pointer table instead of going through the Code object. In
5264 // this way, we avoid one memory load on this code path.
5265 LoadCodeEntrypointViaCodePointer(
5266 code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
5267 Call(code);
5268#else
5270 code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
5271 CallCodeObject(code);
5272#endif
5273}
5274
5275#if V8_ENABLE_LEAPTIERING
5276void MacroAssembler::CallJSDispatchEntry(JSDispatchHandle dispatch_handle,
5277 uint16_t argument_count) {
5279 Register dispatch_handle_reg = r0;
5280 Register scratch = ip;
5281 mov(dispatch_handle_reg,
5282 Operand(dispatch_handle.value(), RelocInfo::JS_DISPATCH_HANDLE));
5283 // WARNING: This entrypoint load is only safe because we are storing a
5284 // RelocInfo for the dispatch handle in the mov above (thus keeping the
5285 // dispatch entry alive) _and_ because the entrypoints are not compactable
5286 // (thus meaning that the calculation in the entrypoint load is not
5287 // invalidated by a compaction).
5288 // TODO(leszeks): Make this less of a footgun.
5289 static_assert(!JSDispatchTable::kSupportsCompaction);
5290 LoadEntrypointFromJSDispatchTable(code, dispatch_handle_reg, scratch);
5291 CHECK_EQ(argument_count,
5292 IsolateGroup::current()->js_dispatch_table()->GetParameterCount(
5293 dispatch_handle));
5294 Call(code);
5295}
5296#endif
5297
5298void MacroAssembler::JumpJSFunction(Register function_object, Register scratch,
5299 JumpMode jump_mode) {
5301#if V8_ENABLE_LEAPTIERING
5302 Register dispatch_handle = r0;
5303 LoadU32(dispatch_handle,
5304 FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
5305 LoadEntrypointFromJSDispatchTable(code, dispatch_handle, ip);
5306 Jump(code);
5307#elif V8_ENABLE_SANDBOX
5308 // When the sandbox is enabled, we can directly fetch the entrypoint pointer
5309 // from the code pointer table instead of going through the Code object. In
5310 // this way, we avoid one memory load on this code path.
5311 LoadCodeEntrypointViaCodePointer(
5312 code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
5313 DCHECK_EQ(jump_mode, JumpMode::kJump);
5314 DCHECK_EQ(code, r5);
5315 Jump(code);
5316#else
5318 code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
5319 JumpCodeObject(code, jump_mode);
5320#endif
5321}
5322
5323#ifdef V8_ENABLE_WEBASSEMBLY
5324
5325void MacroAssembler::ResolveWasmCodePointer(Register target) {
5326 ASM_CODE_COMMENT(this);
5327 static_assert(!V8_ENABLE_SANDBOX_BOOL);
5328 ExternalReference global_jump_table =
5329 ExternalReference::wasm_code_pointer_table();
5330 UseScratchRegisterScope temps(this);
5331 Register scratch = temps.Acquire();
5332 Move(scratch, global_jump_table);
5333 static_assert(sizeof(wasm::WasmCodePointerTableEntry) == kSystemPointerSize);
5334 ShiftLeftU32(target, target, Operand(kSystemPointerSizeLog2));
5335 LoadU64(target, MemOperand(scratch, target));
5336}
5337
5338void MacroAssembler::CallWasmCodePointer(Register target,
5339 CallJumpMode call_jump_mode) {
5340 ResolveWasmCodePointer(target);
5341 if (call_jump_mode == CallJumpMode::kTailCall) {
5342 Jump(target);
5343 } else {
5344 Call(target);
5345 }
5346}
5347
5348void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
5349 static_assert(sizeof(WasmCodePointer) == 4);
5350 LoadU32(dst, src);
5351}
5352
5353#endif
5354
5355void MacroAssembler::StoreReturnAddressAndCall(Register target) {
5356 // This generates the final instruction sequence for calls to C functions
5357 // once an exit frame has been constructed.
5358 //
5359 // Note that this assumes the caller code (i.e. the InstructionStream object
5360 // currently being generated) is immovable or that the callee function cannot
5361 // trigger GC, since the callee function will return to it.
5362
5363 static constexpr int after_call_offset = 5 * kInstrSize;
5364 Label start_call;
5365 Register dest = target;
5366
5368 // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
5369 // aware of this descriptor and pick up values from it.
5372 LoadU64(ip, MemOperand(target, 0));
5373 dest = ip;
5374 } else if (ABI_CALL_VIA_IP && dest != ip) {
5375 Move(ip, target);
5376 dest = ip;
5377 }
5378
5379 LoadPC(r7);
5380 bind(&start_call);
5381 addi(r7, r7, Operand(after_call_offset));
5383 Call(dest);
5384
5385 DCHECK_EQ(after_call_offset - kInstrSize,
5386 SizeOfCodeGeneratedSince(&start_call));
5387}
5388
5389// Check if the code object is marked for deoptimization. If it is, then it
5390// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
5391// to:
5392// 1. read from memory the word that contains that bit, which can be found in
5393// the flags in the referenced {Code} object;
5394// 2. test kMarkedForDeoptimizationBit in those flags; and
5395// 3. if it is not zero then it jumps to the builtin.
5396//
5397// Note: With leaptiering we simply assert the code is not deoptimized.
5399 int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
5400 if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
5402 r0);
5403 LoadU32(r11, FieldMemOperand(r11, Code::kFlagsOffset), r0);
5405 }
5406#ifdef V8_ENABLE_LEAPTIERING
5407 if (v8_flags.debug_code) {
5408 Assert(to_condition(kZero), AbortReason::kInvalidDeoptimizedCode);
5409 }
5410#else
5411 TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, cr0);
5412#endif
5413}
5414
5415void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
5416 DeoptimizeKind kind, Label* ret,
5417 Label*) {
5418 BlockTrampolinePoolScope block_trampoline_pool(this);
5422 Call(ip);
5426}
5427
5428void MacroAssembler::ZeroExtByte(Register dst, Register src) {
5429 clrldi(dst, src, Operand(56));
5430}
5431
5432void MacroAssembler::ZeroExtHalfWord(Register dst, Register src) {
5433 clrldi(dst, src, Operand(48));
5434}
5435
5436void MacroAssembler::ZeroExtWord32(Register dst, Register src) {
5437 clrldi(dst, src, Operand(32));
5438}
5439
5440void MacroAssembler::Trap() { stop(); }
5441void MacroAssembler::DebugBreak() { stop(); }
5442
5443void MacroAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); }
5444
5445void MacroAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); }
5446
5447void MacroAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) {
5448 cntlzw(dst, src, r);
5449}
5450
5451void MacroAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) {
5452 cntlzd(dst, src, r);
5453}
5454
5455#define COUNT_TRAILING_ZEROES_SLOW(max_count, scratch1, scratch2) \
5456 Label loop, done; \
5457 li(scratch1, Operand(max_count)); \
5458 mtctr(scratch1); \
5459 mr(scratch1, src); \
5460 li(dst, Operand::Zero()); \
5461 bind(&loop); /* while ((src & 1) == 0) */ \
5462 andi(scratch2, scratch1, Operand(1)); \
5463 bne(&done, cr0); \
5464 srdi(scratch1, scratch1, Operand(1)); /* src >>= 1;*/ \
5465 addi(dst, dst, Operand(1)); /* dst++ */ \
5466 bdnz(&loop); \
5467 bind(&done);
5468void MacroAssembler::CountTrailingZerosU32(Register dst, Register src,
5469 Register scratch1, Register scratch2,
5470 RCBit r) {
5471 if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
5472 cnttzw(dst, src, r);
5473 } else {
5474 COUNT_TRAILING_ZEROES_SLOW(32, scratch1, scratch2);
5475 }
5476}
5477
5478void MacroAssembler::CountTrailingZerosU64(Register dst, Register src,
5479 Register scratch1, Register scratch2,
5480 RCBit r) {
5481 if (CpuFeatures::IsSupported(PPC_9_PLUS)) {
5482 cnttzd(dst, src, r);
5483 } else {
5484 COUNT_TRAILING_ZEROES_SLOW(64, scratch1, scratch2);
5485 }
5486}
5487#undef COUNT_TRAILING_ZEROES_SLOW
5488
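// ClearByteU64 clears byte byte_idx (0 = most significant byte) by rotating
// that byte into the top position, masking it off with rldicl, and rotating
// back.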
5489void MacroAssembler::ClearByteU64(Register dst, int byte_idx) {
5490 CHECK(0 <= byte_idx && byte_idx <= 7);
5491 int shift = byte_idx * 8;
5492 rldicl(dst, dst, shift, 8);
5493 rldicl(dst, dst, 64 - shift, 0);
5494}
5495
5496void MacroAssembler::ReverseBitsU64(Register dst, Register src,
5497 Register scratch1, Register scratch2) {
5498 ByteReverseU64(dst, src);
5499 for (int i = 0; i < 8; i++) {
5500 ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
5501 }
5502}
5503
5504void MacroAssembler::ReverseBitsU32(Register dst, Register src,
5505 Register scratch1, Register scratch2) {
5506 ByteReverseU32(dst, src, scratch1);
5507 for (int i = 4; i < 8; i++) {
5508 ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
5509 }
5510}
5511
5512// byte_idx=7 refers to least significant byte
5513void MacroAssembler::ReverseBitsInSingleByteU64(Register dst, Register src,
5514 Register scratch1,
5515 Register scratch2,
5516 int byte_idx) {
5517 CHECK(0 <= byte_idx && byte_idx <= 7);
5518 int j = byte_idx;
5519 // zero all bits of scratch2
5520 li(scratch2, Operand(0));
5521 for (int i = 0; i <= 7; i++) {
5522 // zero all bits of scratch1
5523 li(scratch1, Operand(0));
5524 // move bit (j+1)*8-i-1 of src to bit j*8+i of scratch1, erase bits
5525 // (j*8+i+1):end of scratch1
5526 int shift = 7 - (2 * i);
5527 if (shift < 0) shift += 64;
5528 rldicr(scratch1, src, shift, j * 8 + i);
5529 // erase bits start:(j*8-1+i) of scratch1 (inclusive)
5530 rldicl(scratch1, scratch1, 0, j * 8 + i);
5531 // scratch2 = scratch2|scratch1
5532 orx(scratch2, scratch2, scratch1);
5533 }
5534 // clear jth byte of dst and insert jth byte of scratch2
5535 ClearByteU64(dst, j);
5536 orx(dst, dst, scratch2);
5537}
5538
5539// Calls an API function. Allocates HandleScope, extracts returned value
5540// from handle and propagates exceptions. Clobbers C argument registers
5541// and C caller-saved registers. Restores context. On return removes
5542// (*argc_operand + slots_to_drop_on_return) * kSystemPointerSize
5543// (GCed, includes the call JS arguments space and the additional space
5544// allocated for the fast call).
5545void CallApiFunctionAndReturn(MacroAssembler* masm, bool with_profiling,
5546 Register function_address,
5547 ExternalReference thunk_ref, Register thunk_arg,
5548 int slots_to_drop_on_return,
5549 MemOperand* argc_operand,
5550 MemOperand return_value_operand) {
5551 using ER = ExternalReference;
5552
5553 Isolate* isolate = masm->isolate();
5555 ER::handle_scope_next_address(isolate), no_reg);
5557 ER::handle_scope_limit_address(isolate), no_reg);
5559 ER::handle_scope_level_address(isolate), no_reg);
5560
5561 // Additional parameter is the address of the actual callback.
5562 Register return_value = r3;
5563 Register scratch = ip;
5564 Register scratch2 = r0;
5565
5566 // Allocate HandleScope in callee-saved registers.
5567 // We will need to restore the HandleScope after the call to the API function;
5568 // by allocating it in callee-saved registers, it'll be preserved by the C code.
5569 Register prev_next_address_reg = r14;
5570 Register prev_limit_reg = r15;
5571 Register prev_level_reg = r16;
5572
5573 // C arguments (kCArgRegs[0/1]) are expected to be initialized outside, so
5574 // this function must not corrupt them (return_value overlaps with
5575 // kCArgRegs[0] but that's ok because we start using it only after the C
5576 // call).
5577 DCHECK(!AreAliased(kCArgRegs[0], kCArgRegs[1], // C args
5578 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5579 // function_address and thunk_arg might overlap but this function must not
5580 // corrupt them until the call is made (i.e. overlap with return_value is
5581 // fine).
5582 DCHECK(!AreAliased(function_address, // incoming parameters
5583 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5584 DCHECK(!AreAliased(thunk_arg, // incoming parameters
5585 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5586 {
5588 "Allocate HandleScope in callee-save registers.");
5589 __ LoadU64(prev_next_address_reg, next_mem_op);
5590 __ LoadU64(prev_limit_reg, limit_mem_op);
5591 __ lwz(prev_level_reg, level_mem_op);
5592 __ addi(scratch, prev_level_reg, Operand(1));
5593 __ stw(scratch, level_mem_op);
5594 }
5595
5596 Label profiler_or_side_effects_check_enabled, done_api_call;
5597 if (with_profiling) {
5598 __ RecordComment("Check if profiler or side effects check is enabled");
5599 __ lbz(scratch,
5600 __ ExternalReferenceAsOperand(IsolateFieldId::kExecutionMode));
5601 __ cmpi(scratch, Operand::Zero());
5602 __ bne(&profiler_or_side_effects_check_enabled);
5603#ifdef V8_RUNTIME_CALL_STATS
5604 __ RecordComment("Check if RCS is enabled");
5605 __ Move(scratch, ER::address_of_runtime_stats_flag());
5606 __ lwz(scratch, MemOperand(scratch, 0));
5607 __ cmpi(scratch, Operand::Zero());
5608 __ bne(&profiler_or_side_effects_check_enabled);
5609#endif // V8_RUNTIME_CALL_STATS
5610 }
5611
5612 __ RecordComment("Call the api function directly.");
5613 __ StoreReturnAddressAndCall(function_address);
5614 __ bind(&done_api_call);
5615
5616 Label propagate_exception;
5617 Label delete_allocated_handles;
5618 Label leave_exit_frame;
5619
5620 // load value from ReturnValue
5621 __ RecordComment("Load the value from ReturnValue");
5622 __ LoadU64(r3, return_value_operand);
5623
5624 {
5626 masm,
5627 "No more valid handles (the result handle was the last one)."
5628 "Restore previous handle scope.");
5629 __ StoreU64(prev_next_address_reg, next_mem_op);
5630 if (v8_flags.debug_code) {
5631 __ lwz(scratch, level_mem_op);
5632 __ subi(scratch, scratch, Operand(1));
5633 __ CmpS64(scratch, prev_level_reg);
5634 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
5635 }
5636 __ stw(prev_level_reg, level_mem_op);
5637 __ LoadU64(scratch, limit_mem_op);
5638 __ CmpS64(scratch, prev_limit_reg);
5639 __ bne(&delete_allocated_handles);
5640 }
5641
5642 __ RecordComment("Leave the API exit frame.");
5643 __ bind(&leave_exit_frame);
5644 Register argc_reg = prev_limit_reg;
5645 if (argc_operand != nullptr) {
5646 // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
5647 __ LoadU64(argc_reg, *argc_operand);
5648 }
5649 __ LeaveExitFrame(scratch);
5650
5651 {
5653 "Check if the function scheduled an exception.");
5654 __ LoadRoot(scratch, RootIndex::kTheHoleValue);
5656 ER::exception_address(isolate), no_reg));
5657 __ CmpS64(scratch, scratch2);
5658 __ bne(&propagate_exception);
5659 }
5660
5661 __ AssertJSAny(return_value, scratch, scratch2,
5662 AbortReason::kAPICallReturnedInvalidObject);
5663
5664 if (argc_operand == nullptr) {
5665 DCHECK_NE(slots_to_drop_on_return, 0);
5666 __ AddS64(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
5667
5668 } else {
5669 // {argc_operand} was loaded into {argc_reg} above.
5670 __ AddS64(sp, sp, Operand(slots_to_drop_on_return * kSystemPointerSize));
5671 __ ShiftLeftU64(r0, argc_reg, Operand(kSystemPointerSizeLog2));
5672 __ AddS64(sp, sp, r0);
5673 }
5674
5675 __ blr();
5676
5677 if (with_profiling) {
5678 ASM_CODE_COMMENT_STRING(masm, "Call the api function via the thunk.");
5679 __ bind(&profiler_or_side_effects_check_enabled);
5680 // Additional parameter is the address of the actual callback function.
5681 if (thunk_arg.is_valid()) {
5682 MemOperand thunk_arg_mem_op = __ ExternalReferenceAsOperand(
5683 IsolateFieldId::kApiCallbackThunkArgument);
5684 __ StoreU64(thunk_arg, thunk_arg_mem_op);
5685 }
5686 __ Move(scratch, thunk_ref);
5688 __ b(&done_api_call);
5689 }
5690
5691 __ RecordComment("An exception was thrown. Propagate it.");
5692 __ bind(&propagate_exception);
5693 __ TailCallRuntime(Runtime::kPropagateException);
5694
5695 {
5697 masm, "HandleScope limit has changed. Delete allocated extensions.");
5698 __ bind(&delete_allocated_handles);
5699 __ StoreU64(prev_limit_reg, limit_mem_op);
5700 // Save the return value in a callee-saved register.
5701 Register saved_result = prev_limit_reg;
5702 __ mr(saved_result, return_value);
5703 __ PrepareCallCFunction(1, scratch);
5704 __ Move(kCArgRegs[0], ER::isolate_address());
5705 __ CallCFunction(ER::delete_handle_scope_extensions(), 1);
5706 __ mr(return_value, saved_result);
5707 __ b(&leave_exit_frame);
5708 }
5709}
5710
5711} // namespace internal
5712} // namespace v8
5713
5714#undef __
5715
5716#endif // V8_TARGET_ARCH_PPC64
void stfdu(const DoubleRegister frs, const MemOperand &src)
void mtfsfi(int bf, int immediate, RCBit rc=LeaveRC)
bool ConstantPoolAccessIsInOverflow() const
void stop(Condition cond=al, int32_t code=kDefaultStopCode)
int SizeOfCodeGeneratedSince(Label *label)
void beq(Register rj, Register rd, int32_t offset)
void bdnz(Label *L, LKBit lk=LeaveLK)
void slwi(Register dst, Register src, const Operand &val, RCBit rc=LeaveRC)
static constexpr Builtin RecordWrite(SaveFPRegsMode fp_mode)
static bool IsIsolateIndependentBuiltin(Tagged< Code > code)
Definition builtins.cc:372
V8_EXPORT_PRIVATE Handle< Code > code_handle(Builtin builtin)
Definition builtins.cc:154
static constexpr Builtin RuntimeCEntry(int result_size, bool switch_to_central_stack=false)
static constexpr Builtin EphemeronKeyBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin IndirectPointerBarrier(SaveFPRegsMode fp_mode)
static constexpr Builtin kLastTier0
Definition builtins.h:114
static constexpr Builtin CEntry(int result_size, ArgvMode argv_mode, bool builtin_exit_frame=false, bool switch_to_central_stack=false)
static const int kMarkedForDeoptimizationBit
Definition code.h:456
static constexpr int kConstantPoolOffset
static const int kInvalidContext
Definition contexts.h:578
static V8_INLINE constexpr int SlotOffset(int index)
Definition contexts.h:516
static bool IsSupported(CpuFeature f)
static V8_EXPORT_PRIVATE const int kEagerDeoptExitSize
static V8_EXPORT_PRIVATE const int kLazyDeoptExitSize
static constexpr int kCallerSPDisplacement
static V8_EXPORT_PRIVATE ExternalReference address_of_code_pointer_table_base_address()
static ExternalReference Create(const SCTableReference &table_ref)
static constexpr uint32_t kFlagsMaybeHasMaglevCode
static constexpr uint32_t kFlagsTieringStateIsAnyRequested
static constexpr uint32_t kFlagsLogNextExecution
static constexpr uint32_t kFlagsMaybeHasTurbofanCode
static constexpr int kHeaderSize
static constexpr int kMapOffset
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static NEVER_READ_ONLY_SPACE constexpr bool kOnHeapBodyIsContiguous
static const int kExternalPointerTableBasePointerOffset
static constexpr int BuiltinEntrySlotOffset(Builtin id)
static constexpr int real_jslimit_offset()
static IsolateGroup * current()
Builtins * builtins()
Definition isolate.h:1443
bool IsGeneratingEmbeddedBuiltins() const
Definition isolate.h:1897
Address BuiltinEntry(Builtin builtin)
static bool IsAddressableThroughRootRegister(Isolate *isolate, const ExternalReference &reference)
static constexpr bool CanBeImmediate(RootIndex index)
V8_INLINE std::string CommentForOffHeapTrampoline(const char *prefix, Builtin builtin)
static int32_t RootRegisterOffsetForExternalReferenceTableEntry(Isolate *isolate, const ExternalReference &reference)
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index)
Tagged_t ReadOnlyRootPtr(RootIndex index)
void IndirectLoadConstant(Register destination, Handle< HeapObject > object)
static intptr_t RootRegisterOffsetForExternalReference(Isolate *isolate, const ExternalReference &reference)
void IndirectLoadExternalReference(Register destination, ExternalReference reference)
void F32x4ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2, Register scratch3)
void LoadSimd128LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void I16x8DotI8x16S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src, Simd128Register scratch)
void MaxF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, DoubleRegister scratch=kScratchDoubleReg)
void ClearByteU64(Register dst, int byte_idx)
void ConvertIntToFloat(Register src, DoubleRegister dst)
void LoadStackLimit(Register destination, StackLimitKind kind)
void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch)
void LoadSimd128Uint64(Simd128Register reg, const MemOperand &mem, Register scratch)
void MultiPushF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs, Register scratch1, Register scratch2, Register location=sp)
void ConvertIntToDouble(Register src, DoubleRegister dst)
void LoadS16LE(Register dst, const MemOperand &mem, Register scratch)
void Call(Register target, Condition cond=al)
void CallJSFunction(Register function_object, uint16_t argument_count)
void S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask)
void I64x2UConvertI32x4High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void CountTrailingZerosU64(Register dst, Register src, Register scratch1=ip, Register scratch2=r0, RCBit r=LeaveRC)
void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void TestCodeIsMarkedForDeoptimization(Register code, Register scratch)
void LoadU64LE(Register dst, const MemOperand &mem, Register scratch)
void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void ByteReverseU64(Register dst, Register val, Register=r0)
void I16x8UConvertI8x16Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadU8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void DecompressTaggedSigned(const Register &destination, const MemOperand &field_operand)
void I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2, Simd128Register scratch3)
void Drop(int count, Condition cond=al)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand &me, Register scratch)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void AndU32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=SetRC)
void I32x4UConvertI16x8Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void MovFromFloatResult(DwVfpRegister dst)
void mov(Register rd, Register rj)
void XorU32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=SetRC)
void CompareTagged(Register src1, Register src2, CRegister cr=cr0)
void ModS32(Register dst, Register src, Register value)
void IsObjectType(Register heap_object, Register scratch1, Register scratch2, InstanceType type)
void ModS64(Register dst, Register src, Register value)
void I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void SmiUntag(Register reg, SBit s=LeaveCC)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void TestIfSmi(Register value, Register scratch)
void I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi, Register src_lo, Register scratch)
void AssertNotSmi(Register object, AbortReason reason=AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE
void MulS32(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTagRange tag_range, Register isolate_root=Register::no_reg())
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
void SubSmiLiteral(Register dst, Register src, Tagged< Smi > smi, Register scratch)
static int CallSizeNotPredictableCodeSize(Address target, RelocInfo::Mode rmode, Condition cond=al)
void MultiPopF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs, Register scratch1, Register scratch2, Register location=sp)
void LoadLane32LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void CopySignF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void PushStandardFrame(Register function_reg)
void StoreF64(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void CompareRoot(Register obj, RootIndex index)
void I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void LoadV64ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void MovDoubleLowToInt(Register dst, DoubleRegister src)
void MovFromFloatParameter(DwVfpRegister dst)
void StoreU64WithUpdate(Register src, const MemOperand &mem, Register scratch=no_reg)
void StoreU64LE(Register src, const MemOperand &mem, Register scratch)
void LoadF32(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void ModU64(Register dst, Register src, Register value)
void LoadV32ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void Move(Register dst, Tagged< Smi > smi)
void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void CountTrailingZerosU32(Register dst, Register src, Register scratch1=ip, Register scratch2=r0, RCBit r=LeaveRC)
void MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void StoreF32LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
void I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void StoreReturnAddressAndCall(Register target)
void I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void StackOverflowCheck(Register num_args, Register scratch, Label *stack_overflow)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
void ZeroExtWord32(Register dst, Register src)
void LoadTrustedPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void I16x8Splat(Simd128Register dst, Register src)
void LoadRootRelative(Register destination, int32_t offset) final
void JumpIfSmi(Register value, Label *smi_label)
void StoreSimd128Uint16(Simd128Register reg, const MemOperand &mem, Register scratch)
void LoadPC(Register dst)
void ShiftRightU64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void LoadU16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void MultiPopV128(Simd128RegList dregs, Register scratch, Register location=sp)
void CallCodeObject(Register code_object)
void LoadSandboxedPointerField(Register destination, MemOperand field_operand)
void SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void DivS32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void SwapDouble(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void SubAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch=r0)
void LoadS32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag=kDefaultCodeEntrypointTag)
void CheckDebugHook(Register fun, Register new_target, Register expected_parameter_count_or_dispatch_handle, Register actual_parameter_count)
void CountLeadingZerosU32(Register dst, Register src, RCBit r=LeaveRC)
void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scrahc2, Register scratch3, Simd128Register scratch4)
void I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void LoadDoubleLiteral(DoubleRegister result, base::Double value, Register scratch)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src)
void F64x2Max(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2)
void StoreLane8LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void LoadSimd128Uint16(Simd128Register reg, const MemOperand &mem, Register scratch)
void SwapFloat32(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void LoadLane64LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadS32LE(Register dst, const MemOperand &mem, Register scratch)
void DecodeSandboxedPointer(Register value)
void MovDoubleHighToInt(Register dst, DoubleRegister src)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void CmpU32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void LoadU16LE(Register dst, const MemOperand &mem, Register scratch)
void MinF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, DoubleRegister scratch=kScratchDoubleReg)
void MultiPopDoubles(DoubleRegList dregs, Register location=sp)
void CompareTaggedRoot(Register with, RootIndex index)
void ModU32(Register dst, Register src, Register value)
void StoreSimd128Uint64(Simd128Register reg, const MemOperand &mem, Register scratch)
void JumpIfJSAnyIsNotPrimitive(Register heap_object, Register scratch, Label *target, Label::Distance distance=Label::kFar, Condition condition=Condition::kUnsignedGreaterThanEqual)
void OrU64(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=SetRC)
void LoadSimd128Uint32(Simd128Register reg, const MemOperand &mem, Register scratch)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void SmiTag(Register reg, SBit s=LeaveCC)
void StoreSimd128(Simd128Register src, const MemOperand &mem, Register scratch)
void StoreLane32LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void MovDoubleToInt64(Register dst, DoubleRegister src)
void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, DoubleRegister scratch2, Simd128Register scratch3)
void PushArray(Register array, Register size, Register scratch, PushArrayOrder order=PushArrayOrder::kNormal)
void I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3)
void ZeroExtHalfWord(Register dst, Register src)
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void TestIfInt32(Register value, Register scratch, CRegister cr=cr0)
void MovToFloatResult(DwVfpRegister src)
void AddS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst)
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void SetRoundingMode(FPRoundingMode RN)
void I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void CallJSEntry(Register target)
void SubS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ShiftRightU32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void StoreF64LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void Zero(const MemOperand &dest)
void ShiftRightS32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void AndU64(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=SetRC)
void MovToFloatParameter(DwVfpRegister src)
void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void PushCommonFrame(Register marker_reg=no_reg)
void LoadIndirectPointerField(Register destination, MemOperand field_operand, IndirectPointerTag tag)
void JumpIfEqual(Register x, int32_t y, Label *dest)
void CallIndirectPointerBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode, IndirectPointerTag tag)
void AndSmiLiteral(Register dst, Register src, Tagged< Smi > smi, Register scratch, RCBit rc=LeaveRC)
int LeaveFrame(StackFrame::Type type)
void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch)
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void GetLabelAddress(Register dst, Label *target)
void StoreF32(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
void ShiftLeftU32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void SwapSimd128(Simd128Register src, Simd128Register dst, Simd128Register scratch)
void StoreCodePointerField(Register value, MemOperand dst_field_operand)
void CmpU64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void LoadF64(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void I32x4Splat(Simd128Register dst, Register src)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void AddS32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=LeaveRC)
void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void LoadSimd128(Simd128Register dst, const MemOperand &mem, Register scratch)
void Jump(Register target, Condition cond=al)
void StoreU32(Register src, const MemOperand &mem, Register scratch)
void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void LoadRoot(Register destination, RootIndex index) final
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void MovInt64ToDouble(DoubleRegister dst, Register src)
void LoadF32LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void LoadU32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void Popcnt32(Register dst, Register src)
void CanonicalizeNaN(const VRegister &dst, const VRegister &src)
void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src)
void DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void SwapP(Register src, Register dst, Register scratch)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void CompareRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit)
void S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2)
void JumpCodeObject(Register code_object, JumpMode jump_mode=JumpMode::kJump)
void I64x2Splat(Simd128Register dst, Register src)
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst)
void LoadU64WithUpdate(Register dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst)
void EmitDecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void I32x4UConvertI16x8High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadFromConstantsTable(Register destination, int constant_index) final
void ShiftRightS64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void LoadS8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void CompareObjectTypeRange(Register heap_object, Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void F64x2Min(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2)
void MaybeSaveRegisters(RegList registers)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void LoadTaggedRoot(Register destination, RootIndex index)
void I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void SubS32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=LeaveRC)
void StoreIndirectPointerField(Register value, MemOperand dst_field_operand)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void LoadLane8LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
void ConvertDoubleToUnsignedInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void LoadCompressedMap(Register dst, Register object)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void CallBuiltin(Builtin builtin, Condition cond=al)
void CmplSmiLiteral(Register src1, Tagged< Smi > smi, Register scratch, CRegister cr=cr0)
void DivU32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void Popcnt64(Register dst, Register src)
void I64x2Abs(QwNeonRegister dst, QwNeonRegister src)
void AddSmiLiteral(Register dst, Register src, Tagged< Smi > smi, Register scratch)
void I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src)
void OrU32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=SetRC)
void MulS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand &mem, Register scratch)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void ShiftLeftU64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void LoadIntLiteral(Register dst, int value)
void CmpS64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void ReverseBitsU64(Register dst, Register src, Register scratch1, Register scratch2)
void LoadCodePointerField(Register destination, MemOperand field_operand)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void LoadSmiLiteral(Register dst, Tagged< Smi > smi)
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void Check(Condition cond, AbortReason reason)
void DivU64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, Register scratch4)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void LoadU32LE(Register dst, const MemOperand &mem, Register scratch)
void MovIntToFloat(DoubleRegister dst, Register src, Register scratch)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void StoreTrustedPointerField(Register value, MemOperand dst_field_operand)
void I32x4DotI8x16AddS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register src3)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void StoreSandboxedPointerField(Register value, MemOperand dst_field_operand)
void F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2)
void MovUnsignedIntToDouble(DoubleRegister dst, Register src, Register scratch)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand &mem, Register scratch)
void ReverseBitsU32(Register dst, Register src, Register scratch1, Register scratch2)
void I16x8UConvertI8x16High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void CmpS32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void StoreU64(Register src, const MemOperand &mem, Register scratch=no_reg)
void XorU64(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=SetRC)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void MultiPushV128(Simd128RegList dregs, Register scratch, Register location=sp)
void JumpIfLessThan(Register x, int32_t y, Label *dest)
void ReverseBitsInSingleByteU64(Register dst, Register src, Register scratch1, Register scratch2, int byte_idx)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void TestBitMask(Register value, uintptr_t mask, Register scratch=r0)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void DivS64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void CompareInstanceTypeRange(Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void StoreSimd128Uint8(Simd128Register reg, const MemOperand &mem, Register scratch)
void LoadU64(Register dst, const MemOperand &mem, Register scratch=no_reg)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void I64x2UConvertI32x4Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2)
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch)
void I8x16Splat(Simd128Register dst, Register src)
void CmpSmiLiteral(Register src1, Tagged< Smi > smi, Register scratch, CRegister cr=cr0)
void LoadF64LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void F32x4Splat(Simd128Register dst, DoubleRegister src, DoubleRegister scratch1, Register scratch2)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void ByteReverseU16(Register dst, Register val, Register scratch)
void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void LoadSimd128Uint8(Simd128Register reg, const MemOperand &mem, Register scratch)
void ZeroExtByte(Register dst, Register src)
void StubPrologue(StackFrame::Type type)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void StoreU32LE(Register src, const MemOperand &mem, Register scratch)
void DecompressTagged(const Register &destination, const MemOperand &field_operand)
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void StoreRootRelative(int32_t offset, Register value) final
void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, Simd128Register scratch2)
void CountLeadingZerosU64(Register dst, Register src, RCBit r=LeaveRC)
void LoadTaggedSignedField(const Register &destination, const MemOperand &field_operand)
void LoadMap(Register destination, Register object)
void TailCallRuntime(Runtime::FunctionId fid)
void TestBit(Register value, int bitNumber, Register scratch=r0)
void AddAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch=r0)
void LoadNativeContextSlot(Register dst, int index)
void I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void StoreSimd128Uint32(Simd128Register reg, const MemOperand &mem, Register scratch)
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst)
void StoreSimd128LE(Simd128Register src, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(Register code_target_address, Register scratch1, Register scratch2)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst)
void F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch)
void I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch)
void LoadS16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void StoreLane16LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void ByteReverseU32(Register dst, Register val, Register scratch)
void ConvertDoubleToInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void StoreLane64LE(Simd128Register src, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3)
void LoadLane16LE(Simd128Register dst, const MemOperand &mem, int lane, Register scratch1, Simd128Register scratch2)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void MultiPushDoubles(DoubleRegList dregs, Register location=sp)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
static constexpr DwVfpRegister from_code(int8_t code)
static const RegisterConfiguration * Default()
static constexpr Register from_code(int code)
static constexpr Register no_reg()
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
Definition reloc-info.h:206
static constexpr bool IsCodeTarget(Mode mode)
Definition reloc-info.h:196
static constexpr bool IsFullEmbeddedObject(Mode mode)
Definition reloc-info.h:203
static constexpr bool IsReadOnly(RootIndex root_index)
Definition roots.h:623
static constexpr bool IsImmortalImmovable(RootIndex root_index)
Definition roots.h:616
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
Definition runtime.cc:350
void S128Not(XMMRegister dst, XMMRegister src, XMMRegister scratch)
void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src, XMMRegister tmp)
void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src, XMMRegister scratch, Register tmp)
void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src, Register scratch)
void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src, Register scratch)
static SlotDescriptor ForCodePointerSlot()
Definition assembler.h:311
static constexpr Tagged< Smi > FromInt(int value)
Definition smi.h:38
static constexpr Tagged< Smi > zero()
Definition smi.h:99
static constexpr int32_t TypeToMarker(Type type)
Definition frames.h:196
static bool IsJavaScript(Type t)
Definition frames.h:284
static constexpr int kFixedFrameSizeFromFp
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define EMIT_SIMD_UNOP(name)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_BINOP(name)
#define EMIT_SIMD_SHIFT(name)
#define EMIT_SIMD_EXT_MUL(name)
#define ASM_CODE_COMMENT_STRING(asm,...)
Definition assembler.h:618
#define ASM_CODE_COMMENT(asm)
Definition assembler.h:617
#define V8_ENABLE_LEAPTIERING_BOOL
Definition globals.h:151
#define COMPRESS_POINTERS_BOOL
Definition globals.h:99
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
Definition globals.h:81
#define V8_ENABLE_SANDBOX_BOOL
Definition globals.h:160
#define ABI_CALL_VIA_IP
#define ABI_USES_FUNCTION_DESCRIPTORS
#define ABI_TOC_REGISTER
DirectHandle< Object > new_target
Definition execution.cc:75
int32_t offset
TNode< Object > target
TNode< Object > receiver
RoundingMode rounding_mode
ZoneVector< RpoNumber > & result
int x
uint32_t const mask
#define SIMD_BITMASK_LIST(V)
#define SIMD_UNOP_LIST(V)
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_BINOP_LIST(V)
#define SIMD_QFM_LIST(V)
#define ClearRightImm
#define SIMD_SHIFT_LIST(V)
#define SIMD_EXT_MUL_LIST(V)
SmiCheck
InvokeType
SetIsolateDataSlots
JumpMode
RegListBase< RegisterT > registers
InstructionOperand destination
int r
Definition mul-fft.cc:298
STL namespace.
int int32_t
Definition unicode.cc:40
signed short int16_t
Definition unicode.cc:38
constexpr bool IsPowerOfTwo(T value)
Definition bits.h:187
constexpr int WhichPowerOfTwo(T value)
Definition bits.h:195
V8_INLINE Dest bit_cast(Source const &source)
Definition macros.h:95
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
Definition bounds.h:20
V8_INLINE constexpr std::optional< RootIndex > UniqueMapOfInstanceType(InstanceType type)
uint32_t WasmInterpreterRuntime int64_t r0
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
Definition handles-inl.h:72
constexpr Register kRootRegister
constexpr int kCodePointerTableEntrySizeLog2
constexpr VFPRoundingMode kRoundToNearest
RegListBase< DoubleRegister > DoubleRegList
Definition reglist-arm.h:15
constexpr int kTaggedSize
Definition globals.h:542
constexpr int kSimd128Size
Definition globals.h:706
const int kNumRequiredStackFrameSlots
DwVfpRegister DoubleRegister
static V8_INLINE constexpr bool IsSharedExternalPointerType(ExternalPointerTagRange tag_range)
constexpr Simd128Register kSimd128RegZero
constexpr DoubleRegister kScratchDoubleReg
const int kSmiTagSize
Definition v8-internal.h:87
constexpr Simd128Register kScratchSimd128Reg
RegListBase< Register > RegList
Definition reglist-arm.h:14
constexpr bool CodeKindCanTierUp(CodeKind kind)
Definition code-kind.h:95
constexpr Register kJavaScriptCallTargetRegister
constexpr int kCodePointerTableEntryCodeObjectOffset
const int kStackFrameExtraParamSlot
constexpr int kTrustedPointerTableEntrySizeLog2
const Address kWeakHeapObjectMask
Definition globals.h:967
constexpr Register kJavaScriptCallArgCountRegister
Address Tagged_t
Definition globals.h:547
constexpr int kSystemPointerSizeLog2
Definition globals.h:494
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
Definition globals.h:557
const int kNumCallerSavedDoubles
Definition reglist-ppc.h:53
static const int kRegisterPassedArguments
Flag flags[]
Definition flags.cc:3797
constexpr Register kConstantPoolRegister
constexpr int L
QwNeonRegister Simd128Register
MemOperand FieldMemOperand(Register object, int offset)
constexpr int kSystemPointerSize
Definition globals.h:410
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kZapValue
Definition globals.h:1005
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
constexpr uint32_t kTrustedPointerHandleShift
RegListBase< Simd128Register > Simd128RegList
Definition reglist-ppc.h:16
constexpr uint32_t kCodePointerHandleShift
const int kHeapObjectTag
Definition v8-internal.h:72
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
@ kExternalPointerNullTag
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
const DoubleRegList kCallerSavedDoubles
Definition reglist-ppc.h:46
const RegList kJSCallerSaved
Definition reglist-arm.h:23
Register ToRegister(int num)
constexpr bool SmiValuesAre32Bits()
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
Definition globals.h:562
constexpr Register r11
constexpr Register kPtrComprCageBaseRegister
constexpr VFPRoundingMode kRoundToZero
const Simd128RegList kCallerSavedSimd128s
Definition reglist-ppc.h:49
constexpr VFPRoundingMode RN
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
constexpr uint8_t kInstrSize
const int kSmiTag
Definition v8-internal.h:86
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register cp
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
Definition globals.h:407
constexpr uint32_t kCodePointerHandleMarker
const uint32_t kClearedWeakHeapObjectLower32
Definition globals.h:981
Condition to_condition(Condition cond)
constexpr Register kJavaScriptCallNewTargetRegister
constexpr Register kJSFunctionRegister
Local< T > Handle
#define DCHECK_LE(v1, v2)
Definition logging.h:490
#define CHECK(condition)
Definition logging.h:124
#define CHECK_LE(lhs, rhs)
#define DCHECK_IMPLIES(v1, v2)
Definition logging.h:493
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_LT(v1, v2)
Definition logging.h:489
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define DCHECK_GT(v1, v2)
Definition logging.h:487
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403
#define V8_STATIC_ROOTS_BOOL
Definition v8config.h:1001