#if V8_TARGET_ARCH_PPC64
#define __ ACCESS_MASM(masm)
#if V8_ENABLE_WEBASSEMBLY
constexpr int kStackSavedSavedFPSizeInBytes =
    kNumCallerSavedDoubles * kSimd128Size;
#else
constexpr int kStackSavedSavedFPSizeInBytes =
    kNumCallerSavedDoubles * kDoubleSize;
#endif  // V8_ENABLE_WEBASSEMBLY
    Register exclusion3) const {
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
  bytes += kStackSavedSavedFPSizeInBytes;
    Register scratch2, Register exclusion1,
    Register exclusion2, Register exclusion3) {
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
  bytes += kStackSavedSavedFPSizeInBytes;
    Register scratch2, Register exclusion1,
    Register exclusion2, Register exclusion3) {
  bytes += kStackSavedSavedFPSizeInBytes;
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
  int current_instr_code_object_relative_offset =
  current_instr_code_object_relative_offset -= kInstrSize;
  SubS64(dest, dest, Operand(current_instr_code_object_relative_offset));
    int constant_index) {
    ExternalReference reference, Register scratch) {
  if (reference.IsIsolateFieldId()) {
  if (options().enable_root_relative_access) {
  if (options().isolate_independent_code) {
  Move(scratch, reference);
  mov(ip, Operand(target, rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond, cr);
  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
    Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
  Move(scratch, reference);
  mov(ip, Operand(target, rmode));
  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
    Call(static_cast<Address>(target_index), rmode, cond);
  switch (options().builtin_call_jump_mode) {
  if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
  switch (options().builtin_call_jump_mode) {
  if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {
  add(sp, sp, scratch);
  uint64_t frame_alignment_mask = ~(static_cast<uint64_t>(frame_alignment) - 1);
  AndU64(sp, sp, Operand(frame_alignment_mask));
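  // frame_alignment is a power of two, so ~(frame_alignment - 1) has the low
  // log2(frame_alignment) bits clear; ANDing sp with that mask rounds the
  // stack pointer down to the required activation-frame alignment.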
  mov(r0, Operand(smi));
    Register scratch2, PushArrayOrder order) {
  add(scratch, array, scratch);
  mov(dst, Operand(static_cast<int>(index), rmode));
  mov(dst, Operand(value.address(), rmode));
  if (reference.IsIsolateFieldId()) {
    Operand(reference.offset_from_root_register()));
  if (options().isolate_independent_code) {
  CHECK(!reference.IsIsolateFieldId());
  mov(dst, Operand(reference));
  int16_t num_to_push = regs.Count();
  subi(location, location, Operand(stack_offset));
    if ((regs.bits() & (1 << i)) != 0) {
    if ((regs.bits() & (1 << i)) != 0) {
  addi(location, location, Operand(stack_offset));
  int16_t num_to_push = dregs.Count();
  subi(location, location, Operand(stack_offset));
    if ((dregs.bits() & (1 << i)) != 0) {
  int16_t num_to_push = simd_regs.Count();
  subi(location, location, Operand(stack_offset));
    if ((simd_regs.bits() & (1 << i)) != 0) {
    if ((dregs.bits() & (1 << i)) != 0) {
  addi(location, location, Operand(stack_offset));
    if ((simd_regs.bits() & (1 << i)) != 0) {
  addi(location, location, Operand(stack_offset));
    Register scratch1, Register scratch2,
#if V8_ENABLE_WEBASSEMBLY
  bool generating_builtins =
  if (generating_builtins) {
    Label push_empty_simd, simd_pushed;
    Move(scratch1, ExternalReference::supports_wasm_simd_128_address());
    ble(&push_empty_simd);
    bind(&push_empty_simd);
        Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
        Operand(-static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
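  // When this code is baked into a builtin it must run on both SIMD-capable
  // and SIMD-less hosts, so Simd128 support is tested at run time through the
  // supports_wasm_simd_128 external reference; the push_empty_simd path still
  // reserves the same simd_regs.Count() * kSimd128Size bytes of stack so both
  // paths produce an identical frame layout.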
    Register scratch1, Register scratch2,
#if V8_ENABLE_WEBASSEMBLY
  bool generating_builtins =
  if (generating_builtins) {
    Label pop_empty_simd, simd_popped;
    Move(scratch1, ExternalReference::supports_wasm_simd_128_address());
    ble(&pop_empty_simd);
    bind(&pop_empty_simd);
        Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
        Operand(static_cast<int8_t>(simd_regs.Count()) * kSimd128Size));
    const Register& scratch) {
    const Register& scratch) {
    StoreU32(value, dst_field_operand, scratch);
    StoreU64(value, dst_field_operand, scratch);
    Register value, Register slot_address,
    SmiCheck smi_check, SlotDescriptor slot) {
#ifdef V8_ENABLE_SANDBOX
  ShiftRightU64(value, value, Operand(kSandboxedPointerShift));
#ifdef V8_ENABLE_SANDBOX
    Register value, const MemOperand& dst_field_operand, Register scratch) {
#ifdef V8_ENABLE_SANDBOX
  Register scratch2 = temps.Acquire();
  ShiftLeftU64(scratch2, scratch2, Operand(kSandboxedPointerShift));
  StoreU64(scratch2, dst_field_operand, scratch);
    Register isolate_root,
#ifdef V8_ENABLE_SANDBOX
  Register external_table = temps.Acquire();
  if (isolate_root == no_reg) {
        IsolateData::external_pointer_table_offset() +
      Operand(kExternalPointerTableEntrySizeLog2));
  mov(scratch, Operand(~tag));
#ifdef V8_ENABLE_SANDBOX
#ifdef V8_ENABLE_SANDBOX
    Register scratch, Label* target,
  CHECK(cc == Condition::kUnsignedLessThan ||
        cc == Condition::kUnsignedGreaterThanEqual);
        LAST_JS_RECEIVER_TYPE);
        FIRST_PRIMITIVE_HEAP_OBJECT_TYPE,
        LAST_PRIMITIVE_HEAP_OBJECT_TYPE);
    Abort(AbortReason::kInvalidReceiver);
  Register scratch2 = temps.Acquire();
  static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
#ifdef V8_ENABLE_SANDBOX
#ifdef V8_ENABLE_SANDBOX
  Register scratch2 = temps.Acquire();
      FieldMemOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset),
  StoreU32(scratch2, dst_field_operand, scratch);
#ifdef V8_ENABLE_SANDBOX
void MacroAssembler::ResolveIndirectPointerHandle(Register destination,
  Label is_trusted_pointer_handle, done;
  beq(&is_trusted_pointer_handle, cr0);
  bind(&is_trusted_pointer_handle);
  } else if (tag == kCodeIndirectPointerTag) {
void MacroAssembler::ResolveTrustedPointerHandle(Register destination,
  DCHECK_NE(tag, kCodeIndirectPointerTag);
  Move(table, ExternalReference::trusted_pointer_table_base_address(isolate()));
void MacroAssembler::ResolveCodePointerHandle(Register destination,
  LoadCodePointerTableBase(table);
void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination,
  LoadCodePointerTableBase(table);
void MacroAssembler::LoadCodePointerTableBase(Register destination) {
#ifdef V8_COMPRESS_POINTERS_IN_MULTIPLE_CAGES
       ExternalReference::code_pointer_table_base_address(isolate()));
       ExternalReference::global_code_pointer_table_base_address());
    Register slot_address,
  pop(slot_address_parameter);
  pop(object_parameter);
    Register slot_address,
      object, slot_address);
  DCHECK(!AreAliased(object_parameter, slot_address_parameter, tag_parameter));
  pop(slot_address_parameter);
  pop(object_parameter);
  mov(tag_parameter, Operand(tag));
    Register slot_address,
  pop(slot_address_parameter);
  pop(object_parameter);
#if V8_ENABLE_WEBASSEMBLY
  if (mode == StubCallMode::kCallWasmRuntimeStub) {
    SlotDescriptor slot) {
  if (slot.contains_indirect_pointer()) {
        slot.indirect_pointer_tag(), scratch);
    DCHECK(slot.contains_direct_pointer());
    CmpS64(value_check, value);
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  if (v8_flags.disable_write_barriers) {
  if (slot.contains_direct_pointer()) {
    DCHECK(slot.contains_indirect_pointer());
        slot.indirect_pointer_tag());
  if (marker_reg.is_valid()) {
  if (marker_reg.is_valid()) {
    Push(r0, fp, marker_reg);
  if (function_reg.is_valid()) {
  if (function_reg.is_valid()) {
    Push(r0, fp, cp, function_reg);
  fcfid(double_dst, double_dst);
  fcfidus(double_dst, double_dst);
  fcfidu(double_dst, double_dst);
  fcfids(double_dst, double_dst);
    fctidz(double_dst, double_input);
    fctid(double_dst, double_input);
    fctiduz(double_dst, double_input);
    fctidu(double_dst, double_input);
    Register code_target_address, Register scratch1, Register scratch2) {
#ifdef V8_ENABLE_SANDBOX
  LoadCodeEntrypointViaCodePointer(
      FieldMemOperand(code_target_address, Code::kSelfIndirectPointerOffset),
  add(scratch2, scratch1, scratch2);
      FieldMemOperand(code_target_address, Code::kConstantPoolOffsetOffset),
    bool load_constant_pool_pointer_reg) {
#if V8_ENABLE_WEBASSEMBLY
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT ||
         frame_type == StackFrame::API_ACCESSOR_EXIT ||
         frame_type == StackFrame::API_CALLBACK_EXIT);
  using ER = ExternalReference;
  ER c_entry_fp_address =
      ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
#if !defined(USE_SIMULATOR)
  return v8_flags.sim_stack_alignment;
  using ER = ExternalReference;
  ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
  ER c_entry_fp_address =
      ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
                 : IsolateData::jslimit_offset();
    Label* stack_overflow) {
  sub(scratch, sp, scratch);
  ble(stack_overflow);
    Register actual_parameter_count,
  Label regular_invoke;
  DCHECK_EQ(expected_parameter_count, r5);
  sub(expected_parameter_count, expected_parameter_count,
  ble(&regular_invoke, cr0);
  Label stack_overflow;
  mr(r0, actual_parameter_count);
  LoadRoot(scratch, RootIndex::kUndefinedValue);
  mtctr(expected_parameter_count);
  bind(&stack_overflow);
  bind(&regular_invoke);
    Register expected_parameter_count,
    Register actual_parameter_count) {
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Move(r7, debug_hook_active);
  SmiTag(expected_parameter_count);
  Push(expected_parameter_count);
  SmiTag(actual_parameter_count);
  Push(actual_parameter_count);
  Pop(actual_parameter_count);
  Pop(expected_parameter_count);
  SmiUntag(expected_parameter_count);
    Register expected_parameter_count,
    Register actual_parameter_count,
                  actual_parameter_count);
  LoadRoot(r6, RootIndex::kUndefinedValue);
  InvokePrologue(expected_parameter_count, actual_parameter_count, type);
  constexpr int unused_argument_count = 0;
    Register fun, Register new_target, Register actual_parameter_count,
      temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
      SharedFunctionInfo::kFormalParameterCountOffset));
    Register expected_parameter_count,
    Register actual_parameter_count,
                 actual_parameter_count, type);
#if V8_STATIC_ROOTS_BOOL
void MacroAssembler::CompareInstanceTypeWithUniqueCompressedMap(
  std::optional<RootIndex> expected =
  mov(scratch, Operand(expected_ptr));
void MacroAssembler::IsObjectTypeFast(Register object,
                                      Register compressed_map_scratch,
  CompareInstanceTypeWithUniqueCompressedMap(compressed_map_scratch,
#if V8_STATIC_ROOTS_BOOL
  DCHECK((scratch1 != scratch2) || (scratch1 != r0));
  CompareInstanceTypeWithUniqueCompressedMap(
      scratch1, scratch1 != scratch2 ? scratch2 : r0, type);
    Register type_reg, Register scratch,
  static_assert(Map::kInstanceTypeOffset < 4096);
  cmpi(type_reg, Operand(type));
    unsigned lower_limit, unsigned higher_limit) {
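  // Single-comparison range check: after biasing by lower_limit, value lies
  // in [lower_limit, higher_limit] iff the unsigned difference
  // (value - lower_limit) is <= (higher_limit - lower_limit), because any
  // value below lower_limit wraps around to a huge unsigned number.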
  if (lower_limit != 0) {
    mov(scratch, Operand(lower_limit));
    sub(scratch, value, scratch);
    cmpli(scratch, Operand(higher_limit - lower_limit));
    mov(scratch, Operand(higher_limit));
  CompareRange(type_reg, scratch, lower_limit, higher_limit);
    Register overflow_dst,
  DCHECK(dst != overflow_dst);
  DCHECK(overflow_dst != scratch);
  DCHECK(overflow_dst != left);
  DCHECK(overflow_dst != right);
  bool left_is_right = left == right;
    add(dst, left, right);
    xor_(overflow_dst, dst, scratch, xorRC);
    if (!left_is_right) xor_(scratch, dst, right);
  } else if (dst == right) {
    add(dst, left, right);
    xor_(overflow_dst, dst, left, xorRC);
    if (!left_is_right) xor_(scratch, dst, scratch);
    add(dst, left, right);
    xor_(overflow_dst, dst, left, xorRC);
    if (!left_is_right) xor_(scratch, dst, right);
  if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
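// The XOR/AND sequence above is the standard two's-complement overflow rule
// for signed addition: overflow occurred iff both operands have the same sign
// and the result's sign differs, i.e. the sign bit of
// (dst ^ left) & (dst ^ right) is set. When left == right the single XOR with
// the original operand already carries that sign bit. The aliasing branches
// only differ in where the pre-add copy of the clobbered operand lives.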
    Register overflow_dst,
  DCHECK(dst != overflow_dst);
  DCHECK(overflow_dst != scratch);
  DCHECK(overflow_dst != left);
    original_left = overflow_dst;
    mr(original_left, left);
  AddS64(dst, left, Operand(right), scratch);
  xor_(overflow_dst, dst, original_left);
    and_(overflow_dst, overflow_dst, dst, SetRC);
    andc(overflow_dst, overflow_dst, dst, SetRC);
    Register overflow_dst,
  DCHECK(dst != overflow_dst);
  DCHECK(overflow_dst != scratch);
  DCHECK(overflow_dst != left);
  DCHECK(overflow_dst != right);
    sub(dst, left, right);
    xor_(overflow_dst, dst, scratch);
    xor_(scratch, scratch, right);
    and_(overflow_dst, overflow_dst, scratch, SetRC);
  } else if (dst == right) {
    sub(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);
    and_(overflow_dst, overflow_dst, scratch, SetRC);
    sub(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst, SetRC);
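// Subtraction uses the matching rule: overflow in left - right is possible
// only when the operands have different signs, and it happened iff the
// result's sign differs from left's, i.e. the sign bit of
// (dst ^ left) & (left ^ right) is set. SetRC latches that sign into CR0 so
// callers can branch on it directly.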
  Label return_nan, done;
  xsmindp(dst, lhs, rhs);
  fadd(dst, lhs, rhs);
  Label return_nan, done;
  xsmaxdp(dst, lhs, rhs);
  fadd(dst, lhs, rhs);
    unsigned lower_limit,
    unsigned higher_limit,
    Label* on_in_range) {
  CompareRange(value, scratch, lower_limit, higher_limit);
#if V8_ENABLE_WEBASSEMBLY
  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
#ifndef V8_ENABLE_LEAPTIERING
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                               Register optimized_code_entry,
  Label heal_optimized_code_slot;
      &heal_optimized_code_slot);
      optimized_code_entry,
      FieldMemOperand(optimized_code_entry, CodeWrapper::kCodeOffset), scratch);
  __ bne(&heal_optimized_code_slot, cr0);
  __ bind(&heal_optimized_code_slot);
#ifdef V8_ENABLE_DEBUG_CODE
    Assert(eq, AbortReason::kExpectedFeedbackCell);
    Assert(eq, AbortReason::kExpectedFeedbackVector);
    Register optimized_code, Register closure, Register scratch1,
    Register slot_address) {
#ifdef V8_ENABLE_LEAPTIERING
  mr(value, optimized_code);
#ifndef V8_ENABLE_LEAPTIERING
    Register flags, Register feedback_vector, CodeKind current_code_kind,
    Label* flags_need_processing) {
  if (current_code_kind != CodeKind::MAGLEV) {
  CHECK(is_uint16(kFlagsMask));
  mov(r0, Operand(kFlagsMask));
  bne(flags_need_processing, cr0);
    Register flags, Register feedback_vector) {
  Label maybe_has_optimized_code, maybe_needs_logging;
  beq(&maybe_needs_logging, cr0);
  bind(&maybe_needs_logging);
  TestBitMask(flags, FeedbackVector::LogNextExecutionBit::kMask, r0);
  beq(&maybe_has_optimized_code, cr0);
  bind(&maybe_has_optimized_code);
                                  FeedbackVector::kMaybeOptimizedCodeOffset),
  TailCallOptimizedCodeSlot(this, optimized_code_entry, r9);
    int num_arguments) {
  CHECK(f->nargs < 0 || f->nargs == num_arguments);
  mov(r3, Operand(num_arguments));
  bool switch_to_central_stack = options().is_wasm;
  if (function->nargs >= 0) {
    mov(r3, Operand(function->nargs));
    bool builtin_exit_frame) {
    Label* target_if_cleared) {
  beq(target_if_cleared);
    Register scratch2) {
  if (v8_flags.native_code_counters && counter->Enabled()) {
    addi(scratch1, scratch1, Operand(value));
    Register scratch2) {
  if (v8_flags.native_code_counters && counter->Enabled()) {
    subi(scratch1, scratch1, Operand(value));
  mov(r3, Operand(static_cast<int>(reason)));
  Move(dst, ExternalReference::abort_with_reason());
    Register scratch, Label* fbv_undef) {
  CmpS32(scratch, Operand(FEEDBACK_VECTOR_TYPE), r0);
  LoadRoot(dst, RootIndex::kUndefinedValue);
      FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
#ifdef V8_ENABLE_DEBUG_CODE
    Check(ne, AbortReason::kOperandIsASmi, cr0);
    Check(eq, AbortReason::kOperandIsNotASmi, cr0);
    Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
    andi(object, object, Operand(Map::Bits1::IsConstructorBit::kMask));
    Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
                            LAST_JS_FUNCTION_TYPE);
    Check(le, AbortReason::kOperandIsNotAFunction);
    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
    Check(le, AbortReason::kOperandIsNotACallableFunction);
    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
    Check(eq, AbortReason::kOperandIsNotABoundFunction);
    Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
                            FIRST_JS_GENERATOR_OBJECT_TYPE,
                            LAST_JS_GENERATOR_OBJECT_TYPE);
    Check(le, AbortReason::kOperandIsNotAGeneratorObject);
    Label done_checking;
    beq(&done_checking);
    Assert(eq, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
    Abort(abort_reason);
    int num_double_arguments) {
  int stack_passed_words = 0;
    stack_passed_words +=
  return stack_passed_words;
    int num_double_arguments,
  int stack_passed_arguments =
    stack_space += stack_passed_arguments;
    int num_reg_arguments,
    int num_double_arguments,
    bool has_function_descriptor) {
  return CallCFunction(ip, num_reg_arguments, num_double_arguments,
                       set_isolate_data_slots, has_function_descriptor);
    int num_double_arguments,
    bool has_function_descriptor) {
  int stack_passed_arguments =
  return call_pc_offset;
    bool has_function_descriptor) {
  return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
                       has_function_descriptor);
    bool has_function_descriptor) {
  return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
                       has_function_descriptor);
  bne(condition_met, cr0);
  beq(condition_met, cr0);
  mov(dst, Operand(value));
  mov(dst, Operand(smi));
    base::Double value, Register scratch) {
  litVal.dval = value.AsUint64();
  mov(scratch, Operand(litVal.ival));
  rldimi(scratch, src_lo, 0, 32);
  rldimi(scratch, src, 0, 32);
  rldimi(scratch, src, 32, 0);
  srdi(dst, dst, Operand(32));
  xscvspdpn(dst, dst);
  xscvdpspn(scratch, src);
    add(dst, src, value, s, r);
      addi(dst, src, value);
      mov(scratch, value);
      add(dst, src, scratch, s, r);
    sub(dst, src, value, s, r);
      subi(dst, src, value);
      mov(scratch, value);
      sub(dst, src, scratch, s, r);
                            Register scratch, RCBit r) {
                            Register scratch, RCBit r) {
    mulli(dst, src, value);
    mov(scratch, value);
    mulld(dst, src, scratch, s, r);
  mulld(dst, src, value, s, r);
  MulS64(dst, src, value, scratch, s, r);
  MulS64(dst, src, value, s, r);
  divd(dst, src, value, s, r);
  divdu(dst, src, value, s, r);
  divw(dst, src, value, s, r);
  divwu(dst, src, value, s, r);
    modsd(dst, src, value);
  divd(scratch, src, value);
  mulld(scratch, scratch, value);
  sub(dst, src, scratch);
    modud(dst, src, value);
  divdu(scratch, src, value);
  mulld(scratch, scratch, value);
  sub(dst, src, scratch);
    modsw(dst, src, value);
  divw(scratch, src, value);
  mullw(scratch, scratch, value);
  sub(dst, src, scratch);
    moduw(dst, src, value);
  divwu(scratch, src, value);
  mullw(scratch, scratch, value);
  sub(dst, src, scratch);
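  // Fallback remainder for cores without the POWER9 mod instructions
  // (modsd/modud/modsw/moduw): compute dst = src - (src / value) * value
  // using one divide and one multiply.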
                            Register scratch, RCBit r) {
  if (is_uint16(value.immediate()) && r == SetRC) {
    andi(dst, src, value);
    mov(scratch, value);
    and_(dst, src, scratch, r);
  and_(dst, src, value, r);
                           Register scratch, RCBit r) {
  if (is_int16(value.immediate()) && r == LeaveRC) {
    ori(dst, src, value);
    mov(scratch, value);
    orx(dst, src, scratch, r);
  orx(dst, src, value, r);
                            Register scratch, RCBit r) {
  if (is_int16(value.immediate()) && r == LeaveRC) {
    xori(dst, src, value);
    mov(scratch, value);
    xor_(dst, src, scratch, r);
  xor_(dst, src, value, r);
                            Register scratch, RCBit r) {
  AndU64(dst, src, value, scratch, r);
                           Register scratch, RCBit r) {
  OrU64(dst, src, value, scratch, r);
  OrU64(dst, src, value, r);
                            Register scratch, RCBit r) {
  XorU64(dst, src, value, scratch, r);
                                 const Operand& value, RCBit r) {
  sldi(dst, src, value, r);
                                  const Operand& value, RCBit r) {
  srdi(dst, src, value, r);
                                  const Operand& value, RCBit r) {
  sradi(dst, src, value.immediate(), r);
                                 const Operand& value, RCBit r) {
  slwi(dst, src, value, r);
                                  const Operand& value, RCBit r) {
  srwi(dst, src, value, r);
                                  const Operand& value, RCBit r) {
  srawi(dst, src, value.immediate(), r);
  sld(dst, src, value, r);
  srd(dst, src, value, r);
  srad(dst, src, value, r);
  slw(dst, src, value, r);
  srw(dst, src, value, r);
  sraw(dst, src, value, r);
  cmp(src1, src2, cr);
                          Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpi(src1, src2, cr);
    CmpS64(src1, scratch, cr);
                          Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmpli(src1, src2, cr);
    CmpU64(src1, scratch, cr);
  cmpl(src1, src2, cr);
                          Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpwi(src1, src2, cr);
    CmpS32(src1, scratch, cr);
  cmpw(src1, src2, cr);
                          Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmplw(src1, scratch, cr);
  cmplw(src1, src2, cr);
  fadd(dst, lhs, rhs, r);
  fsub(dst, lhs, rhs, r);
  fmul(dst, lhs, rhs, r);
  fdiv(dst, lhs, rhs, r);
  fadd(dst, lhs, rhs, r);
  fsub(dst, lhs, rhs, r);
  fmul(dst, lhs, rhs, r);
  fdiv(dst, lhs, rhs, r);
                           Register scratch, CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  CmpS32(src1, Operand(smi), scratch, cr);
  CmpS64(src1, scratch, cr);
                           Register scratch, CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  CmpU64(src1, Operand(smi), scratch, cr);
  CmpU64(src1, scratch, cr);
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  AddS64(dst, src, Operand(smi.ptr()), scratch);
  add(dst, src, scratch);
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  AddS64(dst, src, Operand(-(static_cast<intptr_t>(smi.ptr()))), scratch);
  sub(dst, src, scratch);
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  AndU64(dst, src, Operand(smi), scratch, rc);
  and_(dst, src, scratch, rc);
#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
    int64_t offset = mem.offset(); \
    if (mem.rb() == no_reg) { \
      if (!is_int16(offset)) { \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
      if (offset == 0) { \
      } else if (is_int16(offset)) { \
        CHECK_NE(scratch, no_reg); \
        addi(scratch, mem.rb(), Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        add(scratch, scratch, mem.rb()); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
#define GenerateMemoryOperationRR(reg, mem, op) \
    if (mem.offset() == 0) { \
      if (mem.rb() != no_reg) \
        op(reg, MemOperand(r0, mem.ra())); \
    } else if (is_int16(mem.offset())) { \
      if (mem.rb() != no_reg) \
        addi(scratch, mem.rb(), Operand(mem.offset())); \
        mov(scratch, Operand(mem.offset())); \
      op(reg, MemOperand(mem.ra(), scratch)); \
      mov(scratch, Operand(mem.offset())); \
      if (mem.rb() != no_reg) add(scratch, scratch, mem.rb()); \
      op(reg, MemOperand(mem.ra(), scratch)); \
#define GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op) \
    int64_t offset = mem.offset(); \
    if (mem.rb() == no_reg) { \
      if (is_int16(offset)) { \
      } else if (is_int34(offset) && CpuFeatures::IsSupported(PPC_10_PLUS)) { \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
      if (offset == 0) { \
      } else if (is_int16(offset)) { \
        CHECK_NE(scratch, no_reg); \
        addi(scratch, mem.rb(), Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        add(scratch, scratch, mem.rb()); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
    int64_t offset = mem.offset(); \
    int misaligned = (offset & 3); \
    if (mem.rb() == no_reg) { \
      if (!is_int16(offset) || misaligned) { \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
      if (offset == 0) { \
      } else if (is_int16(offset)) { \
        CHECK_NE(scratch, no_reg); \
        addi(scratch, mem.rb(), Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        add(scratch, scratch, mem.rb()); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
#define GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, \
    int64_t offset = mem.offset(); \
    int misaligned = (offset & 3); \
    if (mem.rb() == no_reg) { \
      if (is_int16(offset) && !misaligned) { \
      } else if (is_int34(offset) && CpuFeatures::IsSupported(PPC_10_PLUS)) { \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
      if (offset == 0) { \
      } else if (is_int16(offset)) { \
        CHECK_NE(scratch, no_reg); \
        addi(scratch, mem.rb(), Operand(offset)); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
        CHECK_NE(scratch, no_reg); \
        mov(scratch, Operand(offset)); \
        add(scratch, scratch, mem.rb()); \
        rr_op(reg, MemOperand(mem.ra(), scratch)); \
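// These helpers choose the cheapest addressing form that fits the operand: a
// D-form immediate when the offset is a signed 16-bit value, a Power10
// prefixed encoding (rip_op) when it fits in 34 bits, and otherwise an X-form
// register+register access with the offset materialized into scratch. The
// "WithAlign" variants exist because DS-form instructions such as ld/std
// encode their offset in multiples of 4, so a misaligned offset (offset & 3)
// must take the indexed path.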
#define MEM_OP_WITH_ALIGN_LIST(V) \
  V(LoadU64WithUpdate, ldu, ldux) \
  V(StoreU64WithUpdate, stdu, stdux)
#define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op) \
  void MacroAssembler::name(Register reg, const MemOperand& mem, \
                            Register scratch) { \
    GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op); \
MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION)
#undef MEM_OP_WITH_ALIGN_LIST
#undef MEM_OP_WITH_ALIGN_FUNCTION
#define MEM_OP_WITH_ALIGN_PREFIXED_LIST(V) \
  V(LoadS32, lwa, plwa, lwax) \
  V(LoadU64, ld, pld, ldx) \
  V(StoreU64, std, pstd, stdx)
#define MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION(name, ri_op, rip_op, rr_op) \
  void MacroAssembler::name(Register reg, const MemOperand& mem, \
                            Register scratch) { \
    GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, rr_op); \
MEM_OP_WITH_ALIGN_PREFIXED_LIST(MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION)
#undef MEM_OP_WITH_ALIGN_PREFIXED_LIST
#undef MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION
#define MEM_OP_LIST(V) \
  V(LoadF64WithUpdate, DoubleRegister, lfdu, lfdux) \
  V(LoadF32WithUpdate, DoubleRegister, lfsu, lfsux) \
  V(StoreF64WithUpdate, DoubleRegister, stfdu, stfdux) \
  V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux)
#define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op) \
  void MacroAssembler::name(result_t reg, const MemOperand& mem, \
                            Register scratch) { \
    GenerateMemoryOperation(reg, mem, ri_op, rr_op); \
MEM_OP_LIST(MEM_OP_FUNCTION)
#undef MEM_OP_FUNCTION
#define MEM_OP_PREFIXED_LIST(V) \
  V(LoadU32, Register, lwz, plwz, lwzx) \
  V(LoadS16, Register, lha, plha, lhax) \
  V(LoadU16, Register, lhz, plhz, lhzx) \
  V(LoadU8, Register, lbz, plbz, lbzx) \
  V(StoreU32, Register, stw, pstw, stwx) \
  V(StoreU16, Register, sth, psth, sthx) \
  V(StoreU8, Register, stb, pstb, stbx) \
  V(LoadF64, DoubleRegister, lfd, plfd, lfdx) \
  V(LoadF32, DoubleRegister, lfs, plfs, lfsx) \
  V(StoreF64, DoubleRegister, stfd, pstfd, stfdx) \
  V(StoreF32, DoubleRegister, stfs, pstfs, stfsx)
#define MEM_OP_PREFIXED_FUNCTION(name, result_t, ri_op, rip_op, rr_op) \
  void MacroAssembler::name(result_t reg, const MemOperand& mem, \
                            Register scratch) { \
    GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op); \
MEM_OP_PREFIXED_LIST(MEM_OP_PREFIXED_FUNCTION)
#undef MEM_OP_PREFIXED_LIST
#undef MEM_OP_PREFIXED_FUNCTION
#define MEM_OP_SIMD_LIST(V) \
  V(LoadSimd128, lxvx) \
  V(StoreSimd128, stxvx) \
  V(LoadSimd128Uint64, lxsdx) \
  V(LoadSimd128Uint32, lxsiwzx) \
  V(LoadSimd128Uint16, lxsihzx) \
  V(LoadSimd128Uint8, lxsibzx) \
  V(StoreSimd128Uint64, stxsdx) \
  V(StoreSimd128Uint32, stxsiwx) \
  V(StoreSimd128Uint16, stxsihx) \
  V(StoreSimd128Uint8, stxsibx)
#define MEM_OP_SIMD_FUNCTION(name, rr_op) \
  void MacroAssembler::name(Simd128Register reg, const MemOperand& mem, \
                            Register scratch) { \
    GenerateMemoryOperationRR(reg, mem, rr_op); \
MEM_OP_SIMD_LIST(MEM_OP_SIMD_FUNCTION)
#undef MEM_OP_SIMD_LIST
#undef MEM_OP_SIMD_FUNCTION
  LoadU8(dst, mem, scratch);
#define MEM_LE_OP_LIST(V) \
  V(StoreU64, stdbrx) \
  V(StoreU32, stwbrx) \
#ifdef V8_TARGET_BIG_ENDIAN
#define MEM_LE_OP_FUNCTION(name, op) \
  void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \
                                Register scratch) { \
    GenerateMemoryOperationRR(reg, mem, op); \
#define MEM_LE_OP_FUNCTION(name, op) \
  void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \
                                Register scratch) { \
    name(reg, mem, scratch); \
MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION)
#undef MEM_LE_OP_FUNCTION
#undef MEM_LE_OP_LIST
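// The *LE accessors provide explicit little-endian memory order: on
// big-endian targets they are generated from the byte-reversing indexed
// instructions listed above (stdbrx, stwbrx and their load counterparts),
// while on little-endian targets they simply forward to the plain accessor.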
#ifdef V8_TARGET_BIG_ENDIAN
#ifdef V8_TARGET_BIG_ENDIAN
    Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
    Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
    Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
  LoadU64(scratch, mem, scratch2);
    Register scratch, Register scratch2) {
#ifdef V8_TARGET_BIG_ENDIAN
  LoadU32(scratch, mem, scratch2);
#define SIMD_BINOP_LIST(V) \
  V(F64x2Add, xvadddp) \
  V(F64x2Sub, xvsubdp) \
  V(F64x2Mul, xvmuldp) \
  V(F64x2Div, xvdivdp) \
  V(F64x2Eq, xvcmpeqdp) \
  V(F32x4Add, vaddfp) \
  V(F32x4Sub, vsubfp) \
  V(F32x4Mul, xvmulsp) \
  V(F32x4Div, xvdivsp) \
  V(F32x4Min, vminfp) \
  V(F32x4Max, vmaxfp) \
  V(F32x4Eq, xvcmpeqsp) \
  V(I64x2Add, vaddudm) \
  V(I64x2Sub, vsubudm) \
  V(I64x2Eq, vcmpequd) \
  V(I64x2GtS, vcmpgtsd) \
  V(I32x4Add, vadduwm) \
  V(I32x4Sub, vsubuwm) \
  V(I32x4Mul, vmuluwm) \
  V(I32x4MinS, vminsw) \
  V(I32x4MinU, vminuw) \
  V(I32x4MaxS, vmaxsw) \
  V(I32x4MaxU, vmaxuw) \
  V(I32x4Eq, vcmpequw) \
  V(I32x4GtS, vcmpgtsw) \
  V(I32x4GtU, vcmpgtuw) \
  V(I16x8Add, vadduhm) \
  V(I16x8Sub, vsubuhm) \
  V(I16x8MinS, vminsh) \
  V(I16x8MinU, vminuh) \
  V(I16x8MaxS, vmaxsh) \
  V(I16x8MaxU, vmaxuh) \
  V(I16x8Eq, vcmpequh) \
  V(I16x8GtS, vcmpgtsh) \
  V(I16x8GtU, vcmpgtuh) \
  V(I16x8AddSatS, vaddshs) \
  V(I16x8SubSatS, vsubshs) \
  V(I16x8AddSatU, vadduhs) \
  V(I16x8SubSatU, vsubuhs) \
  V(I16x8RoundingAverageU, vavguh) \
  V(I8x16Add, vaddubm) \
  V(I8x16Sub, vsububm) \
  V(I8x16MinS, vminsb) \
  V(I8x16MinU, vminub) \
  V(I8x16MaxS, vmaxsb) \
  V(I8x16MaxU, vmaxub) \
  V(I8x16Eq, vcmpequb) \
  V(I8x16GtS, vcmpgtsb) \
  V(I8x16GtU, vcmpgtub) \
  V(I8x16AddSatS, vaddsbs) \
  V(I8x16SubSatS, vsubsbs) \
  V(I8x16AddSatU, vaddubs) \
  V(I8x16SubSatU, vsububs) \
  V(I8x16RoundingAverageU, vavgub) \
  V(S128AndNot, vandc)
#define EMIT_SIMD_BINOP(name, op) \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
                            Simd128Register src2) { \
    op(dst, src1, src2); \
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
#define SIMD_SHIFT_LIST(V) \
  V(I64x2ShrS, vsrad) \
  V(I64x2ShrU, vsrd) \
  V(I32x4ShrS, vsraw) \
  V(I32x4ShrU, vsrw) \
  V(I16x8ShrS, vsrah) \
  V(I16x8ShrU, vsrh) \
  V(I8x16ShrS, vsrab) \
#define EMIT_SIMD_SHIFT(name, op) \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
                            Register src2, Simd128Register scratch) { \
    mtvsrd(scratch, src2); \
    vspltb(scratch, scratch, Operand(7)); \
    op(dst, src1, scratch); \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
                            const Operand& src2, Register scratch1, \
                            Simd128Register scratch2) { \
    mov(scratch1, src2); \
    name(dst, src1, scratch1, scratch2); \
#undef EMIT_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
#define SIMD_UNOP_LIST(V) \
  V(F64x2Abs, xvabsdp) \
  V(F64x2Neg, xvnegdp) \
  V(F64x2Sqrt, xvsqrtdp) \
  V(F64x2Ceil, xvrdpip) \
  V(F64x2Floor, xvrdpim) \
  V(F64x2Trunc, xvrdpiz) \
  V(F32x4Abs, xvabssp) \
  V(F32x4Neg, xvnegsp) \
  V(F32x4Sqrt, xvsqrtsp) \
  V(F32x4Ceil, xvrspip) \
  V(F32x4Floor, xvrspim) \
  V(F32x4Trunc, xvrspiz) \
  V(F32x4SConvertI32x4, xvcvsxwsp) \
  V(F32x4UConvertI32x4, xvcvuxwsp) \
  V(I64x2Neg, vnegd) \
  V(I64x2SConvertI32x4Low, vupklsw) \
  V(I64x2SConvertI32x4High, vupkhsw) \
  V(I32x4Neg, vnegw) \
  V(I32x4SConvertI16x8Low, vupklsh) \
  V(I32x4SConvertI16x8High, vupkhsh) \
  V(I32x4UConvertF32x4, xvcvspuxws) \
  V(I16x8SConvertI8x16Low, vupklsb) \
  V(I16x8SConvertI8x16High, vupkhsb) \
  V(I8x16Popcnt, vpopcntb)
#define EMIT_SIMD_UNOP(name, op) \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
#undef EMIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
#define EXT_MUL(dst_even, dst_odd, mul_even, mul_odd) \
  mul_even(dst_even, src1, src2); \
  mul_odd(dst_odd, src1, src2);
#define SIMD_EXT_MUL_LIST(V) \
  V(I32x4ExtMulLowI16x8S, vmulesh, vmulosh, vmrglw) \
  V(I32x4ExtMulHighI16x8S, vmulesh, vmulosh, vmrghw) \
  V(I32x4ExtMulLowI16x8U, vmuleuh, vmulouh, vmrglw) \
  V(I32x4ExtMulHighI16x8U, vmuleuh, vmulouh, vmrghw) \
  V(I16x8ExtMulLowI8x16S, vmulesb, vmulosb, vmrglh) \
  V(I16x8ExtMulHighI8x16S, vmulesb, vmulosb, vmrghh) \
  V(I16x8ExtMulLowI8x16U, vmuleub, vmuloub, vmrglh) \
  V(I16x8ExtMulHighI8x16U, vmuleub, vmuloub, vmrghh)
#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge) \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
                            Simd128Register src2, Simd128Register scratch) { \
    EXT_MUL(scratch, dst, mul_even, mul_odd) \
    merge(dst, scratch, dst); \
#undef EMIT_SIMD_EXT_MUL
#undef SIMD_EXT_MUL_LIST
#define SIMD_ALL_TRUE_LIST(V) \
  V(I64x2AllTrue, vcmpgtud) \
  V(I32x4AllTrue, vcmpgtuw) \
  V(I16x8AllTrue, vcmpgtuh) \
  V(I8x16AllTrue, vcmpgtub)
#define EMIT_SIMD_ALL_TRUE(name, op) \
  void MacroAssembler::name(Register dst, Simd128Register src, \
                            Register scratch1, Register scratch2, \
                            Simd128Register scratch3) { \
    constexpr uint8_t fxm = 0x2; \
    constexpr int bit_number = 24; \
    li(scratch1, Operand(0)); \
    li(scratch2, Operand(1)); \
    vxor(scratch3, scratch3, scratch3); \
    mtcrf(scratch1, fxm); \
    op(scratch3, src, scratch3, SetRC); \
    isel(dst, scratch2, scratch1, bit_number); \
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST
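// How the all-true reduction works: scratch3 is zeroed, so the unsigned
// greater-than compare of src against it with SetRC updates CR6, whose
// "all lanes true" bit (bit 24 of the condition register) is set only when
// every lane of src is non-zero; isel then materializes a 0/1 result without
// a branch. The mtcrf beforehand clears the field so stale CR6 bits cannot
// leak into the result.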
#define SIMD_BITMASK_LIST(V) \
  V(I64x2BitMask, vextractdm, 0x8080808080800040) \
  V(I32x4BitMask, vextractwm, 0x8080808000204060) \
  V(I16x8BitMask, vextracthm, 0x10203040506070)
#define EMIT_SIMD_BITMASK(name, op, indices) \
  void MacroAssembler::name(Register dst, Simd128Register src, \
                            Register scratch1, Simd128Register scratch2) { \
    if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \
      mov(scratch1, Operand(indices)); \
      mtvsrd(scratch2, scratch1); \
      vbpermq(scratch2, src, scratch2); \
      vextractub(scratch2, scratch2, Operand(6)); \
      mfvsrd(dst, scratch2); \
#undef EMIT_SIMD_BITMASK
#undef SIMD_BITMASK_LIST
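// Bitmask extraction: Power10 provides single instructions (vextractdm,
// vextractwm, vextracthm) that collect the top bit of every lane. On older
// cores the same effect is built from vbpermq, whose control constant (the
// indices argument) names the bit position of each lane's sign bit; the
// gathered bits are then moved from the vector register into dst.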
#define SIMD_QFM_LIST(V) \
  V(F64x2Qfma, xvmaddmdp) \
  V(F64x2Qfms, xvnmsubmdp) \
  V(F32x4Qfma, xvmaddmsp) \
  V(F32x4Qfms, xvnmsubmsp)
#define EMIT_SIMD_QFM(name, op) \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \
                            Simd128Register src2, Simd128Register src3, \
                            Simd128Register scratch) { \
    Simd128Register dest = dst; \
    if (dst != src1) { \
      vor(scratch, src1, src1); \
    op(dest, src2, src3); \
    if (dest != dst) { \
      vor(dst, dest, dest); \
  constexpr int lane_width_in_bytes = 8;
  EXT_MUL(scratch, dst, vmulesw, vmulosw)
  vextractd(scratch, scratch, Operand(1 * lane_width_in_bytes));
  vinsertd(dst, scratch, Operand(0));
  constexpr int lane_width_in_bytes = 8;
  EXT_MUL(scratch, dst, vmulesw, vmulosw)
  vinsertd(scratch, dst, Operand(1 * lane_width_in_bytes));
  vor(dst, scratch, scratch);
  constexpr int lane_width_in_bytes = 8;
  EXT_MUL(scratch, dst, vmuleuw, vmulouw)
  vextractd(scratch, scratch, Operand(1 * lane_width_in_bytes));
  vinsertd(dst, scratch, Operand(0));
  constexpr int lane_width_in_bytes = 8;
  EXT_MUL(scratch, dst, vmuleuw, vmulouw)
  vinsertd(scratch, dst, Operand(1 * lane_width_in_bytes));
  vor(dst, scratch, scratch);
#ifdef V8_TARGET_BIG_ENDIAN
#ifdef V8_TARGET_BIG_ENDIAN
  xxbrq(scratch2, src);
  constexpr int lane_width_in_bytes = 8;
  vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
  vspltw(dst, dst, Operand(1));
  constexpr int lane_width_in_bytes = 8;
  vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
  vspltw(dst, dst, Operand(1));
  vsplth(dst, dst, Operand(3));
  vspltb(dst, dst, Operand(7));
                                       uint8_t imm_lane_idx,
                                       Register scratch2) {
  constexpr int lane_width_in_bytes = 8;
  vextractd(scratch1, src, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
  mfvsrd(scratch2, scratch1);
                                       uint8_t imm_lane_idx,
                                       Register scratch2, Register scratch3) {
  constexpr int lane_width_in_bytes = 4;
  vextractuw(scratch1, src, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
  mfvsrd(scratch2, scratch1);
                                      uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 8;
  vextractd(scratch, src, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
                                      uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 4;
  vextractuw(scratch, src, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
                                       uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 2;
  vextractuh(scratch, src, Operand((7 - imm_lane_idx) * lane_width_in_bytes));
                                       uint8_t imm_lane_idx,
                                       uint8_t imm_lane_idx,
  vextractub(scratch, src, Operand(15 - imm_lane_idx));
                                       uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 8;
    vor(dst, src1, src1);
    vinsd(dst, scratch1, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
    mtvsrd(scratch2, scratch1);
    vinsertd(dst, scratch2, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
  constexpr int lane_width_in_bytes = 4;
    vor(dst, src1, src1);
    vinsw(dst, scratch1, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
    mtvsrd(scratch3, scratch1);
    vinsertw(dst, scratch3, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
    Register src2, uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 8;
    vor(dst, src1, src1);
    vinsd(dst, src2, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
    vinsertd(dst, scratch, Operand((1 - imm_lane_idx) * lane_width_in_bytes));
    Register src2, uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 4;
    vor(dst, src1, src1);
    vinsw(dst, src2, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
    vinsertw(dst, scratch, Operand((3 - imm_lane_idx) * lane_width_in_bytes));
    Register src2, uint8_t imm_lane_idx,
  constexpr int lane_width_in_bytes = 2;
    vor(dst, src1, src1);
    vinserth(dst, scratch, Operand((7 - imm_lane_idx) * lane_width_in_bytes));
    Register src2, uint8_t imm_lane_idx,
    vor(dst, src1, src1);
    vinsertb(dst, scratch, Operand(15 - imm_lane_idx));
    Register scratch2, Register scratch3,
  constexpr int lane_width_in_bytes = 8;
    vmulld(dst, src1, src2);
    for (int i = 0; i < 2; i++) {
      vextractd(scratch4, src1, Operand(1 * lane_width_in_bytes));
      vextractd(dst, src2, Operand(1 * lane_width_in_bytes));
      mulld(scratch_1, scratch_1, scratch_2);
      scratch_1 = scratch2;
      scratch_2 = scratch3;
    mtvsrdd(dst, scratch1, scratch2);
#define F64X2_MIN_MAX_NAN(result) \
  xvcmpeqdp(scratch2, src1, src1); \
  vsel(result, src1, result, scratch2); \
  xvcmpeqdp(scratch2, src2, src2); \
  vsel(dst, src2, result, scratch2); \
  xvmindp(dst, dst, dst);
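// xvcmpeqdp(x, x) is true exactly for non-NaN lanes, so the two vsel steps
// replace lanes where either input was NaN with that NaN input; the trailing
// self min/max then quiets any signaling NaN, giving the NaN-propagating
// min/max semantics WebAssembly requires.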
  xvmindp(scratch1, src1, src2);
  F64X2_MIN_MAX_NAN(scratch1)
  xvmaxdp(scratch1, src1, src2);
  F64X2_MIN_MAX_NAN(scratch1)
#undef F64X2_MIN_MAX_NAN
  xvcmpgtdp(dst, src2, src1);
  xvcmpgedp(dst, src2, src1);
  xvcmpeqdp(scratch, src1, src2);
  vnor(dst, scratch, scratch);
  xvcmpgtsp(dst, src2, src1);
  xvcmpgesp(dst, src2, src1);
  xvcmpeqsp(scratch, src1, src2);
  vnor(dst, scratch, scratch);
  vcmpequd(scratch, src1, src2);
  vnor(dst, scratch, scratch);
  vcmpgtsd(scratch, src2, src1);
  vnor(dst, scratch, scratch);
  vcmpequw(scratch, src1, src2);
  vnor(dst, scratch, scratch);
  vcmpgtsw(scratch, src2, src1);
  vnor(dst, scratch, scratch);
  vcmpequw(scratch, src1, src2);
  vcmpgtuw(dst, src1, src2);
  vor(dst, dst, scratch);
  vcmpequh(scratch, src1, src2);
  vnor(dst, scratch, scratch);
  vcmpgtsh(scratch, src2, src1);
  vnor(dst, scratch, scratch);
  vcmpequh(scratch, src1, src2);
  vcmpgtuh(dst, src1, src2);
  vor(dst, dst, scratch);
  vcmpequb(scratch, src1, src2);
  vnor(dst, scratch, scratch);
  vcmpgtsb(scratch, src2, src1);
  vnor(dst, scratch, scratch);
  vcmpequb(scratch, src1, src2);
  vcmpgtub(dst, src1, src2);
  vor(dst, dst, scratch);
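// Altivec has no unsigned greater-or-equal vector compare, so the GeU
// operations above are composed from two compares, (src1 == src2) and
// (src1 > src2 unsigned), merged with vor.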
  vsrad(scratch, src, scratch);
  vxor(dst, src, scratch);
  vsubudm(dst, dst, scratch);
  vsraw(scratch, src, scratch);
  vxor(dst, src, scratch);
  vsubuwm(dst, dst, scratch);
  vsrah(scratch, src, scratch);
  vxor(dst, src, scratch);
  vsubuhm(dst, dst, scratch);
  vspltish(scratch, Operand(1));
  vnor(dst, src, src);
  vadduhm(dst, scratch, dst);
  vsrab(scratch, src, scratch);
  vxor(dst, src, scratch);
  vsububm(dst, dst, scratch);
  vnor(dst, src, src);
  vaddubm(dst, scratch, dst);
  xvcmpeqsp(scratch, src, src);
  vand(scratch, src, scratch);
  xvcvspsxws(dst, scratch);
  vpkswss(dst, src2, src1);
  vpkswus(dst, src2, src1);
  vpkshss(dst, src2, src1);
  vpkshus(dst, src2, src1);
  xvcvsxddp(dst, dst);
  constexpr int lane_width_in_bytes = 8;
  mov(scratch1, Operand(0xFFFFFFFF));
  mtvsrd(scratch2, scratch1);
  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
  vand(dst, scratch2, dst);
  xvcvuxddp(dst, dst);
  constexpr int lane_width_in_bytes = 8;
  mov(scratch1, Operand(0xFFFFFFFF));
  mtvsrd(scratch2, scratch1);
  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
  vand(dst, scratch2, dst);
  constexpr int lane_width_in_bytes = 8;
  mov(scratch1, Operand(0xFFFFFFFF));
  mtvsrd(scratch2, scratch1);
  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
  vand(dst, scratch2, dst);
  mov(scratch1, Operand(0xFFFF));
  mtvsrd(scratch2, scratch1);
  vspltw(scratch2, scratch2, Operand(1));
  vand(dst, scratch2, dst);
  mov(scratch1, Operand(0xFFFF));
  mtvsrd(scratch2, scratch1);
  vspltw(scratch2, scratch2, Operand(1));
  vand(dst, scratch2, dst);
  li(scratch1, Operand(0xFF));
  mtvsrd(scratch2, scratch1);
  vsplth(scratch2, scratch2, Operand(3));
  vand(dst, scratch2, dst);
  li(scratch1, Operand(0xFF));
  mtvsrd(scratch2, scratch1);
  vsplth(scratch2, scratch2, Operand(3));
  vand(dst, scratch2, dst);
    Register scratch1, Register scratch2,
    vextractbm(dst, src);
    mov(scratch1, Operand(0x8101820283038));
    mov(scratch2, Operand(0x4048505860687078));
    mtvsrdd(scratch3, scratch1, scratch2);
    vbpermq(scratch3, src, scratch3);
  vmsummbm(dst, src1, src2, src3);
  vmulesb(scratch, src1, src2);
  vmulosb(dst, src1, src2);
  vadduhm(dst, scratch, dst);
  vminub(scratch, src2, scratch);
    uint64_t low, Register scratch1,
  mov(scratch1, Operand(low));
  mov(scratch2, Operand(high));
  mtvsrdd(scratch3, scratch2, scratch1);
  vperm(dst, src1, src2, scratch3);
#define EXT_ADD_PAIRWISE(splat, mul_even, mul_odd, add) \
  splat(scratch1, Operand(1)); \
  mul_even(scratch2, src, scratch1); \
  mul_odd(scratch1, src, scratch1); \
  add(dst, scratch2, scratch1);
  EXT_ADD_PAIRWISE(vspltish, vmulesh, vmulosh, vadduwm)
  EXT_ADD_PAIRWISE(vspltish, vmuleuh, vmulouh, vadduwm)
  EXT_ADD_PAIRWISE(xxspltib, vmulesb, vmulosb, vadduhm)
  EXT_ADD_PAIRWISE(xxspltib, vmuleub, vmuloub, vadduhm)
#undef EXT_ADD_PAIRWISE
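// Pairwise widening add via multiplication: splat 1 into scratch1, take the
// even- and odd-lane widening multiplies of src by 1 (which merely sign- or
// zero-extend the lanes to the wider type), then add the two products to get
// the pairwise extended sum.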
  constexpr int lane_number = 8;
  vextractd(dst, src, Operand(lane_number));
  vinsertw(dst, dst, Operand(lane_number));
  constexpr int lane_number = 8;
  xvcvdpsp(scratch, src);
  vextractuw(dst, scratch, Operand(lane_number));
  vinsertw(scratch, dst, Operand(4));
  vxor(dst, dst, dst);
  vinsertd(dst, scratch, Operand(lane_number));
  constexpr int lane_number = 8;
  xvcmpeqdp(scratch, src, src);
  vand(scratch, src, scratch);
  xvcvdpsxws(scratch, scratch);
  vextractuw(dst, scratch, Operand(lane_number));
  vinsertw(scratch, dst, Operand(4));
  vxor(dst, dst, dst);
  vinsertd(dst, scratch, Operand(lane_number));
  constexpr int lane_number = 8;
  xvcvdpuxws(scratch, src);
  vextractuw(dst, scratch, Operand(lane_number));
  vinsertw(scratch, dst, Operand(4));
  vxor(dst, dst, dst);
  vinsertd(dst, scratch, Operand(lane_number));
#if V8_TARGET_BIG_ENDIAN
#define MAYBE_REVERSE_BYTES(reg, instr) instr(reg, reg);
#define MAYBE_REVERSE_BYTES(reg, instr)
    int lane, Register scratch1,
  constexpr int lane_width_in_bytes = 8;
  MAYBE_REVERSE_BYTES(scratch2, xxbrd)
  vinsertd(dst, scratch2, Operand((1 - lane) * lane_width_in_bytes));
    int lane, Register scratch1,
  constexpr int lane_width_in_bytes = 4;
  MAYBE_REVERSE_BYTES(scratch2, xxbrw)
  vinsertw(dst, scratch2, Operand((3 - lane) * lane_width_in_bytes));
    int lane, Register scratch1,
  constexpr int lane_width_in_bytes = 2;
  MAYBE_REVERSE_BYTES(scratch2, xxbrh)
  vinserth(dst, scratch2, Operand((7 - lane) * lane_width_in_bytes));
    int lane, Register scratch1,
  vinsertb(dst, scratch2, Operand((15 - lane)));
    int lane, Register scratch1,
  constexpr int lane_width_in_bytes = 8;
  vextractd(scratch2, src, Operand((1 - lane) * lane_width_in_bytes));
  MAYBE_REVERSE_BYTES(scratch2, xxbrd)
    int lane, Register scratch1,
  constexpr int lane_width_in_bytes = 4;
  vextractuw(scratch2, src, Operand((3 - lane) * lane_width_in_bytes));
  MAYBE_REVERSE_BYTES(scratch2, xxbrw)
    int lane, Register scratch1,
  constexpr int lane_width_in_bytes = 2;
  vextractuh(scratch2, src, Operand((7 - lane) * lane_width_in_bytes));
  MAYBE_REVERSE_BYTES(scratch2, xxbrh)
    int lane, Register scratch1,
  vextractub(scratch2, src, Operand(15 - lane));
  constexpr int lane_width_in_bytes = 8;
  MAYBE_REVERSE_BYTES(dst, xxbrd)
  vinsertd(dst, dst, Operand(1 * lane_width_in_bytes));
  MAYBE_REVERSE_BYTES(dst, xxbrw)
  vspltw(dst, dst, Operand(1));
  MAYBE_REVERSE_BYTES(dst, xxbrh)
  vsplth(dst, dst, Operand(3));
  vspltb(dst, dst, Operand(7));
  MAYBE_REVERSE_BYTES(dst, xxbrd)
  constexpr int lane_width_in_bytes = 8;
  mov(scratch1, Operand(0xFFFFFFFF));
  mtvsrd(scratch2, scratch1);
  vinsertd(scratch2, scratch2, Operand(1 * lane_width_in_bytes));
  vand(dst, scratch2, dst);
  MAYBE_REVERSE_BYTES(dst, xxbrd)
  mov(scratch1, Operand(0xFFFF));
  mtvsrd(scratch2, scratch1);
  vspltw(scratch2, scratch2, Operand(1));
  vand(dst, scratch2, dst);
  MAYBE_REVERSE_BYTES(dst, xxbrd)
  li(scratch1, Operand(0xFF));
  mtvsrd(scratch2, scratch1);
  vsplth(scratch2, scratch2, Operand(3));
  vand(dst, scratch2, dst);
  constexpr int lane_width_in_bytes = 8;
  MAYBE_REVERSE_BYTES(scratch2, xxbrd)
  vxor(dst, dst, dst);
  vinsertd(dst, scratch2, Operand(1 * lane_width_in_bytes));
  constexpr int lane_width_in_bytes = 4;
  MAYBE_REVERSE_BYTES(scratch2, xxbrw)
  vxor(dst, dst, dst);
  vinsertw(dst, scratch2, Operand(3 * lane_width_in_bytes));
#undef MAYBE_REVERSE_BYTES
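// WebAssembly numbers SIMD lanes assuming little-endian element order. On a
// big-endian host the vector register elements are numbered from the other
// end, so lane indices are mirrored (15 - lane, 7 - lane, ...) and
// MAYBE_REVERSE_BYTES byte-swaps scalars (xxbrd/xxbrw/xxbrh) on the way into
// and out of the register; on little-endian builds the macro expands to
// nothing.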
    Register scratch1, Register scratch2,
  constexpr uint8_t fxm = 0x2;
  constexpr int bit_number = 24;
  li(scratch1, Operand(0));
  li(scratch2, Operand(1));
  vxor(scratch3, scratch3, scratch3);
  mtcrf(scratch1, fxm);
  vcmpequd(scratch3, src, scratch3, SetRC);
  isel(dst, scratch1, scratch2, bit_number);
  vnor(dst, src, src);
    Register scratch1, Register scratch2) {
  mov(scratch1, Operand(low));
  mov(scratch2, Operand(high));
  mtvsrdd(dst, scratch2, scratch1);
    Register reg4, Register reg5,
  RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (regs.has(candidate)) continue;
  if (src == dst) return;
  if (dst.ra() != r0 && dst.ra().is_valid())
  if (dst.rb() != r0 && dst.rb().is_valid())
    Register scratch_1) {
  if (src.ra() != r0 && src.ra().is_valid())
  if (src.rb() != r0 && src.rb().is_valid())
  if (dst.ra() != r0 && dst.ra().is_valid())
  if (dst.rb() != r0 && dst.rb().is_valid())
  if (is_int16(src.offset()) || is_int16(dst.offset())) {
    if (!is_int16(src.offset())) {
    LoadU64(scratch_1, dst, scratch_0);
    StoreU64(scratch_0, dst, scratch_1);
    LoadU64(scratch_1, dst, scratch_0);
    LoadU64(scratch_0, src, scratch_1);
    StoreU64(scratch_0, dst, scratch_1);
    StoreU64(scratch_1, src, scratch_0);
  if (src == dst) return;
  if (src == dst) return;
  vor(scratch, src, src);
  vor(dst, scratch, scratch);
  vor(src, scratch1, scratch1);
  rlwinm(scratch, val, 8, 16, 23);
  rlwinm(dst, val, 24, 24, 31);
  orx(dst, scratch, dst);
  rlwimi(scratch, val, 24, 0, 7);
  rlwimi(scratch, val, 24, 16, 23);
  AddS64(target, target, Operand(IsolateData::builtin_entry_table_offset()));
#ifdef V8_ENABLE_LEAPTIERING
void MacroAssembler::LoadEntrypointFromJSDispatchTable(Register destination,
                                                       Register dispatch_handle,
  Move(scratch, ExternalReference::js_dispatch_table_address());
  ShiftRightU64(index, dispatch_handle, Operand(kJSDispatchHandleShift));
  AddS64(scratch, scratch, index);
    Register code_object,
#ifdef V8_ENABLE_SANDBOX
  LoadCodeEntrypointViaCodePointer(
    uint16_t argument_count, Register scratch) {
#if V8_ENABLE_LEAPTIERING
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, ip);
#elif V8_ENABLE_SANDBOX
  LoadCodeEntrypointViaCodePointer(
      code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
      code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
#if V8_ENABLE_LEAPTIERING
    uint16_t argument_count) {
  mov(dispatch_handle_reg,
  static_assert(!JSDispatchTable::kSupportsCompaction);
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle_reg, scratch);
#if V8_ENABLE_LEAPTIERING
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, ip);
#elif V8_ENABLE_SANDBOX
  LoadCodeEntrypointViaCodePointer(
      code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
      code, FieldMemOperand(function_object, JSFunction::kCodeOffset), scratch);
#ifdef V8_ENABLE_WEBASSEMBLY
void MacroAssembler::ResolveWasmCodePointer(Register target) {
  ExternalReference global_jump_table =
      ExternalReference::wasm_code_pointer_table();
  Register scratch = temps.Acquire();
  Move(scratch, global_jump_table);
void MacroAssembler::CallWasmCodePointer(Register target,
  ResolveWasmCodePointer(target);
void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
  static_assert(sizeof(WasmCodePointer) == 4);
  static constexpr int after_call_offset = 5 * kInstrSize;
  addi(r7, r7, Operand(after_call_offset));
#ifdef V8_ENABLE_LEAPTIERING
  clrldi(dst, src, Operand(56));
  clrldi(dst, src, Operand(48));
  clrldi(dst, src, Operand(32));
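  // clrldi clears the given number of leftmost bits in the doubleword, so
  // clearing 56, 48 or 32 bits zero-extends a byte, halfword or word.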
  cntlzw(dst, src, r);
  cntlzd(dst, src, r);
#define COUNT_TRAILING_ZEROES_SLOW(max_count, scratch1, scratch2) \
  li(scratch1, Operand(max_count)); \
  mr(scratch1, src); \
  li(dst, Operand::Zero()); \
  andi(scratch2, scratch1, Operand(1)); \
  srdi(scratch1, scratch1, Operand(1)); \
  addi(dst, dst, Operand(1)); \
    Register scratch1, Register scratch2,
    cnttzw(dst, src, r);
    COUNT_TRAILING_ZEROES_SLOW(32, scratch1, scratch2);
    Register scratch1, Register scratch2,
    cnttzd(dst, src, r);
    COUNT_TRAILING_ZEROES_SLOW(64, scratch1, scratch2);
#undef COUNT_TRAILING_ZEROES_SLOW
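// The slow path counts trailing zeroes one bit at a time: test the low bit,
// shift right, and bump the count, with max_count as the answer for an
// all-zero source. POWER9 cores skip the loop entirely via cnttzw/cnttzd.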
5490 CHECK(0 <= byte_idx && byte_idx <= 7);
5491 int shift = byte_idx*8;
5497 Register scratch1, Register scratch2) {
5499 for (
int i = 0;
i < 8;
i++) {
5505 Register scratch1, Register scratch2) {
5507 for (
int i = 4;
i < 8;
i++) {
5517 CHECK(0 <= byte_idx && byte_idx <= 7);
5520 li(scratch2, Operand(0));
5521 for (
int i = 0;
i <= 7;
i++) {
5523 li(scratch1, Operand(0));
5530 rldicl(scratch1, scratch1, 0,
j*8+
i);
5532 orx(scratch2, scratch2, scratch1);
5536 orx(dst, dst, scratch2);
5546 Register function_address,
5547 ExternalReference thunk_ref, Register thunk_arg,
5548 int slots_to_drop_on_return,
5551 using ER = ExternalReference;
5553 Isolate* isolate = masm->isolate();
5555 ER::handle_scope_next_address(isolate),
no_reg);
5557 ER::handle_scope_limit_address(isolate),
no_reg);
5559 ER::handle_scope_level_address(isolate),
no_reg);
5569 Register prev_next_address_reg = r14;
5578 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5583 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5585 scratch, scratch2, prev_next_address_reg, prev_limit_reg));
5588 "Allocate HandleScope in callee-save registers.");
5589 __ LoadU64(prev_next_address_reg, next_mem_op);
5590 __ LoadU64(prev_limit_reg, limit_mem_op);
5591 __ lwz(prev_level_reg, level_mem_op);
5592 __ addi(scratch, prev_level_reg, Operand(1));
5593 __ stw(scratch, level_mem_op);
5596 Label profiler_or_side_effects_check_enabled, done_api_call;
5597 if (with_profiling) {
5598 __ RecordComment(
"Check if profiler or side effects check is enabled");
5602 __ bne(&profiler_or_side_effects_check_enabled);
5603#ifdef V8_RUNTIME_CALL_STATS
5605 __ Move(scratch, ER::address_of_runtime_stats_flag());
5608 __ bne(&profiler_or_side_effects_check_enabled);
5616 Label propagate_exception;
5617 Label delete_allocated_handles;
5618 Label leave_exit_frame;
5627 "No more valid handles (the result handle was the last one)."
5628 "Restore previous handle scope.");
5629 __ StoreU64(prev_next_address_reg, next_mem_op);
5631 __ lwz(scratch, level_mem_op);
5632 __ subi(scratch, scratch, Operand(1));
5633 __ CmpS64(scratch, prev_level_reg);
5634 __ Check(
eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
5636 __ stw(prev_level_reg, level_mem_op);
5638 __ CmpS64(scratch, prev_limit_reg);
5639 __ bne(&delete_allocated_handles);
  __ bind(&leave_exit_frame);

  Register argc_reg = prev_limit_reg;
  if (argc_operand != nullptr) {

      "Check if the function scheduled an exception.");
  __ LoadRoot(scratch, RootIndex::kTheHoleValue);

      ER::exception_address(isolate), no_reg));

  __ bne(&propagate_exception);

      AbortReason::kAPICallReturnedInvalidObject);

  if (argc_operand == nullptr) {
  if (with_profiling) {
    __ bind(&profiler_or_side_effects_check_enabled);

    if (thunk_arg.is_valid()) {
          IsolateFieldId::kApiCallbackThunkArgument);

    __ Move(scratch, thunk_ref);

    __ b(&done_api_call);

  __ bind(&propagate_exception);

      masm, "HandleScope limit has changed. Delete allocated extensions.");
  __ bind(&delete_allocated_handles);

  Register saved_result = prev_limit_reg;
  __ mr(saved_result, return_value);

  __ mr(return_value, saved_result);
  __ b(&leave_exit_frame);
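// Taken together (summary of the fragments above): the profiling path
// re-routes the call through thunk_ref, handing the real callback to the
// thunk via the isolate's kApiCallbackThunkArgument slot, and rejoins
// done_api_call afterwards; delete_allocated_handles preserves the return
// value across the runtime call that frees handle-scope extensions, then
// falls back to leave_exit_frame.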