#if V8_TARGET_ARCH_S390X

#define __ ACCESS_MASM(masm)
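// Throughout this file, `__` expands to ACCESS_MASM(masm): the usual V8
// convention that lets standalone helper functions emit instructions
// through an explicit MacroAssembler* the same way member functions do.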
#if V8_ENABLE_WEBASSEMBLY
constexpr int kStackSavedSavedFPSizeInBytes =
    kNumCallerSavedDoubles * kSimd128Size;
#else
constexpr int kStackSavedSavedFPSizeInBytes =
    kNumCallerSavedDoubles * kDoubleSize;
#endif  // V8_ENABLE_WEBASSEMBLY
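// The Float{32,64}Max/Min helpers below implement JS Math.max/min
// semantics on the FP side: a NaN operand routes to return_nan, and equal
// operands fall through to check_zero, where adding (or negating and
// adding) the inputs resolves the -0 vs. +0 ordering.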
  Label check_zero, return_left, return_right, return_nan, done;
  cdbr(left_reg, right_reg);
  ldr(result_reg, left_reg);
  adbr(result_reg, right_reg);
  adbr(left_reg, right_reg);
  if (right_reg != result_reg) {
    ldr(result_reg, right_reg);
  if (left_reg != result_reg) {
    ldr(result_reg, left_reg);

  Label check_zero, return_left, return_right, return_nan, done;
  cdbr(left_reg, right_reg);
  lcdbr(left_reg, left_reg);
  ldr(result_reg, left_reg);
  if (left_reg == right_reg) {
    adbr(result_reg, right_reg);
    sdbr(result_reg, right_reg);
  lcdbr(result_reg, result_reg);
  adbr(left_reg, right_reg);
  if (right_reg != result_reg) {
    ldr(result_reg, right_reg);
  if (left_reg != result_reg) {
    ldr(result_reg, left_reg);

  Label check_zero, return_left, return_right, return_nan, done;
  cebr(left_reg, right_reg);
  ldr(result_reg, left_reg);
  aebr(result_reg, right_reg);
  aebr(left_reg, right_reg);
  if (right_reg != result_reg) {
    ldr(result_reg, right_reg);
  if (left_reg != result_reg) {
    ldr(result_reg, left_reg);

  Label check_zero, return_left, return_right, return_nan, done;
  cebr(left_reg, right_reg);
  lcebr(left_reg, left_reg);
  ldr(result_reg, left_reg);
  if (left_reg == right_reg) {
    aebr(result_reg, right_reg);
    sebr(result_reg, right_reg);
  lcebr(result_reg, result_reg);
  aebr(left_reg, right_reg);
  if (right_reg != result_reg) {
    ldr(result_reg, right_reg);
  if (left_reg != result_reg) {
    ldr(result_reg, left_reg);
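// The next three helpers size and apply the caller-saved register area:
// exclusion1..3 are removed from the caller-saved GP set, and
// kStackSavedSavedFPSizeInBytes is added on top when the FP (or SIMD)
// registers are saved as well.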
    Register exclusion3) const {
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
    bytes += kStackSavedSavedFPSizeInBytes;

    Register exclusion1, Register exclusion2,
    Register exclusion3) {
  RegList exclusions = {exclusion1, exclusion2, exclusion3};
    bytes += kStackSavedSavedFPSizeInBytes;

    Register exclusion1, Register exclusion2,
    Register exclusion3) {
    bytes += kStackSavedSavedFPSizeInBytes;
  RegList exclusions = {exclusion1, exclusion2, exclusion3};

    int constant_index) {
  } else if (is_uint12(offset)) {
    ExternalReference reference, Register scratch) {
  if (reference.IsIsolateFieldId()) {
  if (options().enable_root_relative_access) {
  if (options().isolate_independent_code) {
    Move(scratch, reference);
  mov(ip, Operand(target, rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond);
  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
    Move(scratch, reference);
  mov(ip, Operand(target, rmode));
  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
  switch (options().builtin_call_jump_mode) {
  switch (options().builtin_call_jump_mode) {
  if (options().use_pc_relative_calls_and_jumps_for_mksnapshot) {

  if (is_uint12(total)) {
  } else if (is_int20(total)) {
    AddS64(sp, Operand(total));
  uint64_t frame_alignment_mask =
      ~(static_cast<uint64_t>(frame_alignment) - 1);
  AndP(sp, sp, Operand(frame_alignment_mask));

  mov(r0, Operand(smi));
  mov(r0, Operand(static_cast<uint32_t>(index.value())));
    mov(dst, Operand(static_cast<int>(index), rmode));
  mov(dst, Operand(value.address(), rmode));
  if (reference.IsIsolateFieldId()) {
        Operand(reference.offset_from_root_register()));
  if (options().isolate_independent_code) {
  CHECK(!reference.IsIsolateFieldId());
  mov(dst, Operand(reference));

    const Operand& length) {
  mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
    const Operand& length) {
  clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
    const Operand& length) {
  xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));

    const Operand& startBit,
    const Operand& endBit,
    const Operand& shiftAmt,
    risbg(dst, src, startBit,
          Operand(static_cast<intptr_t>(endBit.immediate() | 0x80)), shiftAmt);
    risbg(dst, src, startBit, endBit, shiftAmt);
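// PushArray copies the array onto the stack in the requested
// PushArrayOrder, using scratch2 as a moving cursor that is compared
// against the end pointer held in scratch on every iteration.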
    Register scratch2, PushArrayOrder order) {
    mov(scratch2, array);
    CmpS64(scratch2, scratch);

  int16_t num_to_push = regs.Count();
  SubS64(location, location, Operand(stack_offset));
    if ((regs.bits() & (1 << i)) != 0) {
    if ((regs.bits() & (1 << i)) != 0) {
  AddS64(location, location, Operand(stack_offset));

  int16_t num_to_push = dregs.Count();
  SubS64(location, location, Operand(stack_offset));
    if ((dregs.bits() & (1 << i)) != 0) {

  int16_t num_to_push = dregs.Count();
  SubS64(location, location, Operand(stack_offset));
    if ((dregs.bits() & (1 << i)) != 0) {

    if ((dregs.bits() & (1 << i)) != 0) {
  AddS64(location, location, Operand(stack_offset));

    if ((dregs.bits() & (1 << i)) != 0) {
  AddS64(location, location, Operand(stack_offset));
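// MultiPushF64OrV128 / MultiPopF64OrV128: when generating builtins, SIMD
// support cannot be baked in at snapshot time, so the code reads
// supports_wasm_simd_128_address() at runtime and branches between the
// vector-register and double-register save/restore paths.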
#if V8_ENABLE_WEBASSEMBLY
  bool generating_builtins =
      isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
  if (generating_builtins) {
    Label push_doubles, simd_pushed;
    Move(r1, ExternalReference::supports_wasm_simd_128_address());

#if V8_ENABLE_WEBASSEMBLY
  bool generating_builtins =
      isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
  if (generating_builtins) {
    Label pop_doubles, simd_popped;
    Move(r1, ExternalReference::supports_wasm_simd_128_address());

    const Register& scratch) {
    const Register& scratch) {
    const Register& scratch) {
  StoreU64(value, dst_field_operand, scratch);
    Register value, Register slot_address,
    Register slot_address,
  Pop(slot_address_parameter);
  Pop(object_parameter);
    Register slot_address,
  Pop(slot_address_parameter);
  Pop(object_parameter);
#if V8_ENABLE_WEBASSEMBLY
  if (mode == StubCallMode::kCallWasmRuntimeStub) {
  Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  if (v8_flags.disable_write_barriers) {

  if (marker_reg.is_valid()) {
    Push(r14, fp, marker_reg);
  if (marker_reg.is_valid()) {
    Pop(r14, fp, marker_reg);
  if (function_reg.is_valid()) {
    Push(r14, fp, cp, function_reg);

  if (dst != src) ldr(dst, src);
  cegbr(double_dst, src);
  cdgbr(double_dst, src);
  cgebr(m, dst, double_input);
  cgdbr(m, dst, double_input);
  cfdbr(m, dst, double_input);

    int prologue_offset) {
    bool load_constant_pool_pointer_reg) {
#if V8_ENABLE_WEBASSEMBLY
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT ||
         frame_type == StackFrame::API_ACCESSOR_EXIT ||
         frame_type == StackFrame::API_CALLBACK_EXIT);
  using ER = ExternalReference;
  ER c_entry_fp_address =
      ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
  if (frame_alignment > 0) {
#if !defined(USE_SIMULATOR)
  return v8_flags.sim_stack_alignment;
  using ER = ExternalReference;
  ER context_address = ER::Create(IsolateAddressId::kContextAddress, isolate());
  ER c_entry_fp_address =
      ER::Create(IsolateAddressId::kCEntryFPAddress, isolate());

  Isolate* isolate = this->isolate();
  ExternalReference limit =
      kind == StackLimitKind::kRealStackLimit
          ? ExternalReference::address_of_real_jslimit(isolate)
          : ExternalReference::address_of_jslimit(isolate);
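// StackOverflowCheck: scratch receives sp minus the stack limit, i.e. the
// remaining headroom; the code branches to stack_overflow when that
// headroom does not cover the space about to be claimed.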
    Label* stack_overflow) {
  SubS64(scratch, sp, scratch);
  ble(stack_overflow);

    Register actual_parameter_count,
  Label regular_invoke;
  DCHECK_EQ(expected_parameter_count, r4);
  SubS64(expected_parameter_count, expected_parameter_count,
         actual_parameter_count);
  ble(&regular_invoke);
  Label stack_overflow;
  Register num = r7, src = r8, dest = ip;
  ltgr(num, actual_parameter_count);
  SubS64(num, num, Operand(1));
  LoadRoot(scratch, RootIndex::kUndefinedValue);
  SubS64(expected_parameter_count, expected_parameter_count, Operand(1));
  bind(&stack_overflow);
  bind(&regular_invoke);

    Register expected_parameter_count,
    Register actual_parameter_count) {
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Move(r6, debug_hook_active);
  SmiTag(expected_parameter_count);
  Push(expected_parameter_count);
  SmiTag(actual_parameter_count);
  Push(actual_parameter_count);
  Pop(actual_parameter_count);
  Pop(expected_parameter_count);
  SmiUntag(expected_parameter_count);

    Register expected_parameter_count,
    Register actual_parameter_count,
                    actual_parameter_count);
  LoadRoot(r5, RootIndex::kUndefinedValue);
  InvokePrologue(expected_parameter_count, actual_parameter_count, type);
  constexpr int unused_argument_count = 0;

    Register fun, Register new_target, Register actual_parameter_count,
                 SharedFunctionInfo::kFormalParameterCountOffset));
    Register expected_parameter_count,
    Register actual_parameter_count,
                     actual_parameter_count, type);

    Register type_reg, Register scratch,
    unsigned lower_limit, unsigned higher_limit) {
  if (lower_limit != 0) {
    mov(scratch, value);
    slgfi(scratch, Operand(lower_limit));
    CmpU64(scratch, Operand(higher_limit - lower_limit));
    CmpU64(value, Operand(higher_limit));
  CompareRange(type_reg, scratch, lower_limit, higher_limit);
#ifdef V8_TARGET_BIG_ENDIAN
    unsigned lower_limit,
    unsigned higher_limit,
    Label* on_in_range) {
  CompareRange(value, scratch, lower_limit, higher_limit);
#if V8_ENABLE_WEBASSEMBLY
  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
#ifndef V8_ENABLE_LEAPTIERING
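// TailCallOptimizedCodeSlot (non-leaptiering builds): a cleared weak slot
// or code that is marked for deoptimization branches to
// heal_optimized_code_slot, which discards the stale entry and re-enters
// the function through its unoptimized code; otherwise the optimized code
// is tail-called.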
void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                               Register optimized_code_entry,
  Label heal_optimized_code_slot;
                    &heal_optimized_code_slot);
      optimized_code_entry,
  __ bne(&heal_optimized_code_slot);
  __ bind(&heal_optimized_code_slot);

#ifdef V8_ENABLE_DEBUG_CODE
    IsObjectType(object, scratch, scratch, FEEDBACK_CELL_TYPE);
    Assert(eq, AbortReason::kExpectedFeedbackCell);
    IsObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE);
    Assert(eq, AbortReason::kExpectedFeedbackVector);
    Register scratch = temps.Acquire();
    Assert(eq, AbortReason::kExpectedFeedbackVector);

    Register optimized_code, Register closure, Register scratch1,
    Register slot_address) {
#ifdef V8_ENABLE_LEAPTIERING
  mov(value, optimized_code);

#ifndef V8_ENABLE_LEAPTIERING
    Register flags, Register feedback_vector, CodeKind current_code_kind) {
  if (current_code_kind != CodeKind::MAGLEV) {
  CHECK(is_uint16(kFlagsMask));
  tmll(flags, Operand(kFlagsMask));
    Register flags, Register feedback_vector, CodeKind current_code_kind,
    Label* flags_need_processing) {
                                        flags_need_processing);
    Register flags, Register feedback_vector) {
  Label maybe_has_optimized_code, maybe_needs_logging;
  beq(&maybe_needs_logging);
  bind(&maybe_needs_logging);
  TestBitMask(flags, FeedbackVector::LogNextExecutionBit::kMask, r0);
  beq(&maybe_has_optimized_code);
  bind(&maybe_has_optimized_code);
               FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(this, optimized_code_entry, r1);
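// CallRuntime: r2 carries the argument count expected by the CEntry stub,
// and the CHECK verifies it against the runtime function's declared nargs
// (functions with nargs < 0 take a variable count).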
    int num_arguments) {
  CHECK(f->nargs < 0 || f->nargs == num_arguments);
  mov(r2, Operand(num_arguments));
  bool switch_to_central_stack = options().is_wasm;
  if (function->nargs >= 0) {
    mov(r2, Operand(function->nargs));
    bool builtin_exit_frame) {
    Label* target_if_cleared) {
  beq(target_if_cleared);

    Register scratch2) {
  DCHECK(value > 0 && is_int8(value));
  if (v8_flags.native_code_counters && counter->Enabled()) {
    AddS64(scratch1, Operand(value));
    Register scratch2) {
  DCHECK(value > 0 && is_int8(value));
  if (v8_flags.native_code_counters && counter->Enabled()) {
    AddS64(scratch1, Operand(-value));

  lgfi(r2, Operand(static_cast<int>(reason)));
    CallCFunction(ExternalReference::abort_with_reason(), 1, 0);
    Move(r3, ExternalReference::abort_with_reason());

    Register scratch, Label* fbv_undef) {
  IsObjectType(dst, scratch, scratch, FEEDBACK_VECTOR_TYPE);
  LoadRoot(dst, RootIndex::kUndefinedValue);
      dst, Map::kConstructorOrBackPointerOrNativeContextOffset));

#ifdef V8_ENABLE_DEBUG_CODE
  if (!v8_flags.slow_debug_code) return;
  CmpS64(int32_register, r0);
  Check(le, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
  Check(ne, AbortReason::kOperandIsNotAMap);
  Check(eq, AbortReason::kOperandIsNotAMap);
  Check(ne, AbortReason::kOperandIsASmi, cr0);
  Check(eq, AbortReason::kOperandIsNotASmi, cr0);
  Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
       Operand(Map::Bits1::IsConstructorBit::kMask));
  Check(ne, AbortReason::kOperandIsNotAConstructor);
  Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
                           LAST_JS_FUNCTION_TYPE);
  Check(le, AbortReason::kOperandIsNotAFunction);
  Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
  Check(le, AbortReason::kOperandIsNotACallableFunction);
  Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
  IsObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
  Check(eq, AbortReason::kOperandIsNotABoundFunction);
  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
                  FIRST_JS_GENERATOR_OBJECT_TYPE,
                  LAST_JS_GENERATOR_OBJECT_TYPE);
  Check(le, AbortReason::kOperandIsNotAGeneratorObject);
  Label done_checking;
  Assert(eq, AbortReason::kExpectedUndefinedOrCell);
  bind(&done_checking);
  Abort(abort_reason);

    int num_double_arguments) {
  int stack_passed_words = 0;
  stack_passed_words +=
  return stack_passed_words;
    int num_double_arguments,
  int stack_passed_arguments =
  stack_space += stack_passed_arguments;

    int num_reg_arguments,
    int num_double_arguments,
    bool has_function_descriptor,
    Label* return_label) {
  return CallCFunction(ip, num_reg_arguments, num_double_arguments,
                       set_isolate_data_slots, has_function_descriptor,
    int num_double_arguments,
    bool has_function_descriptor,
    Label* return_label) {
  if (num_reg_arguments == 4) {
  } else if (num_reg_arguments >= 5) {
  if (has_function_descriptor) {
  if (has_function_descriptor) {
  nop(BASR_CALL_TYPE_NOP);
  if (num_reg_arguments == 4) {
  } else if (num_reg_arguments >= 5) {
  if (return_label) bind(return_label);
  int stack_passed_arguments =
  return call_pc_offset;
    bool has_function_descriptor,
    Label* return_label) {
  return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
                       has_function_descriptor, return_label);
    bool has_function_descriptor,
    Label* return_label) {
  return CallCFunction(function, num_arguments, 0, set_isolate_data_slots,
                       has_function_descriptor, return_label);
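// The mask is narrowed to the single byte that contains the set bits so
// the test can be emitted as one test-under-mask instruction on that
// byte; which byte is picked differs between big- and little-endian
// layouts.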
  uint32_t shifted_mask = mask;
  } else if (mask < 0x8000) {
    shifted_mask = mask >> 8;
  } else if (mask < 0x800000) {
    shifted_mask = mask >> 16;
    shifted_mask = mask >> 24;
#if V8_TARGET_LITTLE_ENDIAN
     Operand(shifted_mask));

    Register reg4, Register reg5,
  RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6};
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (regs.has(candidate)) continue;
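// mov(Register, const Operand&) materializes 64-bit immediates with the
// cheapest matching encoding: LLILL/LLILF when only the low half is set,
// LLIHL/LLIHF when only the high half is set, LGHI/LGFI for sign-extended
// 16/32-bit values, and an IIHF + IILF pair as the general fallback.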
  if (src.is_heap_number_request()) {
    value = src.immediate();
  if (is_uint16(lo_32)) {
    llill(dst, Operand(lo_32));
    llilf(dst, Operand(lo_32));
  } else if (lo_32 == 0) {
    if (is_uint16(hi_32)) {
      llihl(dst, Operand(hi_32));
      llihf(dst, Operand(hi_32));
  } else if (is_int16(value)) {
    lghi(dst, Operand(value));
  } else if (is_int32(value)) {
    lgfi(dst, Operand(value));
    iihf(dst, Operand(hi_32));
    iilf(dst, Operand(lo_32));

  if (is_uint12(src1.offset())) {
  } else if (is_int20(src1.offset())) {

#define Generate_MulHigh32(instr) \
  srlg(dst, dst, Operand(32)); \
  Generate_MulHigh32(msgf);
    std::swap(src1, src2);
  Generate_MulHigh32(msgfr);
    const Operand& src2) {
  Generate_MulHigh32(msgfi);
#undef Generate_MulHigh32
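// The Generate_* families below (MulHigh, Div, Mod and their unsigned and
// 64-bit variants) expand to the z/Architecture multiply/divide forms
// that operate on the implicit r0/r1 register pair, then move whichever
// half the operation wants (quotient vs. remainder, high vs. low word)
// into dst.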
#define Generate_MulHighU32(instr) \
  Generate_MulHighU32(ml);
  Generate_MulHighU32(mlr);
    const Operand& src2) {
#undef Generate_MulHighU32

#define Generate_Mul32WithOverflowIfCCUnequal(instr) \
  if (src2.rx() == dst || src2.rb() == dst) dst = r0;
  Generate_Mul32WithOverflowIfCCUnequal(msgf);
    std::swap(src1, src2);
  Generate_Mul32WithOverflowIfCCUnequal(msgfr);
    const Operand& src2) {
  Generate_Mul32WithOverflowIfCCUnequal(msgfi);
#undef Generate_Mul32WithOverflowIfCCUnequal

#define Generate_Div32(instr) \
  Generate_Div32(dsgf);
  Generate_Div32(dsgfr);
#undef Generate_Div32

#define Generate_DivU32(instr) \
  srdl(r0, Operand(32)); \
  Generate_DivU32(dl);
  Generate_DivU32(dlr);
#undef Generate_DivU32

#define Generate_Div64(instr) \
  Generate_Div64(dsg);
  Generate_Div64(dsgr);
#undef Generate_Div64

#define Generate_DivU64(instr) \
  lghi(r0, Operand::Zero()); \
  Generate_DivU64(dlg);
  Generate_DivU64(dlgr);
#undef Generate_DivU64

#define Generate_Mod32(instr) \
  Generate_Mod32(dsgf);
  Generate_Mod32(dsgfr);
#undef Generate_Mod32

#define Generate_ModU32(instr) \
  srdl(r0, Operand(32)); \
  Generate_ModU32(dl);
  Generate_ModU32(dlr);
#undef Generate_ModU32

#define Generate_Mod64(instr) \
  Generate_Mod64(dsg);
  Generate_Mod64(dsgr);
#undef Generate_Mod64

#define Generate_ModU64(instr) \
  lghi(r0, Operand::Zero()); \
  Generate_ModU64(dlg);
  Generate_ModU64(dlgr);
#undef Generate_ModU64

  mgrk(r0, src1, src2);
  FrameScope scope(this, StackFrame::INTERNAL);
  CallCFunction(ExternalReference::int64_mul_high_function(), 2, 0);

  if (is_uint12(input.offset())) {
  if (is_int16(opnd.immediate()))
  if (is_int16(opnd.immediate()))
  AddS32(dst, src, Operand(opnd));
  ahik(dst, src, opnd);
  AddS64(dst, src, Operand(opnd));
  aghik(dst, src, opnd);
  if (dst != src1 && dst != src2) {
    ark(dst, src1, src2);
  } else if (dst == src2) {
  if (dst != src1 && dst != src2) {
    agrk(dst, src1, src2);
  } else if (dst == src2) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  DCHECK(is_int8(imm.immediate()));
  DCHECK(is_int20(opnd.offset()));
  DCHECK(is_int8(imm.immediate()));
  DCHECK(is_int20(opnd.offset()));
  if (dst != src2 && dst != src1) {
  } else if (dst != src2) {
  if (dst != src2 && dst != src1) {
    algrk(dst, src1, src2);
  } else if (dst != src2) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  if (dst != src2 && dst != src1) {
  } else if (dst != src2) {
  AddS32(dst, Operand(-(imm.immediate())));
  AddS64(dst, Operand(-(imm.immediate())));
  SubS32(dst, src, Operand(imm));
  AddS32(dst, src, Operand(-(imm.immediate())));
  SubS64(dst, src, Operand(imm));
  AddS64(dst, src, Operand(-(imm.immediate())));
    srk(dst, src1, src2);
    if (dst != src1 && dst != src2) lr(dst, src1);
    if (dst != src1 && dst == src2) {
    sgrk(dst, src1, src2);
    if (dst != src1 && dst != src2) mov(dst, src1);
    if (dst != src1 && dst == src2) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  sllg(r0, src, Operand(32));
  srlg(dst, dst, Operand(32));
  laa(dst, dst, opnd);
  laag(dst, dst, opnd);
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));

  if (dst != src1 && dst != src2) {
    nrk(dst, src1, src2);
  } else if (dst == src2) {
  if (dst != src1 && dst != src2) {
    ngrk(dst, src1, src2);
  } else if (dst == src2) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  intptr_t value = opnd.immediate();
  if (value >> 32 != -1) {
    nihf(dst, Operand(value >> 32));
  nilf(dst, Operand(value & 0xFFFFFFFF));
  if (dst != src) lr(dst, src);
  intptr_t value = opnd.immediate();
  intptr_t shifted_value = value;
  int trailing_zeros = 0;
  while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
    shifted_value >>= 1;
    int endBit = 63 - trailing_zeros;
  } else if (-1 == shifted_value) {
    int endBit = 63 - trailing_zeros;
  if (dst != src && (0 != value)) mov(dst, src);

  if (dst != src1 && dst != src2) {
    ork(dst, src1, src2);
  } else if (dst == src2) {
  if (dst != src1 && dst != src2) {
    ogrk(dst, src1, src2);
  } else if (dst == src2) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  intptr_t value = opnd.immediate();
  if (value >> 32 != 0) {
    oihf(dst, Operand(value >> 32));
  oilf(dst, Operand(value & 0xFFFFFFFF));
  if (dst != src) lr(dst, src);
  if (dst != src) mov(dst, src);

  if (dst != src1 && dst != src2) {
    xrk(dst, src1, src2);
  } else if (dst == src2) {
  if (dst != src1 && dst != src2) {
    xgrk(dst, src1, src2);
  } else if (dst == src2) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  intptr_t value = opnd.immediate();
  xihf(dst, Operand(value >> 32));
  xilf(dst, Operand(value & 0xFFFFFFFF));
  if (dst != src) lr(dst, src);
  if (dst != src) mov(dst, src);
  if (src != no_reg && src != dst) lr(dst, src);
  xilf(dst, Operand(0xFFFFFFFF));
  if (src != no_reg && src != dst) lgr(dst, src);
  xihf(dst, Operand(0xFFFFFFFF));
  xilf(dst, Operand(0xFFFFFFFF));

  intptr_t value = opnd.immediate();
  if (is_int16(value))
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  DCHECK(is_int12(src2.offset()));
  DCHECK(is_int12(src2.offset()));
  if (is_uint12(opnd.offset())) {
    cs(old_val, new_val, opnd);
    csy(old_val, new_val, opnd);
  DCHECK(is_int20(opnd.offset()));
  csg(old_val, new_val, opnd);
  DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
  DCHECK(is_int20(opnd.offset()));
  intptr_t value = opnd.immediate();
  if (is_int16(value))
  brctg(r1, Operand(offset));
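// Smi constants: with pointer compression (or 31-bit Smis on a 64-bit
// target) the whole tagged value fits in 32 bits, so it is loaded with
// LLILF and compared with CFI; otherwise the payload occupies the upper
// word and is handled with LLIHF/CIH on the high half.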
  intptr_t value = static_cast<intptr_t>(smi.ptr());
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  llilf(dst, Operand(value));
  llihf(dst, Operand(value >> 32));
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  cfi(src1, Operand(smi));
  cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));

  int offset = mem.offset();
  DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
  DCHECK(scratch != mem.rb());
  if (!is_int20(mem.offset())) {
    mov(scratch, Operand(mem.offset()));
      mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
  DCHECK(is_int20(mem.offset()));
  lmg(dst1, dst2, mem);
  DCHECK(is_int20(mem.offset()));
  stmg(src1, src2, mem);
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);

  int offset = mem.offset();
  int offset = mem.offset();
  } else if (scratch != no_reg) {
#ifdef V8_TARGET_BIG_ENDIAN
    Register scratch0, Register scratch1) {
      is_uint12(opnd.offset());
    lrvg(scratch0, opnd);
    vlvgp(dst, scratch1, scratch0);
    lrvg(scratch, opnd);
  if (!is_int20(mem.offset())) {
    mov(scratch, Operand(mem.offset()));
  if (!is_int20(mem.offset())) {
    mov(scratch, Operand(mem.offset()));
  if (!is_int20(mem.offset())) {
    mov(scratch, Operand(mem.offset()));
  DCHECK(is_int20(opnd.offset()));
  strvg(scratch, opnd);
  DCHECK(is_int20(opnd.offset()));
  strv(scratch, opnd);
    Register scratch1, Register scratch2) {
      is_uint12(mem.offset());
    strvg(scratch1, mem);
    Register scratch0, Register scratch1) {
    Register scratch1, Register scratch2) {
  locgr(cond, dst, src);
  if (is_uint12(mem.offset())) {
  if (is_uint12(mem.offset())) {
    DCHECK(is_int20(mem.offset()));
  if (is_uint12(mem.offset())) {
    DCHECK(is_int20(mem.offset()));
  if (is_uint12(mem.offset())) {
  if (is_uint12(mem.offset())) {
  if (is_uint12(mem.offset())) {
    DCHECK(is_int20(mem.offset()));
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  } else if (dst == rhs) {
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {
    meebr(dst, scratch);
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {
  if (is_uint12(opnd.offset())) {

  int offset = mem.offset();
  bool use_RXform = false;
  bool use_RXYform = false;
  } else if (is_int20(offset)) {
  } else if (scratch != no_reg) {
  } else if (use_RXYform) {
  int offset = mem.offset();
  int offset = mem.offset();
  } else if (is_int20(offset)) {
  int offset = mem.offset();
  } else if (is_int20(offset)) {

    const Operand& val) {
    const Operand& val2) {
    sll(dst, val, val2);
    sllk(dst, src, val, val2);
    DCHECK(dst != val || val == r0);
    sll(dst, val, val2);
    const Operand& val) {
    const Operand& val2) {
  sllg(dst, src, val, val2);
    const Operand& val) {
    const Operand& val2) {
    srl(dst, val, val2);
    srlk(dst, src, val, val2);
    DCHECK(dst != val || val == r0);
    srl(dst, val, val2);
    const Operand& val2) {
  srlg(dst, src, val, val2);
    const Operand& val) {
    const Operand& val) {
    const Operand& val2) {
    sra(dst, val, val2);
    srak(dst, src, val, val2);
    DCHECK(dst != val || val == r0);
    sra(dst, val, val2);
    const Operand& val) {
    const Operand& val2) {
  srag(dst, src, val, val2);
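// ClearRightImm clears the low numBitsToClear bits of src: bits
// [0, 63 - numBitsToClear] are kept via a rotate-and-insert when that is
// available, and otherwise the complement mask ~((1 << n) - 1) is applied
// with NILL/NILF (plus NIHF once the mask spills into the high word).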
    const Operand& val) {
  int endBit = 63 - numBitsToClear;
  uint64_t hexMask = ~((1L << numBitsToClear) - 1);
  if (dst != src) mov(dst, src);
  if (numBitsToClear <= 16) {
    nill(dst, Operand(static_cast<uint16_t>(hexMask)));
  } else if (numBitsToClear <= 32) {
    nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
  } else if (numBitsToClear <= 64) {
    nilf(dst, Operand(static_cast<intptr_t>(0)));
    nihf(dst, Operand(hexMask >> 32));

  if (src == dst) return;
    Register scratch_1) {
  if (src == dst) return;
  if (src == dst) return;
  if (src == dst) return;

  larl(dst, &current_pc);
                    IsolateData::builtin_entry_table_offset()));

#ifdef V8_ENABLE_LEAPTIERING
void MacroAssembler::LoadEntrypointFromJSDispatchTable(
    Register destination, Register dispatch_handle,
  Move(scratch, ExternalReference::js_dispatch_table_address());
  ShiftRightU64(index, dispatch_handle, Operand(kJSDispatchHandleShift));
  AddS64(scratch, scratch, index);
    Register code_object,
    uint16_t argument_count) {
#if V8_ENABLE_LEAPTIERING
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
#if V8_ENABLE_LEAPTIERING
    uint16_t argument_count) {
  mov(dispatch_handle_reg,
  static_assert(!JSDispatchTable::kSupportsCompaction);
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle_reg, scratch);
#if V8_ENABLE_LEAPTIERING
  LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);

void MacroAssembler::zosStoreReturnAddressAndCall(Register target,
  DCHECK(target == r3 || target == r4);

#ifdef V8_ENABLE_WEBASSEMBLY
void MacroAssembler::ResolveWasmCodePointer(Register target) {
  ExternalReference global_jump_table =
      ExternalReference::wasm_code_pointer_table();
  Register scratch = temps.Acquire();
  Move(scratch, global_jump_table);
void MacroAssembler::CallWasmCodePointer(Register target,
  ResolveWasmCodePointer(target);
void MacroAssembler::LoadWasmCodePointer(Register dst, MemOperand src) {
  static_assert(sizeof(WasmCodePointer) == 4);

  bind(&return_label);
#ifdef V8_ENABLE_LEAPTIERING

    Register scratch_pair) {
  AddS32(dst, scratch_pair, Operand(-32));
    Register scratch_pair) {
  mov(dst, scratch_pair);
    Register scratch_pair) {
  mov(dst, Operand(32));
  llgfr(scratch1, scratch1);
  lcgr(scratch0, scratch1);
  ngr(scratch1, scratch0);
  flogr(scratch0, scratch1);
  mov(dst, Operand(63));
    Register scratch_pair) {
  ltgr(scratch1, src);
  mov(dst, Operand(64));
  lcgr(scratch0, scratch1);
  ngr(scratch0, scratch1);
  flogr(scratch0, scratch0);
  mov(dst, Operand(63));

    Register new_value, int start,
    int end, int shift_amount,
    int offset, Register temp0,
  llgfr(temp1, temp0);
        Operand(shift_amount), false);
        Operand(shift_amount), false);
        Operand(end + shift_amount),
        Operand(64 - shift_amount), true);
    Register old_value, Register new_value,
    Register temp0, Register temp1) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 8 * idx; \
    constexpr int end = start + 7; \
    constexpr int shift_amount = (3 - idx) * 8; \
    AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
                            shift_amount, -idx, temp0, temp1); \
#define ATOMIC_COMP_EXCHANGE_BYTE(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 8 * (3 - idx); \
    constexpr int end = start + 7; \
    constexpr int shift_amount = idx * 8; \
    AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
                            shift_amount, -idx, temp0, temp1); \

  Label one, two, three, done;
  tmll(addr, Operand(3));
  ATOMIC_COMP_EXCHANGE_BYTE(0);
  ATOMIC_COMP_EXCHANGE_BYTE(1);
  ATOMIC_COMP_EXCHANGE_BYTE(2);
  ATOMIC_COMP_EXCHANGE_BYTE(3);

    Register new_value, Register temp0,
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 16 * idx; \
    constexpr int end = start + 15; \
    constexpr int shift_amount = (1 - idx) * 16; \
    AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
                            shift_amount, -idx * 2, temp0, temp1); \
#define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 16 * (1 - idx); \
    constexpr int end = start + 15; \
    constexpr int shift_amount = idx * 16; \
    AtomicCmpExchangeHelper(addr, output, old_value, new_value, start, end, \
                            shift_amount, -idx * 2, temp0, temp1); \

  tmll(addr, Operand(3));
  ATOMIC_COMP_EXCHANGE_HALFWORD(0);
  ATOMIC_COMP_EXCHANGE_HALFWORD(1);

    Register output, int start, int end,
    int shift_amount, int offset,
  llgfr(scratch, output);
      Operand(shift_amount), false);
  srl(output, Operand(shift_amount));
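// AtomicExchangeHelper follows the same pattern for plain exchanges:
// rotate the new value into its lane, swap the whole word, then shift the
// previous lane contents down into output.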
    Register output, Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_BYTE(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 8 * idx; \
    constexpr int end = start + 7; \
    constexpr int shift_amount = (3 - idx) * 8; \
    AtomicExchangeHelper(addr, value, output, start, end, shift_amount, -idx, \
#define ATOMIC_EXCHANGE_BYTE(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 8 * (3 - idx); \
    constexpr int end = start + 7; \
    constexpr int shift_amount = idx * 8; \
    AtomicExchangeHelper(addr, value, output, start, end, shift_amount, -idx, \

  Label three, two, one, done;
  tmll(addr, Operand(3));
  ATOMIC_EXCHANGE_BYTE(0);
  ATOMIC_EXCHANGE_BYTE(1);
  ATOMIC_EXCHANGE_BYTE(2);
  ATOMIC_EXCHANGE_BYTE(3);

    Register output, Register scratch) {
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_EXCHANGE_HALFWORD(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 16 * idx; \
    constexpr int end = start + 15; \
    constexpr int shift_amount = (1 - idx) * 16; \
    AtomicExchangeHelper(addr, value, output, start, end, shift_amount, \
                         -idx * 2, scratch); \
#define ATOMIC_EXCHANGE_HALFWORD(i) \
    constexpr int idx = (i); \
    static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
    constexpr int start = 32 + 16 * (1 - idx); \
    constexpr int end = start + 15; \
    constexpr int shift_amount = idx * 16; \
    AtomicExchangeHelper(addr, value, output, start, end, shift_amount, \
                         -idx * 2, scratch); \

  tmll(addr, Operand(3));
  ATOMIC_EXCHANGE_HALFWORD(0);
  ATOMIC_EXCHANGE_HALFWORD(1);
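// SIMD splats and lane accesses map directly onto the vector facility:
// VREP replicates one element across the register, with the Condition
// operand selecting the element size (3 = doubleword down to 0 = byte).
// Lane indices are mirrored (1 - idx, 3 - idx, ...) because vector
// elements are numbered from the big end.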
  vrep(dst, src, Operand(0), Condition(3));
  vrep(dst, src, Operand(0), Condition(2));
  vrep(dst, dst, Operand(0), Condition(3));
  vrep(dst, dst, Operand(0), Condition(2));
  vrep(dst, dst, Operand(0), Condition(1));
  vrep(dst, dst, Operand(0), Condition(0));

    uint8_t imm_lane_idx, Register) {
  vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3));
    uint8_t imm_lane_idx, Register) {
  vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2));
    uint8_t imm_lane_idx, Register) {
    uint8_t imm_lane_idx, Register) {
    uint8_t imm_lane_idx, Register) {
    uint8_t imm_lane_idx, Register scratch) {
    uint8_t imm_lane_idx, Register) {
    uint8_t imm_lane_idx, Register scratch) {
    Register src2, uint8_t imm_lane_idx,
    Register src2, uint8_t imm_lane_idx,
    Register src2, uint8_t imm_lane_idx,
    Register src2, uint8_t imm_lane_idx,
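// The SIMD_*_LIST / EMIT_SIMD_* macro pairs below stamp out one
// MacroAssembler method per table row: each V(...) entry names the
// method, the vector instruction it wraps, and the Condition codes
// (element size and variant bits) forwarded to the encoder.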
#define SIMD_UNOP_LIST_VRR_A(V)            \
  V(F64x2Abs, vfpso, 2, 0, 3)              \
  V(F64x2Neg, vfpso, 0, 0, 3)              \
  V(F64x2Sqrt, vfsq, 0, 0, 3)              \
  V(F64x2Ceil, vfi, 6, 0, 3)               \
  V(F64x2Floor, vfi, 7, 0, 3)              \
  V(F64x2Trunc, vfi, 5, 0, 3)              \
  V(F64x2NearestInt, vfi, 4, 0, 3)         \
  V(F32x4Abs, vfpso, 2, 0, 2)              \
  V(F32x4Neg, vfpso, 0, 0, 2)              \
  V(F32x4Sqrt, vfsq, 0, 0, 2)              \
  V(F32x4Ceil, vfi, 6, 0, 2)               \
  V(F32x4Floor, vfi, 7, 0, 2)              \
  V(F32x4Trunc, vfi, 5, 0, 2)              \
  V(F32x4NearestInt, vfi, 4, 0, 2)         \
  V(I64x2Abs, vlp, 0, 0, 3)                \
  V(I64x2Neg, vlc, 0, 0, 3)                \
  V(I64x2SConvertI32x4Low, vupl, 0, 0, 2)  \
  V(I64x2SConvertI32x4High, vuph, 0, 0, 2) \
  V(I64x2UConvertI32x4Low, vupll, 0, 0, 2) \
  V(I64x2UConvertI32x4High, vuplh, 0, 0, 2) \
  V(I32x4Abs, vlp, 0, 0, 2)                \
  V(I32x4Neg, vlc, 0, 0, 2)                \
  V(I32x4SConvertI16x8Low, vupl, 0, 0, 1)  \
  V(I32x4SConvertI16x8High, vuph, 0, 0, 1) \
  V(I32x4UConvertI16x8Low, vupll, 0, 0, 1) \
  V(I32x4UConvertI16x8High, vuplh, 0, 0, 1) \
  V(I16x8Abs, vlp, 0, 0, 1)                \
  V(I16x8Neg, vlc, 0, 0, 1)                \
  V(I16x8SConvertI8x16Low, vupl, 0, 0, 0)  \
  V(I16x8SConvertI8x16High, vuph, 0, 0, 0) \
  V(I16x8UConvertI8x16Low, vupll, 0, 0, 0) \
  V(I16x8UConvertI8x16High, vuplh, 0, 0, 0) \
  V(I8x16Abs, vlp, 0, 0, 0)                \
  V(I8x16Neg, vlc, 0, 0, 0)                \
  V(I8x16Popcnt, vpopct, 0, 0, 0)

#define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3)                      \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \
    op(dst, src, Condition(c1), Condition(c2), Condition(c3));          \
SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
#undef EMIT_SIMD_UNOP_VRR_A
#undef SIMD_UNOP_LIST_VRR_A

#define SIMD_BINOP_LIST_VRR_B(V) \
  V(I64x2Eq, vceq, 0, 3)         \
  V(I64x2GtS, vch, 0, 3)         \
  V(I32x4Eq, vceq, 0, 2)         \
  V(I32x4GtS, vch, 0, 2)         \
  V(I32x4GtU, vchl, 0, 2)        \
  V(I16x8Eq, vceq, 0, 1)         \
  V(I16x8GtS, vch, 0, 1)         \
  V(I16x8GtU, vchl, 0, 1)        \
  V(I8x16Eq, vceq, 0, 0)         \
  V(I8x16GtS, vch, 0, 0)         \
  V(I8x16GtU, vchl, 0, 0)

#define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2)                            \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1,    \
                            Simd128Register src2) {                       \
    op(dst, src1, src2, Condition(c1), Condition(c2));                    \
SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
#undef EMIT_SIMD_BINOP_VRR_B
#undef SIMD_BINOP_LIST_VRR_B

#define SIMD_BINOP_LIST_VRR_C(V)           \
  V(F64x2Add, vfa, 0, 0, 3)                \
  V(F64x2Sub, vfs, 0, 0, 3)                \
  V(F64x2Mul, vfm, 0, 0, 3)                \
  V(F64x2Div, vfd, 0, 0, 3)                \
  V(F64x2Min, vfmin, 1, 0, 3)              \
  V(F64x2Max, vfmax, 1, 0, 3)              \
  V(F64x2Eq, vfce, 0, 0, 3)                \
  V(F64x2Pmin, vfmin, 3, 0, 3)             \
  V(F64x2Pmax, vfmax, 3, 0, 3)             \
  V(F32x4Add, vfa, 0, 0, 2)                \
  V(F32x4Sub, vfs, 0, 0, 2)                \
  V(F32x4Mul, vfm, 0, 0, 2)                \
  V(F32x4Div, vfd, 0, 0, 2)                \
  V(F32x4Min, vfmin, 1, 0, 2)              \
  V(F32x4Max, vfmax, 1, 0, 2)              \
  V(F32x4Eq, vfce, 0, 0, 2)                \
  V(F32x4Pmin, vfmin, 3, 0, 2)             \
  V(F32x4Pmax, vfmax, 3, 0, 2)             \
  V(I64x2Add, va, 0, 0, 3)                 \
  V(I64x2Sub, vs, 0, 0, 3)                 \
  V(I32x4Add, va, 0, 0, 2)                 \
  V(I32x4Sub, vs, 0, 0, 2)                 \
  V(I32x4Mul, vml, 0, 0, 2)                \
  V(I32x4MinS, vmn, 0, 0, 2)               \
  V(I32x4MinU, vmnl, 0, 0, 2)              \
  V(I32x4MaxS, vmx, 0, 0, 2)               \
  V(I32x4MaxU, vmxl, 0, 0, 2)              \
  V(I16x8Add, va, 0, 0, 1)                 \
  V(I16x8Sub, vs, 0, 0, 1)                 \
  V(I16x8Mul, vml, 0, 0, 1)                \
  V(I16x8MinS, vmn, 0, 0, 1)               \
  V(I16x8MinU, vmnl, 0, 0, 1)              \
  V(I16x8MaxS, vmx, 0, 0, 1)               \
  V(I16x8MaxU, vmxl, 0, 0, 1)              \
  V(I16x8RoundingAverageU, vavgl, 0, 0, 1) \
  V(I8x16Add, va, 0, 0, 0)                 \
  V(I8x16Sub, vs, 0, 0, 0)                 \
  V(I8x16MinS, vmn, 0, 0, 0)               \
  V(I8x16MinU, vmnl, 0, 0, 0)              \
  V(I8x16MaxS, vmx, 0, 0, 0)               \
  V(I8x16MaxU, vmxl, 0, 0, 0)              \
  V(I8x16RoundingAverageU, vavgl, 0, 0, 0) \
  V(S128And, vn, 0, 0, 0)                  \
  V(S128Or, vo, 0, 0, 0)                   \
  V(S128Xor, vx, 0, 0, 0)                  \
  V(S128AndNot, vnc, 0, 0, 0)

#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3)                     \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1,  \
                            Simd128Register src2) {                     \
    op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3));   \
SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C

#define SIMD_SHIFT_LIST(V) \
  V(I64x2Shl, veslv, 3)    \
  V(I64x2ShrS, vesrav, 3)  \
  V(I64x2ShrU, vesrlv, 3)  \
  V(I32x4Shl, veslv, 2)    \
  V(I32x4ShrS, vesrav, 2)  \
  V(I32x4ShrU, vesrlv, 2)  \
  V(I16x8Shl, veslv, 1)    \
  V(I16x8ShrS, vesrav, 1)  \
  V(I16x8ShrU, vesrlv, 1)  \
  V(I8x16Shl, veslv, 0)    \
  V(I8x16ShrS, vesrav, 0)  \
  V(I8x16ShrU, vesrlv, 0)

#define EMIT_SIMD_SHIFT(name, op, c1)                                     \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1,   \
                            Register src2, Simd128Register scratch) {    \
    vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1));               \
    vrep(scratch, scratch, Operand(0), Condition(c1));                   \
    op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1));   \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1,   \
                            const Operand& src2, Register scratch1,      \
                            Simd128Register scratch2) {                  \
    mov(scratch1, src2);                                                 \
    name(dst, src1, scratch1, scratch2);                                 \
#undef EMIT_SIMD_SHIFT
#undef SIMD_SHIFT_LIST

#define SIMD_EXT_MUL_LIST(V)                    \
  V(I64x2ExtMulLowI32x4S, vme, vmo, vmrl, 2)    \
  V(I64x2ExtMulHighI32x4S, vme, vmo, vmrh, 2)   \
  V(I64x2ExtMulLowI32x4U, vmle, vmlo, vmrl, 2)  \
  V(I64x2ExtMulHighI32x4U, vmle, vmlo, vmrh, 2) \
  V(I32x4ExtMulLowI16x8S, vme, vmo, vmrl, 1)    \
  V(I32x4ExtMulHighI16x8S, vme, vmo, vmrh, 1)   \
  V(I32x4ExtMulLowI16x8U, vmle, vmlo, vmrl, 1)  \
  V(I32x4ExtMulHighI16x8U, vmle, vmlo, vmrh, 1) \
  V(I16x8ExtMulLowI8x16S, vme, vmo, vmrl, 0)    \
  V(I16x8ExtMulHighI8x16S, vme, vmo, vmrh, 0)   \
  V(I16x8ExtMulLowI8x16U, vmle, vmlo, vmrl, 0)  \
  V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0)

#define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode)               \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1,        \
                            Simd128Register src2, Simd128Register scratch) {  \
    mul_even(scratch, src1, src2, Condition(0), Condition(0),                 \
    mul_odd(dst, src1, src2, Condition(0), Condition(0), Condition(mode));    \
    merge(dst, scratch, dst, Condition(0), Condition(0), Condition(mode + 1)); \
#undef EMIT_SIMD_EXT_MUL
#undef SIMD_EXT_MUL_LIST

#define SIMD_ALL_TRUE_LIST(V) \
  V(I64x2AllTrue, 3)          \
  V(I32x4AllTrue, 2)          \
  V(I16x8AllTrue, 1)          \

#define EMIT_SIMD_ALL_TRUE(name, mode)                                 \
  void MacroAssembler::name(Register dst, Simd128Register src,        \
                            Register scratch1, Simd128Register scratch2) { \
    mov(scratch1, Operand(1));                                        \
    vx(scratch2, scratch2, scratch2, Condition(0), Condition(0),      \
    vceq(scratch2, src, scratch2, Condition(0), Condition(mode));     \
    vtm(scratch2, scratch2, Condition(0), Condition(0), Condition(0)); \
    locgr(Condition(8), dst, scratch1);                               \
#undef EMIT_SIMD_ALL_TRUE
#undef SIMD_ALL_TRUE_LIST

#define SIMD_QFM_LIST(V) \
  V(F64x2Qfma, vfma, 3)  \
  V(F64x2Qfms, vfnms, 3) \
  V(F32x4Qfma, vfma, 2)  \
  V(F32x4Qfms, vfnms, 2)

#define EMIT_SIMD_QFM(name, op, c1)                                     \
  void MacroAssembler::name(Simd128Register dst, Simd128Register src1,  \
                            Simd128Register src2, Simd128Register src3) { \
    op(dst, src1, src2, src3, Condition(c1), Condition(0));             \
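// I64x2Mul is lowered through general-purpose registers: each doubleword
// lane is moved out, multiplied with MulS64, and the two products are
// packed back into the vector with VLVGP.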
    Register scratch2, Register scratch3) {
  for (int i = 0; i < 2; i++) {
    MulS64(scratch_1, scratch_2);
    scratch_1 = scratch2;
    scratch_2 = scratch3;
  vlvgp(dst, scratch1, scratch2);

  mov(scratch1, Operand(0x8080808080800040));
  mov(scratch1, Operand(0x8080808000204060));
  mov(scratch1, Operand(0x10203040506070));
    Register scratch1, Register scratch2,
  mov(scratch1, Operand(0x4048505860687078));
  mov(scratch2, Operand(0x8101820283038));
  vlvgp(scratch3, scratch2, scratch1);
  mov(dst, Operand(1));
  xgr(scratch, scratch);

#define CONVERT_FLOAT_TO_INT32(convert, dst, src, scratch1, scratch2) \
  for (int index = 0; index < 4; index++) {                           \
    vlgv(scratch2, src, MemOperand(r0, index), Condition(2));         \
    MovIntToFloat(scratch1, scratch2);                                \
    convert(scratch2, scratch1, kRoundToZero);                        \
    vlvg(dst, scratch2, MemOperand(r0, index), Condition(2));         \
    Register scratch2) {
    Register scratch2) {
#undef CONVERT_FLOAT_TO_INT32

#define CONVERT_INT32_TO_FLOAT(convert, dst, src, scratch1, scratch2) \
  for (int index = 0; index < 4; index++) {                           \
    vlgv(scratch2, src, MemOperand(r0, index), Condition(2));         \
    convert(scratch1, scratch2);                                      \
    MovFloatToInt(scratch2, scratch1);                                \
    vlvg(dst, scratch2, MemOperand(r0, index), Condition(2));         \
    Register scratch2) {
    Register scratch2) {
#undef CONVERT_INT32_TO_FLOAT

#define VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, mode)       \
  vx(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero, Condition(0), \
     Condition(0), Condition(mode));                               \
  vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0),   \
  vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode));
  VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, 2)
  VECTOR_PACK_UNSIGNED(dst, src1, src2, scratch, 1)
#undef VECTOR_PACK_UNSIGNED

#define BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, op, extract_high, \
                      extract_low, mode)                                     \
  DCHECK(dst != scratch1 && dst != scratch2);                                \
  DCHECK(dst != src1 && dst != src2);                                        \
  extract_high(scratch1, src1, Condition(0), Condition(0), Condition(mode)); \
  extract_high(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \
  op(dst, scratch1, scratch2, Condition(0), Condition(0),                    \
     Condition(mode + 1));                                                   \
  extract_low(scratch1, src1, Condition(0), Condition(0), Condition(mode));  \
  extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode));  \
  op(scratch1, scratch1, scratch2, Condition(0), Condition(0),               \
     Condition(mode + 1));
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuph, vupl, 1)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuph, vupl, 1)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuplh, vupll, 1)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuplh, vupll, 1)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuph, vupl, 0)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuph, vupl, 0)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, va, vuplh, vupll, 0)
  BINOP_EXTRACT(dst, src1, src2, scratch1, scratch2, vs, vuplh, vupll, 0)

    Register scratch2, Register scratch3,
    Register scratch4) {
  for (int index = 0; index < 2; ++index) {
    ldebr(scratch1, scratch1);
  vlvgp(dst, scratch3, scratch4);
    Register scratch2, Register scratch3,
    Register scratch4) {
  for (int index = 0; index < 2; ++index) {
    ledbr(scratch1, scratch1);

#define EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, lane_size, mul_even, \
  CHECK_NE(src, scratch2);                                                  \
  vrepi(scratch2, Operand(1), Condition(lane_size));                        \
  mul_even(scratch1, src, scratch2, Condition(0), Condition(0),             \
           Condition(lane_size));                                           \
  mul_odd(scratch2, src, scratch2, Condition(0), Condition(0),              \
          Condition(lane_size));                                            \
  va(dst, scratch1, scratch2, Condition(0), Condition(0),                   \
     Condition(lane_size + 1));
  EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo)
  EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo)
  EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vmle, vmlo)
#undef EXT_ADD_PAIRWISE

    Register scratch1, Register scratch2) {
  mov(scratch1, Operand(low));
  mov(scratch2, Operand(high));
  vlvgp(dst, scratch2, scratch1);
  vrepi(scratch3, Operand(31), Condition(0));
  lrvgr(scratch1, scratch1);
  lrvgr(scratch2, scratch2);
  vlvgp(dst, scratch2, scratch1);
    uint64_t low, Register scratch1,
  mov(scratch1, Operand(low));
  mov(scratch2, Operand(high));
  vlvgp(scratch3, scratch2, scratch1);
  vrepi(scratch2, Operand(1), Condition(1));
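// The Q15_MUL_ROUND helper below performs a Q15 fixed-point rounding
// multiply: each halfword lane is widened to 32 bits, multiplied, the
// rounding constant 0x4000 (loaded by the caller) is added, and the
// result is shifted right arithmetically by 15 before the high and low
// halves are packed back together.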
#define Q15_MUL_ROUND(accumulator, src1, src2, const_val, scratch, unpack) \
  unpack(scratch, src1, Condition(0), Condition(0), Condition(1));         \
  unpack(accumulator, src2, Condition(0), Condition(0), Condition(1));     \
  vml(accumulator, scratch, accumulator, Condition(0), Condition(0),       \
  va(accumulator, accumulator, const_val, Condition(0), Condition(0),      \
  vrepi(scratch, Operand(15), Condition(2));                               \
  vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0),    \

  vrepi(scratch1, Operand(0x4000), Condition(2));
  Q15_MUL_ROUND(scratch2, src1, src2, scratch1, scratch3, vupl)
  Q15_MUL_ROUND(dst, src1, src2, scratch1, scratch3, vuph)
#undef Q15_MUL_ROUND
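// Little-endian vector memory accessors: on big-endian hosts with
// VECTOR_ENHANCE_FACILITY_2 the byte-reversing vector instructions
// (vlbrrep, vlebr*, vstebr*) are used directly; otherwise the value is
// byte-reversed through a GP register with LoadU*LE/StoreU*LE and then
// inserted into or extracted from the vector lane.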
#ifdef V8_TARGET_BIG_ENDIAN
#define IS_BIG_ENDIAN true
#define IS_BIG_ENDIAN false

#define CAN_LOAD_STORE_REVERSE \
  IS_BIG_ENDIAN && CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)

#define LOAD_SPLAT_LIST(V)       \
  V(64x2, vlbrrep, LoadU64LE, 3) \
  V(32x4, vlbrrep, LoadU32LE, 2) \
  V(16x8, vlbrrep, LoadU16LE, 1) \
  V(8x16, vlrep, LoadU8, 0)

#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition)       \
  void MacroAssembler::LoadAndSplat##name##LE(                        \
      Simd128Register dst, const MemOperand& mem, Register scratch) { \
    if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {          \
      vector_instr(dst, mem, Condition(condition));                   \
    scalar_instr(scratch, mem);                                       \
    vlvg(dst, scratch, MemOperand(r0, 0), Condition(condition));      \
    vrep(dst, dst, Operand(0), Condition(condition));                 \
#undef LOAD_SPLAT_LIST

#define LOAD_EXTEND_LIST(V) \
  V(32x2U, vuplh, 2)        \
  V(16x4U, vuplh, 1)        \

#define LOAD_EXTEND(name, unpack_instr, condition)                            \
  void MacroAssembler::LoadAndExtend##name##LE(                               \
      Simd128Register dst, const MemOperand& mem, Register scratch) {         \
    if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {                  \
      vlebrg(dst, mem, Condition(0));                                         \
    LoadU64LE(scratch, mem);                                                  \
    vlvg(dst, scratch, MemOperand(r0, 0), Condition(3));                      \
    unpack_instr(dst, dst, Condition(0), Condition(0), Condition(condition)); \

  if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
  if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {

#define LOAD_LANE_LIST(V)     \
  V(64, vlebrg, LoadU64LE, 3) \
  V(32, vlebrf, LoadU32LE, 2) \
  V(16, vlebrh, LoadU16LE, 1) \
  V(8, vleb, LoadU8, 0)

#define LOAD_LANE(name, vector_instr, scalar_instr, condition)              \
  void MacroAssembler::LoadLane##name##LE(Simd128Register dst,              \
                                          const MemOperand& mem, int lane,  \
                                          Register scratch) {               \
    if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {                \
      vector_instr(dst, mem, Condition(lane));                              \
    scalar_instr(scratch, mem);                                             \
    vlvg(dst, scratch, MemOperand(r0, lane), Condition(condition));         \
#undef LOAD_LANE_LIST

#define STORE_LANE_LIST(V)      \
  V(64, vstebrg, StoreU64LE, 3) \
  V(32, vstebrf, StoreU32LE, 2) \
  V(16, vstebrh, StoreU16LE, 1) \
  V(8, vsteb, StoreU8, 0)

#define STORE_LANE(name, vector_instr, scalar_instr, condition)             \
  void MacroAssembler::StoreLane##name##LE(Simd128Register src,             \
                                           const MemOperand& mem, int lane, \
                                           Register scratch) {              \
    if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {                \
      vector_instr(src, mem, Condition(lane));                              \
    vlgv(scratch, src, MemOperand(r0, lane), Condition(condition));         \
    scalar_instr(scratch, mem);                                             \
#undef STORE_LANE_LIST
#undef CAN_LOAD_STORE_REVERSE

      : IsolateData::jslimit_offset();
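// Switch emits a jump table: the case value is rebased by
// case_value_base, bounds-checked against num_labels (out-of-range values
// fall through), scaled by the 8-byte entry size (entry_size_log2 == 3),
// and added to the table base obtained with LARL before the indirect
// branch.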
    int case_value_base, Label** labels,
  Label fallthrough, jump_table;
  if (case_value_base != 0) {
    SubS64(value, value, Operand(case_value_base));
  CmpU64(value, Operand(num_labels));
  int entry_size_log2 = 3;
  larl(r1, &jump_table);
  for (int i = 0; i < num_labels; ++i) {

    Register code, Register scratch, Label* if_marked_for_deoptimization) {
  bne(if_marked_for_deoptimization);
    Label* if_turbofanned) {
  bne(if_turbofanned);
    Register feedback_vector,
  Label fallthrough, clear_slot;
  LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
  if (min_opt_level == CodeKind::TURBOFAN_JS) {
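// CallApiFunctionAndReturn: calls a C++ API function from generated code,
// bracketing the call with HandleScope bookkeeping (next/limit/level are
// kept in callee-saved registers and restored afterwards), optionally
// detouring through a profiler thunk, and propagating any scheduled
// exception before returning.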
    Register function_address,
    ExternalReference thunk_ref, Register thunk_arg,
    int slots_to_drop_on_return,
  using ER = ExternalReference;
  Isolate* isolate = masm->isolate();
      ER::handle_scope_next_address(isolate), no_reg);
      ER::handle_scope_limit_address(isolate), no_reg);
      ER::handle_scope_level_address(isolate), no_reg);
  Register prev_next_address_reg = r14;
  Register prev_next_address_reg = r6;
      scratch, scratch2, prev_next_address_reg, prev_limit_reg));
      scratch, scratch2, prev_next_address_reg, prev_limit_reg));
      scratch, scratch2, prev_next_address_reg, prev_limit_reg));
      "Allocate HandleScope in callee-save registers.");
  __ LoadU64(prev_next_address_reg, next_mem_op);
  __ LoadU64(prev_limit_reg, limit_mem_op);
  __ LoadU32(prev_level_reg, level_mem_op);
  __ AddS64(scratch, prev_level_reg, Operand(1));

  Label profiler_or_side_effects_check_enabled, done_api_call;
  if (with_profiling) {
    __ RecordComment("Check if profiler or side effects check is enabled");
#ifdef V8_RUNTIME_CALL_STATS
    __ Move(scratch, ER::address_of_runtime_stats_flag());
  __ mov(scratch, function_address);
  __ zosStoreReturnAddressAndCall(function_address, scratch);

  Label propagate_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
      "No more valid handles (the result handle was the last one)."
      "Restore previous handle scope.");
  __ StoreU64(prev_next_address_reg, next_mem_op);
  __ CmpS64(scratch, prev_level_reg);
  __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
  __ CmpS64(prev_limit_reg, limit_mem_op);
  __ bind(&leave_exit_frame);
  Register argc_reg = prev_limit_reg;
  if (argc_operand != nullptr) {
      "Check if the function scheduled an exception.");
      ER::exception_address(isolate), no_reg));
         AbortReason::kAPICallReturnedInvalidObject);
  if (argc_operand == nullptr) {

  if (with_profiling) {
    __ bind(&profiler_or_side_effects_check_enabled);
    if (thunk_arg.is_valid()) {
          IsolateFieldId::kApiCallbackThunkArgument);
    __ Move(scratch, thunk_ref);
    __ zosStoreReturnAddressAndCall(function_address, scratch);
    __ b(&done_api_call);

  __ bind(&propagate_exception);
      masm, "HandleScope limit has changed. Delete allocated extensions.");
  __ bind(&delete_allocated_handles);
  Register saved_result = prev_limit_reg;
  __ mov(saved_result, return_value);
  __ mov(return_value, saved_result);
void CountTrailingZerosU64(Register dst, Register src, Register scratch1=ip, Register scratch2=r0, RCBit r=LeaveRC)
void LoadF32AsF64(DoubleRegister dst, const MemOperand &opnd)
void TestCodeIsMarkedForDeoptimization(Register code, Register scratch)
void AtomicExchangeU8(Register addr, Register value, Register output, Register scratch)
void LoadU64LE(Register dst, const MemOperand &mem, Register scratch)
void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE
void JumpIfIsInRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit, Label *on_in_range)
void LoadU8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void DecompressTaggedSigned(const Register &destination, const MemOperand &field_operand)
void I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2, Simd128Register scratch3)
void SubFloat32(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void Drop(int count, Condition cond=al)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void XorP(Register dst, Register src)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void MovFromFloatResult(DwVfpRegister dst)
void PopCommonFrame(Register marker_reg=no_reg)
void RotateInsertSelectBits(Register dst, Register src, const Operand &startBit, const Operand &endBit, const Operand &shiftAmt, bool zeroBits)
void mov(Register rd, Register rj)
void CompareTagged(Register src1, Register src2, CRegister cr=cr0)
void ModS32(Register dst, Register src, Register value)
void IsObjectType(Register heap_object, Register scratch1, Register scratch2, InstanceType type)
MemOperand StackLimitAsMemOperand(StackLimitKind kind)
void ModS64(Register dst, Register src, Register value)
void I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void SmiUntag(Register reg, SBit s=LeaveCC)
void AssertFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void TestIfSmi(Register value, Register scratch)
void I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void MulHighU32(Register dst, Register src1, const MemOperand &src2)
void AssertNotSmi(Register object, AbortReason reason=AbortReason::kOperandIsASmi) NOOP_UNLESS_DEBUG_CODE
void MulS32(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void AssertGeneratorObject(Register object) NOOP_UNLESS_DEBUG_CODE
static int CallSizeNotPredictableCodeSize(Address target, RelocInfo::Mode rmode, Condition cond=al)
void ConvertFloat32ToUnsignedInt32(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void CompareLogicalChar(const MemOperand &opnd1, const MemOperand &opnd2, const Operand &length)
void LoadEntryFromBuiltin(Builtin builtin, Register destination)
void DivFloat64(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void F32x4SConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void PushStandardFrame(Register function_reg)
void StoreF64(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
void LoadAndSub32(Register dst, Register src, const MemOperand &opnd)
void AtomicCmpExchangeHelper(Register addr, Register output, Register old_value, Register new_value, int start, int end, int shift_amount, int offset, Register temp0, Register temp1)
void I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void CompareRoot(Register obj, RootIndex index)
void I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void LoadV64ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void MovFromFloatParameter(DwVfpRegister dst)
void StoreU64LE(Register src, const MemOperand &mem, Register scratch)
void LoadF32(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void ModU64(Register dst, Register src, Register value)
void LoadV32ZeroLE(Simd128Register dst, const MemOperand &mem, Register scratch1, Simd128Register scratch2)
void Move(Register dst, Tagged< Smi > smi)
void I32x4BitMask(Register dst, VRegister src)
void CountTrailingZerosU32(Register dst, Register src, Register scratch1=ip, Register scratch2=r0, RCBit r=LeaveRC)
void MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void Assert(Condition cond, AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void StoreF32LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
void I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void StoreReturnAddressAndCall(Register target)
void I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void StoreU16LE(Register src, const MemOperand &mem, Register scratch)
void StackOverflowCheck(Register num_args, Register scratch, Label *stack_overflow)
void AssertFeedbackVector(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void CallBuiltinByIndex(Register builtin_index, Register target)
void I16x8Splat(Simd128Register dst, Register src)
void LoadRootRelative(Register destination, int32_t offset) final
void AtomicCmpExchangeU16(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1)
void JumpIfSmi(Register value, Label *smi_label)
void TruncF64(DoubleRegister dst, DoubleRegister src)
void LoadPC(Register dst)
void BranchRelativeOnIdxHighP(Register dst, Register inc, Label *L)
void ShiftRightU64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void LoadU16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void MultiPopV128(Simd128RegList dregs, Register scratch, Register location=sp)
void MultiPush(RegList regs)
void CallCodeObject(Register code_object)
void AssertUnreachable(AbortReason reason) NOOP_UNLESS_DEBUG_CODE
void SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void I32x4UConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void ConvertFloat32ToUnsignedInt64(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void DivS32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void SwapDouble(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch)
void LoadRootRegisterOffset(Register destination, intptr_t offset) final
void LoadS32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void SubU32(Register dst, const MemOperand &opnd)
void LoadCodeInstructionStart(Register destination, Register code_object, CodeEntrypointTag tag=kDefaultCodeEntrypointTag)
void CheckDebugHook(Register fun, Register new_target, Register expected_parameter_count_or_dispatch_handle, Register actual_parameter_count)
void CountLeadingZerosU32(Register dst, Register src, RCBit r=LeaveRC)
void I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scrahc2, Register scratch3, Simd128Register scratch4)
void I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2)
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind, Label *flags_need_processing)
void LoadFeedbackVector(Register dst, Register closure, Register scratch, Label *fbv_undef)
void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src)
void EmitIncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void StoreV128LE(Simd128Register src, const MemOperand &mem, Register scratch1, Register scratch2)
void SwapFloat32(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch)
void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void CmpAndSwap64(Register old_val, Register new_val, const MemOperand &opnd)
void BailoutIfDeoptimized()
void LoadS32LE(Register dst, const MemOperand &mem, Register scratch)
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg) const
void CmpU32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void LoadU16LE(Register dst, const MemOperand &mem, Register scratch)
void DoubleMax(DoubleRegister result_reg, DoubleRegister left_reg, DoubleRegister right_reg)
void MultiPopDoubles(DoubleRegList dregs, Register location=sp)
void CompareTaggedRoot(Register with, RootIndex index)
void ModU32(Register dst, Register src, Register value)
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void EnforceStackAlignment()
void SmiTag(Register reg, SBit s=LeaveCC)
void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label *out_of_line)
void LoadOnConditionP(Condition cond, Register dst, Register src)
void I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void MovDoubleToInt64(Register dst, DoubleRegister src)
void ConvertFloat32ToInt32(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode)
void F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, DoubleRegister scratch2, Simd128Register scratch3)
void PushArray(Register array, Register size, Register scratch, PushArrayOrder order=PushArrayOrder::kNormal)
void I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3)
void Sqrt(DoubleRegister result, DoubleRegister input)
void EnterExitFrame(Register scratch, int stack_space, StackFrame::Type frame_type)
void TestIfInt32(Register value, Register scratch, CRegister cr=cr0)
void ConvertDoubleToUnsignedInt32(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void MovToFloatResult(DwVfpRegister src)
void AddS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst)
void RecordWriteField(Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void CallJSEntry(Register target)
void SubS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void ShiftRightU32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void StoreF64LE(DoubleRegister src, const MemOperand &mem, Register scratch, Register scratch2)
MemOperand ExternalReferenceAsOperand(ExternalReference reference, Register scratch)
void Zero(const MemOperand &dest)
void ShiftRightS32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void MovToFloatParameter(DwVfpRegister src)
void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch)
void AssertSmi(Register object, AbortReason reason=AbortReason::kOperandIsNotASmi) NOOP_UNLESS_DEBUG_CODE
void I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void PushCommonFrame(Register marker_reg=no_reg)
void LoadAndSub64(Register dst, Register src, const MemOperand &opnd)
void JumpIfEqual(Register x, int32_t y, Label *dest)
void Not32(Register dst, Register src=no_reg)
int LeaveFrame(StackFrame::Type type)
void GetLabelAddress(Register dst, Label *target)
void StoreF32(DoubleRegister src, const MemOperand &mem, Register scratch=no_reg)
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void MulHighS64(Register dst, Register src1, Register src2)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3)
void JumpToExternalReference(const ExternalReference &builtin, bool builtin_exit_frame=false)
void ShiftLeftU32(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
Operand ClearedValue() const
void SwapSimd128(Simd128Register src, Simd128Register dst, Simd128Register scratch)
void CmpU64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void MultiPop(RegList regs)
void LoadF64(DoubleRegister dst, const MemOperand &mem, Register scratch=no_reg)
void I32x4Splat(Simd128Register dst, Register src)
void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, InvokeType type)
void AddS32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=LeaveRC)
void I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2)
void Jump(Register target, Condition cond=al)
void StoreU32(Register src, const MemOperand &mem, Register scratch)
void LoadRoot(Register destination, RootIndex index) final
void MultiPushF64OrV128(DoubleRegList dregs, Register scratch, Register location=sp)
void RecordWrite(Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, SmiCheck smi_check=SmiCheck::kInline)
void ClearRightImm(Register dst, Register src, const Operand &val)
void LoadAndTest32(Register dst, Register src)
void I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void StoreTaggedField(const Register &value, const MemOperand &dst_field_operand)
void MovInt64ToDouble(DoubleRegister dst, Register src)
void LoadF32LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg=false)
void InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, InvokeType type)
void LoadU32(Register dst, const MemOperand &mem, Register scratch=no_reg)
void Popcnt32(Register dst, Register src)
void CanonicalizeNaN(const VRegister &dst, const VRegister &src)
void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src)
void DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void SwapP(Register src, Register dst, Register scratch)
void LoadTaggedField(const Register &destination, const MemOperand &field_operand)
void CompareRange(Register value, Register scratch, unsigned lower_limit, unsigned higher_limit)
void S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2)
void JumpCodeObject(Register code_object, JumpMode jump_mode=JumpMode::kJump)
void I64x2Splat(Simd128Register dst, Register src)
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst)
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst)
void MulHighS32(Register dst, Register src1, const MemOperand &src2)
void BranchOnCount(Register r1, Label *l)
void JumpIfCodeIsTurbofanned(Register code, Register scratch, Label *if_turbofanned)
void EmitDecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
static int ActivationFrameAlignment()
void LoadFromConstantsTable(Register destination, int constant_index) final
void AddU32(Register dst, Register src1, Register src2)
void ShiftRightS64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
MemOperand EntryFromBuiltinAsOperand(Builtin builtin)
void LoadS8(Register dst, const MemOperand &mem, Register scratch=no_reg)
void CompareObjectTypeRange(Register heap_object, Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void ConvertDoubleToInt32(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void CeilF32(DoubleRegister dst, DoubleRegister src)
void ComputeCodeStartAddress(Register dst)
void MaybeSaveRegisters(RegList registers)
void LoadV128LE(DoubleRegister dst, const MemOperand &mem, Register scratch0, Register scratch1)
void CheckPageFlag(Register object, int mask, Condition cc, Label *condition_met)
void LoadTaggedRoot(Register destination, RootIndex index)
void FloorF64(DoubleRegister dst, DoubleRegister src)
void LoadPositive32(Register result, Register input)
void LoadPositiveP(Register result, Register input)
void I16x8BitMask(Register dst, VRegister src)
void I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch)
void SubS32(Register dst, Register src, const Operand &value, Register scratch=r0, RCBit r=LeaveRC)
int CallCFunction(ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots=SetIsolateDataSlots::kYes, Label *return_label=nullptr)
void JumpJSFunction(Register function_object, JumpMode jump_mode=JumpMode::kJump)
void I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void I64x2BitMask(Register dst, QwNeonRegister src)
void AtomicExchangeHelper(Register addr, Register value, Register output, int start, int end, int shift_amount, int offset, Register scratch)
void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE
void ConvertDoubleToUnsignedInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void LoadCompressedMap(Register dst, Register object)
Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(Register flags, Register feedback_vector, CodeKind current_code_kind)
void CallRuntime(const Runtime::Function *f, int num_arguments)
void LoadWeakValue(Register out, Register in, Label *target_if_cleared)
void CallBuiltin(Builtin builtin, Condition cond=al)
void DivU32(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void Popcnt64(Register dst, Register src)
void SubFloat64(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src)
void MulS64(Register dst, Register src, const Operand &value, Register scratch=r0, OEBit s=LeaveOE, RCBit r=LeaveRC)
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id)
void TruncateDoubleToI(Isolate *isolate, Zone *zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode)
void ShiftLeftU64(Register dst, Register src, const Operand &value, RCBit r=LeaveRC)
void CmpS64(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void StoreMultipleP(Register dst1, Register dst2, const MemOperand &mem)
void AssertJSAny(Register object, Register map_tmp, Register tmp, AbortReason abort_reason) NOOP_UNLESS_DEBUG_CODE
void FloorF32(DoubleRegister dst, DoubleRegister src)
void LoadSmiLiteral(Register dst, Tagged< Smi > smi)
void Xor(Register dst, Register src)
void CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode)
void Check(Condition cond, AbortReason reason)
void DivU64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, Register scratch4)
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void LoadU32LE(Register dst, const MemOperand &mem, Register scratch)
void OrP(Register dst, Register src)
void MovIntToFloat(DoubleRegister dst, Register src, Register scratch)
void Or(Register dst, Register src)
void CallForDeoptimization(Builtin target, int deopt_id, Label *exit, DeoptimizeKind kind, Label *ret, Label *jump_deoptimization_entry_label)
void I32x4DotI8x16AddS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register src3)
void DropArgumentsAndPushNewReceiver(Register argc, Register receiver)
void F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2)
void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, Label *if_marked_for_deoptimization)
void LoadMultipleW(Register dst1, Register dst2, const MemOperand &mem)
void NotP(Register dst, Register src=no_reg)
void LoadEntryFromBuiltinIndex(Register builtin_index, Register target)
void MoveChar(const MemOperand &opnd1, const MemOperand &opnd2, const Operand &length)
void AssertZeroExtended(Register int32_register)
void AddFloat32(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void CmpS32(Register src1, const Operand &src2, Register scratch, CRegister cr=cr0)
void RestoreFrameStateForTailCall()
void StoreU64(Register src, const MemOperand &mem, Register scratch=no_reg)
void MulFloat64(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void StoreU16(Register src, const MemOperand &mem, Register scratch)
void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure)
void AddFloat64(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void StoreU8(Register src, const MemOperand &mem, Register scratch)
void LoadMultipleP(Register dst1, Register dst2, const MemOperand &mem)
void MultiPushV128(Simd128RegList dregs, Register scratch, Register location=sp)
void ExclusiveOrChar(const MemOperand &opnd1, const MemOperand &opnd2, const Operand &length)
void JumpIfLessThan(Register x, int32_t y, Label *dest)
void AssertBoundFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void CeilF64(DoubleRegister dst, DoubleRegister src)
void TestBitMask(Register value, uintptr_t mask, Register scratch=r0)
void CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void Branch(Label *label, bool need_link=false)
void Not64(Register dst, Register src=no_reg)
void AddU64(Register dst, const Operand &imm)
void I8x16BitMask(Register dst, VRegister src, VRegister temp=NoVReg)
void DivS64(Register dst, Register src, Register value, OEBit s=LeaveOE, RCBit r=LeaveRC)
void I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch)
void CompareInstanceTypeRange(Register map, Register type_reg, Register scratch, InstanceType lower_limit, InstanceType higher_limit)
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, Register feedback_vector)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers=0, Register scratch=no_reg)
void LoadIsolateField(Register dst, IsolateFieldId id)
void LoadU64(Register dst, const MemOperand &mem, Register scratch=no_reg)
void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, const MemOperand &src2)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void MaybeRestoreRegisters(RegList registers)
void CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode=StubCallMode::kCallBuiltinPointer)
void ConvertFloat32ToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode=kRoundToZero)
void CmpF64(DoubleRegister src1, DoubleRegister src2)
void TryLoadOptimizedOsrCode(Register scratch_and_result, CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label *on_result, Label::Distance distance)
void I8x16Splat(Simd128Register dst, Register src)
void CmpSmiLiteral(Register src1, Tagged< Smi > smi, Register scratch, CRegister cr=cr0)
void LoadF64LE(DoubleRegister dst, const MemOperand &mem, Register scratch, Register scratch2)
void F32x4Splat(Simd128Register dst, DoubleRegister src, DoubleRegister scratch1, Register scratch2)
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void AssertUndefinedOrAllocationSite(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE
void MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r=LeaveRC)
void LoadAndTestP(Register dst, Register src)
void StoreMultipleW(Register dst1, Register dst2, const MemOperand &mem)
void NearestIntF64(DoubleRegister dst, DoubleRegister src)
void SmiUntagField(Register dst, const MemOperand &src)
void PopAll(RegList registers)
void StubPrologue(StackFrame::Type type)
void TruncF32(DoubleRegister dst, DoubleRegister src)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void StoreU32LE(Register src, const MemOperand &mem, Register scratch)
void DecompressTagged(const Register &destination, const MemOperand &field_operand)
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch)
void StoreRootRelative(int32_t offset, Register value) final
void DoubleMin(DoubleRegister result_reg, DoubleRegister left_reg, DoubleRegister right_reg)
void F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, Simd128Register scratch2)
void CmpF32(DoubleRegister src1, DoubleRegister src2)
void CountLeadingZerosU64(Register dst, Register src, RCBit r=LeaveRC)
void LoadTaggedSignedField(const Register &destination, const MemOperand &field_operand)
void LoadMap(Register destination, Register object)
void CmpAndSwap(Register old_val, Register new_val, const MemOperand &opnd)
void TailCallRuntime(Runtime::FunctionId fid)
void MultiPopF64OrV128(DoubleRegList dregs, Register scratch, Register location=sp)
void TestBit(Register value, int bitNumber, Register scratch=r0)
void LoadNativeContextSlot(Register dst, int index)
void I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2)
void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label *out_of_line)
void AtomicCmpExchangeU8(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1)
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst)
static const int kSmiShift
void LoadTaggedFieldWithoutDecompressing(const Register &destination, const MemOperand &field_operand)
void TailCallBuiltin(Builtin builtin, Condition cond=al)
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst)
void MulFloat32(DoubleRegister dst, const MemOperand &opnd, DoubleRegister scratch)
void F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch)
void AtomicExchangeU16(Register addr, Register value, Register output, Register scratch)
void I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch)
void LoadS16(Register dst, const MemOperand &mem, Register scratch=no_reg)
void ConvertDoubleToInt64(const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode=kRoundToZero)
void LoadV128(Simd128Register dst, const MemOperand &mem, Register scratch)
void Switch(Register scratch, Register value, int case_value_base, Label **labels, int num_labels)
void I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3)
void AndP(Register dst, Register src)
void DropArguments(Register count)
void AssertCallableFunction(Register object) NOOP_UNLESS_DEBUG_CODE
void MultiPushDoubles(DoubleRegList dregs, Register location=sp)
static constexpr MainThreadFlags kPointersToHereAreInterestingMask
static constexpr intptr_t FlagsOffset()
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask
static V8_INLINE Operand Zero()
static constexpr int8_t kNumRegisters
static constexpr DwVfpRegister from_code(int8_t code)
static const RegisterConfiguration * Default()
static constexpr Register from_code(int code)
static constexpr bool IsCompressedEmbeddedObject(Mode mode)
static constexpr bool IsCodeTarget(Mode mode)
static constexpr bool IsFullEmbeddedObject(Mode mode)
static constexpr bool IsImmortalImmovable(RootIndex root_index)
static V8_EXPORT_PRIVATE const Function * FunctionForId(FunctionId id)
void S128Not(XMMRegister dst, XMMRegister src, XMMRegister scratch)
void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src, XMMRegister tmp)
void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src, XMMRegister scratch, Register tmp)
void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src, Register scratch)
void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src, Register scratch)
static constexpr Tagged< Smi > FromInt(int value)
static constexpr int32_t TypeToMarker(Type type)
static bool IsJavaScript(Type t)
static const int kNextOffset
static constexpr int OffsetOfElementAt(int index)
static constexpr int kFixedFrameSizeFromFp
static constexpr Register ObjectRegister()
static constexpr RegList ComputeSavedRegisters(Register object, Register slot_address=no_reg)
static constexpr Register SlotAddressRegister()
static constexpr Builtin GetRecordWriteBuiltin(SaveFPRegsMode fp_mode)
#define EMIT_SIMD_QFM(name)
#define EMIT_SIMD_ALL_TRUE(name)
#define EMIT_SIMD_SHIFT(name)
#define LOAD_EXTEND(type)
#define EMIT_SIMD_EXT_MUL(name)
#define STORE_LANE(type, lane)
#define LOAD_LANE(type, lane)
#define ASM_CODE_COMMENT_STRING(asm,...)
#define ASM_CODE_COMMENT(asm)
#define V8_ENABLE_LEAPTIERING_BOOL
#define COMPRESS_POINTERS_BOOL
#define V8_EMBEDDED_CONSTANT_POOL_BOOL
#define V8_ENABLE_SANDBOX_BOOL
DirectHandle< Object > new_target
std::optional< TNode< JSArray > > a
RoundingMode rounding_mode
ZoneVector< RpoNumber > & result
#define SIMD_ALL_TRUE_LIST(V)
#define SIMD_SHIFT_LIST(V)
#define SIMD_EXT_MUL_LIST(V)
RegListBase< RegisterT > registers
InstructionOperand destination
constexpr unsigned CountLeadingZeros64(uint64_t value)
constexpr bool IsPowerOfTwo(T value)
constexpr int WhichPowerOfTwo(T value)
V8_INLINE Dest bit_cast(Source const &source)
constexpr bool IsInRange(T value, U lower_limit, U higher_limit)
uint32_t WasmInterpreterRuntime int64_t r0
constexpr Register no_reg
V8_INLINE IndirectHandle< T > handle(Tagged< T > object, Isolate *isolate)
constexpr Register kRootRegister
constexpr VFPRoundingMode kRoundToNearest
RegListBase< DoubleRegister > DoubleRegList
constexpr VFPRoundingMode kRoundToMinusInf
constexpr int kTaggedSize
constexpr int kSimd128Size
const int kStackFrameRASlot
const int kNumRequiredStackFrameSlots
DwVfpRegister DoubleRegister
RegListBase< Register > RegList
constexpr bool CodeKindCanTierUp(CodeKind kind)
constexpr Register kJavaScriptCallTargetRegister
const Address kWeakHeapObjectMask
constexpr Register kJavaScriptCallArgCountRegister
constexpr int kSystemPointerSizeLog2
base::StrongAlias< JSDispatchHandleAliasTag, uint32_t > JSDispatchHandle
const int kNumCallerSavedDoubles
static const int kRegisterPassedArguments
@ ROUND_TO_NEAREST_TO_EVEN
QwNeonRegister Simd128Register
MemOperand FieldMemOperand(Register object, int offset)
constexpr VFPRoundingMode kRoundToPlusInf
constexpr int kSystemPointerSize
const char * GetAbortReason(AbortReason reason)
static constexpr int kMaxCParameters
constexpr uint32_t kZapValue
const int kStackFrameSPSlot
constexpr bool SmiValuesAre31Bits()
Condition NegateCondition(Condition cond)
@ LAST_CALLABLE_JS_FUNCTION_TYPE
@ FIRST_CALLABLE_JS_FUNCTION_TYPE
constexpr Register kWasmImplicitArgRegister
V8_EXPORT_PRIVATE bool AreAliased(const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg, const CPURegister ®5=NoReg, const CPURegister ®6=NoReg, const CPURegister ®7=NoReg, const CPURegister ®8=NoReg)
Tagged< ClearedWeakValue > ClearedValue(PtrComprCageBase cage_base)
constexpr LowDwVfpRegister kDoubleRegZero
V8_EXPORT_PRIVATE FlagValues v8_flags
const DoubleRegList kCallerSavedDoubles
const RegList kJSCallerSaved
Register ToRegister(int num)
constexpr bool SmiValuesAre32Bits()
constexpr Register kJavaScriptCallCodeStartRegister
constexpr int kJSDispatchTableEntrySizeLog2
constexpr Register kPtrComprCageBaseRegister
constexpr VFPRoundingMode kRoundToZero
void CallApiFunctionAndReturn(MacroAssembler *masm, bool with_profiling, Register function_address, ExternalReference thunk_ref, Register thunk_arg, int slots_to_drop_on_return, MemOperand *argc_operand, MemOperand return_value_operand)
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
constexpr Register kCArgRegs[]
constexpr int kDoubleSize
const uint32_t kClearedWeakHeapObjectLower32
@ kFirstStrongOrReadOnlyRoot
@ kLastStrongOrReadOnlyRoot
constexpr uint32_t kMaxUInt32
Condition to_condition(Condition cond)
constexpr Register kJavaScriptCallNewTargetRegister
constexpr Register kJSFunctionRegister
#define DCHECK_LE(v1, v2)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
#define DCHECK_LT(v1, v2)
#define DCHECK_EQ(v1, v2)
constexpr bool IsAligned(T value, U alignment)
#define OFFSET_OF_DATA_START(Type)