#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_

#ifdef V8_TARGET_ARCH_ARM
#include "src/maglev/arm/maglev-assembler-arm-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/maglev/riscv/maglev-assembler-riscv-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_S390X
#include "src/maglev/s390/maglev-assembler-s390-inl.h"
#else
#error "Maglev does not support this architecture."
#endif
namespace detail {

// CopyForDeferredHelper defines which argument types may be copied into a
// deferred-code closure. The unspecialized base case has no implementation,
// so unsupported types fail to compile.
template <typename T, typename Enable = void>
struct CopyForDeferredHelper;

// Helper for types that are copied by value.
template <typename T, typename Enable = void>
struct CopyForDeferredByValue {
  static T Copy(MaglevCompilationInfo* compilation_info, T node) {
    return node;
  }
};

// Node pointers are copied by value.
template <typename T>
struct CopyForDeferredHelper<
    T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
    : public CopyForDeferredByValue<T*> {};
// Arithmetic values and enums are copied by value.
template <typename T>
struct CopyForDeferredHelper<
    T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
    : public CopyForDeferredByValue<T> {};
template <typename T>
struct CopyForDeferredHelper<
    T, typename std::enable_if<std::is_enum<T>::value>::type>
    : public CopyForDeferredByValue<T> {};
// compiler::ObjectRef and its subclasses are copied by value.
template <typename T>
struct CopyForDeferredHelper<
    T, typename std::enable_if<
           std::is_base_of<compiler::ObjectRef, T>::value>::type>
    : public CopyForDeferredByValue<T> {};

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T&& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_info,
                                        std::forward<T>(value));
}
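// Illustrative only (MyPOD is a hypothetical type, not part of this header):
// a new argument type opts in to deferred-code copying by specializing
// CopyForDeferredHelper, e.g. by value for a trivially copyable struct:
//
//   template <>
//   struct CopyForDeferredHelper<MyPOD>
//       : public CopyForDeferredByValue<MyPOD> {};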
// Helper for extracting a callable's signature: exposes its parameter list as
// a std::tuple type plus a plain function pointer type.
template <typename Function>
struct FunctionArgumentsTupleHelper
    : public FunctionArgumentsTupleHelper<decltype(&Function::operator())> {};

template <typename C, typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (C::*)(A...) const> {
  using FunctionPointer = R (*)(A...);
  using Tuple = std::tuple<A...>;
  static constexpr size_t kSize = sizeof...(A);
};

template <typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (&)(A...)> {
  using FunctionPointer = R (*)(A...);
  using Tuple = std::tuple<A...>;
  static constexpr size_t kSize = sizeof...(A);
};
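// Illustrative only: for a non-capturing lambda
// `[](MaglevAssembler* masm, Register reg) {}`, this helper yields
//   FunctionPointer = void (*)(MaglevAssembler*, Register)
//   Tuple           = std::tuple<MaglevAssembler*, Register>
//   kSize           = 2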
template <typename T>
struct StripFirstTupleArg;

template <typename T1, typename... T>
struct StripFirstTupleArg<std::tuple<T1, T...>> {
  using Stripped = std::tuple<T...>;
};

template <typename Function>
class DeferredCodeInfoImpl final : public DeferredCodeInfo {
 public:
  using FunctionPointer =
      typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
  using Tuple = typename StripFirstTupleArg<
      typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;

  template <typename... InArgs>
  explicit DeferredCodeInfoImpl(
      MaglevCompilationInfo* compilation_info,
      MaglevAssembler::TemporaryRegisterScope::SavedData deferred_scratch,
      FunctionPointer function, InArgs&&... args)
      : function(function),
        args(CopyForDeferred(compilation_info, std::forward<InArgs>(args))...),
        deferred_scratch_(deferred_scratch) {}

  DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
  DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;

  void Generate(MaglevAssembler* masm) override {
    MaglevAssembler::TemporaryRegisterScope scratch_scope(masm,
                                                          deferred_scratch_);
#ifdef DEBUG
    masm->set_allow_call(allow_call_);
    masm->set_allow_deferred_call(allow_call_);
    masm->set_allow_allocate(allow_allocate_);
#endif  // DEBUG
    std::apply(function,
               std::tuple_cat(std::make_tuple(masm), std::move(args)));
#ifdef DEBUG
    masm->set_allow_call(false);
    masm->set_allow_deferred_call(false);
    masm->set_allow_allocate(false);
#endif  // DEBUG
  }

#ifdef DEBUG
  void set_allow_call(bool value) { allow_call_ = value; }
  void set_allow_allocate(bool value) { allow_allocate_ = value; }
#endif  // DEBUG

 private:
  FunctionPointer function;
  Tuple args;
  MaglevAssembler::TemporaryRegisterScope::SavedData deferred_scratch_;
#ifdef DEBUG
  bool allow_call_ = false;
  bool allow_allocate_ = false;
#endif  // DEBUG
};
}  // namespace detail

template <typename Function, typename... Args>
inline Label* MaglevAssembler::MakeDeferredCode(Function&& deferred_code_gen,
                                                Args&&... args) {
  using FunctionPointer =
      typename detail::FunctionArgumentsTupleHelper<Function>::FunctionPointer;
  static_assert(
      std::is_invocable_v<FunctionPointer, MaglevAssembler*,
                          decltype(detail::CopyForDeferred(
                              std::declval<MaglevCompilationInfo*>(),
                              std::declval<Args>()))...>,
      "Parameters of deferred_code_gen function should match arguments into "
      "MakeDeferredCode");

  TemporaryRegisterScope scratch_scope(this);
  using DeferredCodeInfoT = detail::DeferredCodeInfoImpl<Function>;
  DeferredCodeInfoT* deferred_code =
      compilation_info()->zone()->New<DeferredCodeInfoT>(
          compilation_info(), scratch_scope.CopyForDefer(), deferred_code_gen,
          std::forward<Args>(args)...);

#ifdef DEBUG
  deferred_code->set_allow_call(allow_deferred_call_);
  deferred_code->set_allow_allocate(allow_allocate_);
#endif  // DEBUG

  code_gen_state()->PushDeferredCode(deferred_code);
  return &deferred_code->deferred_code_label;
}
// Note: this deliberately takes a non-capturing lambda or a function pointer;
// deferred code may only use state that was explicitly copied into it.
template <typename Function, typename... Args>
inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
                                              Function&& deferred_code_gen,
                                              Args&&... args) {
  if (v8_flags.code_comments) {
    RecordComment("-- Jump to deferred code");
  }
  JumpIf(cond, MakeDeferredCode<Function, Args...>(
                   std::forward<Function>(deferred_code_gen),
                   std::forward<Args>(args)...));
}
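// A usage sketch (illustrative; `value` and `done` are hypothetical). The
// lambda body is emitted out of line at the end of the function, and its
// trailing arguments are snapshotted via CopyForDeferred when the deferred
// code is created:
//
//   ZoneLabelRef done(masm);
//   masm->JumpToDeferredIf(
//       kOverflow,
//       [](MaglevAssembler* masm, Register value, ZoneLabelRef done) {
//         masm->Abort(AbortReason::kUnexpectedValue);  // slow path goes here
//       },
//       value, done);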
template <typename T>
inline void AllocateSlow(MaglevAssembler* masm,
                         RegisterSnapshot register_snapshot, Register object,
                         Builtin builtin, T size_in_bytes, ZoneLabelRef done) {
  // Remove {object} from the snapshot, since it is the returned allocated
  // HeapObject.
  register_snapshot.live_registers.clear(object);
  register_snapshot.live_tagged_registers.clear(object);
  {
    SaveRegisterStateForCall save_register_state(masm, register_snapshot);
    using D = AllocateDescriptor;
    masm->Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
    masm->CallBuiltin(builtin);
    save_register_state.DefineSafepoint();
    masm->Move(object, kReturnRegister0);
  }
  masm->Jump(*done);
}
#if !defined(V8_TARGET_ARCH_RISCV64)
// RISC-V supplies its own implementation of this helper.
inline void MaglevAssembler::CompareInstanceTypeAndJumpIf(
    Register map, InstanceType type, Condition cond, Label* target,
    Label::Distance distance) {
  CompareInstanceType(map, type);
  JumpIf(cond, target, distance);
}
#endif
// Compare-and-deopt helpers: each fuses a comparison with EmitEagerDeoptIf.
template <typename NodeT>
inline void MaglevAssembler::CompareRootAndEmitEagerDeoptIf(
    Register reg, RootIndex index, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  CompareRoot(reg, index);
  EmitEagerDeoptIf(cond, reason, node);
}

template <typename NodeT>
inline void MaglevAssembler::CompareMapWithRootAndEmitEagerDeoptIf(
    Register reg, RootIndex index, Register scratch, Condition cond,
    DeoptimizeReason reason, NodeT* node) {
  CompareMapWithRoot(reg, index, scratch);
  EmitEagerDeoptIf(cond, reason, node);
}

template <typename NodeT>
inline void MaglevAssembler::CompareTaggedRootAndEmitEagerDeoptIf(
    Register reg, RootIndex index, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  CompareTaggedRoot(reg, index);
  EmitEagerDeoptIf(cond, reason, node);
}

template <typename NodeT>
inline void MaglevAssembler::CompareUInt32AndEmitEagerDeoptIf(
    Register reg, int imm, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  Cmp(reg, imm);
  EmitEagerDeoptIf(cond, reason, node);
}

template <typename NodeT>
inline void MaglevAssembler::CompareInstanceTypeRangeAndEagerDeoptIf(
    Register map, Register instance_type_out, InstanceType lower_limit,
    InstanceType higher_limit, Condition cond, DeoptimizeReason reason,
    NodeT* node) {
  CompareInstanceTypeRange(map, instance_type_out, lower_limit, higher_limit);
  EmitEagerDeoptIf(cond, reason, node);
}
// The BasicBlock-flavored compare-and-branch helpers forward to the
// Label-based versions, marking a target as a fallthrough when it is the
// next block; the Register/Register and IntPtr variants follow the same
// pattern.
inline void MaglevAssembler::CompareInt32AndBranch(
    Register r1, int32_t value, Condition cond, BasicBlock* if_true,
    BasicBlock* if_false, BasicBlock* next_block) {
  CompareInt32AndBranch(r1, value, cond, if_true->label(), Label::kFar,
                        if_true == next_block, if_false->label(), Label::kFar,
                        if_false == next_block);
}
inline void MaglevAssembler::Branch(Condition condition, Label* if_true,
                                    Label::Distance true_distance,
                                    bool fallthrough_when_true, Label* if_false,
                                    Label::Distance false_distance,
                                    bool fallthrough_when_false) {
  if (fallthrough_when_false) {
    if (fallthrough_when_true) {
      // If both paths are a fallthrough, do nothing.
      DCHECK_EQ(if_true, if_false);
      return;
    }
    // Jump over the false block if true, otherwise fall through into it.
    JumpIf(condition, if_true, true_distance);
  } else {
    // Jump to the false block if true.
    JumpIf(NegateCondition(condition), if_false, false_distance);
    // Jump to the true block if it's not the next block.
    if (!fallthrough_when_true) {
      Jump(if_true, true_distance);
    }
  }
}
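// Illustrative: when only the false target is a fallthrough, e.g.
//
//   Branch(kEqual, &on_equal, Label::kFar, /*fallthrough_when_true=*/false,
//          &next, Label::kNear, /*fallthrough_when_false=*/true);
//
// a single `JumpIf(kEqual, &on_equal)` is emitted and the false case simply
// falls through into the next block.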
inline void MaglevAssembler::LoadHeapNumberOrOddballValue(
    DoubleRegister result, Register object) {
  // HeapNumber and Oddball store their double value at the same offset, so a
  // single load handles both.
  static_assert(offsetof(HeapNumber, value_) ==
                offsetof(Oddball, to_number_raw_));
  LoadHeapNumberValue(result, object);
}
namespace detail {

#ifdef DEBUG
// Debug-only checks for builtin call arguments. ClobberedBy(written, arg)
// answers whether the registers already written in `written` clobber the
// source of `arg`.
inline bool ClobberedBy(RegList written_registers, Register reg) {
  return written_registers.has(reg);
}
inline bool ClobberedBy(RegList written_registers,
                        DirectHandle<Object> handle) {
  return false;
}
inline bool ClobberedBy(RegList written_registers, Tagged<TaggedIndex> index) {
  return false;
}
inline bool ClobberedBy(RegList written_registers, int32_t imm) {
  return false;
}
inline bool ClobberedBy(RegList written_registers, const Input& input) {
  if (!input.IsGeneralRegister()) return false;
  return ClobberedBy(written_registers, input.AssignedGeneralRegister());
}

inline bool ClobberedBy(DoubleRegList written_registers, DoubleRegister reg) {
  return written_registers.has(reg);
}
inline bool ClobberedBy(DoubleRegList written_registers,
                        DirectHandle<Object> handle) {
  return false;
}
inline bool ClobberedBy(DoubleRegList written_registers,
                        Tagged<TaggedIndex> index) {
  return false;
}
inline bool ClobberedBy(DoubleRegList written_registers, int32_t imm) {
  return false;
}
inline bool ClobberedBy(DoubleRegList written_registers, const Input& input) {
  if (!input.IsDoubleRegister()) return false;
  return ClobberedBy(written_registers, input.AssignedDoubleRegister());
}
// MachineTypeMatches(type, arg) checks that `arg` can legitimately be passed
// for a parameter of the given machine type. Registers and memory operands
// are opaque, so they always match.
inline bool MachineTypeMatches(MachineType type, Register reg) { return true; }
inline bool MachineTypeMatches(MachineType type, MemOperand reg) {
  return true;
}
inline bool MachineTypeMatches(MachineType type,
                               DirectHandle<HeapObject> handle) {
  return type.IsTagged() && !type.IsTaggedSigned();
}
inline bool MachineTypeMatches(MachineType type, Tagged<Smi> smi) {
  return type.IsTagged() && !type.IsTaggedPointer();
}
inline bool MachineTypeMatches(MachineType type, Tagged<TaggedIndex> index) {
  // TaggedIndex behaves like a Smi for parameter-passing purposes.
  return type.IsTagged() && !type.IsTaggedPointer();
}
inline bool MachineTypeMatches(MachineType type, int32_t imm) {
  // 32-bit immediates may also be used for 64-bit parameters; they are
  // zero-extended.
  return type.representation() == MachineRepresentation::kWord32 ||
         type.representation() == MachineRepresentation::kWord64;
}
inline bool MachineTypeMatches(MachineType type, RootIndex index) {
  return type.IsTagged() && !type.IsTaggedSigned();
}
inline bool MachineTypeMatches(MachineType type, const Input& input) {
  if (type.representation() == input.node()->GetMachineRepresentation()) {
    return true;
  }
  if (type.IsTagged()) {
    return input.node()->is_tagged();
  }
  return false;
}
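// Illustrative: a Smi argument satisfies any tagged parameter type, but not
// one that requires a tagged pointer:
//   MachineTypeMatches(MachineType::AnyTagged(), Tagged<Smi>{})      // true
//   MachineTypeMatches(MachineType::TaggedPointer(), Tagged<Smi>{})  // false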
template <typename Descriptor, typename Arg>
void CheckArg(MaglevAssembler* masm, Arg& arg, int& i) {
  if (i >= Descriptor::GetParameterCount()) {
    CHECK(Descriptor::AllowVarArgs());
    return;
  }
  CHECK(MachineTypeMatches(Descriptor::GetParameterType(i), arg));
  ++i;
}

template <typename Descriptor, typename Iterator>
void CheckArg(MaglevAssembler* masm,
              const base::iterator_range<Iterator>& range, int& i) {
  for (auto it = range.begin(), end = range.end(); it != end; ++it, ++i) {
    if (i >= Descriptor::GetParameterCount()) {
      CHECK(Descriptor::AllowVarArgs());
      return;
    }
    CHECK(MachineTypeMatches(Descriptor::GetParameterType(i), *it));
  }
}

template <typename Descriptor, typename... Args>
void CheckArgs(MaglevAssembler* masm, const std::tuple<Args...>& args) {
  int i = 0;
  base::tuple_for_each(args,
                       [&](auto&& arg) { CheckArg<Descriptor>(masm, arg, i); });
  if (Descriptor::AllowVarArgs()) {
    CHECK_GE(i, Descriptor::GetParameterCount());
  } else {
    CHECK_EQ(i, Descriptor::GetParameterCount());
  }
}
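// Illustrative (hypothetical descriptor whose parameter 0 is
// MachineType::AnyTagged()):
//
//   CheckArgs<Descriptor>(masm, std::tuple{tagged_register});  // passes
//   CheckArgs<Descriptor>(masm, std::tuple{int32_t{42}});      // CHECK-fails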
#else  // DEBUG

template <typename Descriptor, typename... Args>
void CheckArgs(Args&&... args) {}

#endif  // DEBUG

template <typename Descriptor, typename... Args>
void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<Args...> args) {
  std::apply(
      [&](auto&&... stack_args) {
        if constexpr (Descriptor::kStackArgumentOrder ==
                      StackArgumentOrder::kDefault) {
          masm->Push(std::forward<decltype(stack_args)>(stack_args)...);
        } else {
          masm->PushReverse(std::forward<decltype(stack_args)>(stack_args)...);
        }
      },
      std::move(args));
}

template <typename Descriptor>
void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<> empty_args) {}
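// Illustrative: with three stack arguments a, b, c,
//   PushArgumentsForBuiltin<Descriptor>(masm, std::tuple{a, b, c});
// expands to masm->Push(a, b, c) for StackArgumentOrder::kDefault, and to
// masm->PushReverse(a, b, c) for descriptors using JS argument order.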
// Moves the arguments for a builtin call into the registers and stack slots
// described by the builtin's call interface descriptor.
template <Builtin kBuiltin, typename... Args>
void MoveArgumentsForBuiltin(MaglevAssembler* masm, Args&&... args) {
  using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;

  // Put the args into a tuple for easier manipulation.
  std::tuple<Args&&...> args_tuple{std::forward<Args>(args)...};

  // In debug builds, check the arguments against the descriptor.
  CheckArgs<Descriptor>(masm, args_tuple);

  // If the builtin has a context parameter, it is passed first.
  constexpr size_t context_args = Descriptor::HasContextParameter() ? 1 : 0;
  static_assert(context_args <= std::tuple_size_v<decltype(args_tuple)>,
                "Not enough arguments passed in to builtin (are you missing a "
                "context argument?)");
  // Split the remaining arguments into register and stack arguments.
  auto args_tuple_without_context = base::tuple_drop<context_args>(args_tuple);
  static_assert(Descriptor::GetRegisterParameterCount() <=
                    std::tuple_size_v<decltype(args_tuple_without_context)>,
                "Not enough arguments passed in to builtin (are you missing a "
                "context argument?)");
  auto register_args =
      base::tuple_head<Descriptor::GetRegisterParameterCount()>(
          args_tuple_without_context);
  auto stack_args = base::tuple_drop<Descriptor::GetRegisterParameterCount()>(
      args_tuple_without_context);
  static_assert(Descriptor::GetStackParameterCount() <=
                    std::tuple_size_v<decltype(stack_args)>,
                "Not enough stack arguments passed in to builtin (are you "
                "missing a context argument?)");
  auto fixed_stack_args =
      base::tuple_head<Descriptor::GetStackParameterCount()>(stack_args);
  auto vararg_stack_args =
      base::tuple_drop<Descriptor::GetStackParameterCount()>(stack_args);
  if constexpr (!Descriptor::AllowVarArgs()) {
    static_assert(std::tuple_size_v<decltype(vararg_stack_args)> == 0,
                  "Too many arguments passed in to builtin that expects no "
                  "vararg stack arguments");
  }
  // Push the stack arguments; the two groups are pushed in opposite orders
  // depending on the descriptor's stack argument order.
  if constexpr (Descriptor::kStackArgumentOrder ==
                StackArgumentOrder::kDefault) {
    PushArgumentsForBuiltin<Descriptor>(
        masm, std::forward<decltype(fixed_stack_args)>(fixed_stack_args));
    PushArgumentsForBuiltin<Descriptor>(
        masm, std::forward<decltype(vararg_stack_args)>(vararg_stack_args));
  } else {
    PushArgumentsForBuiltin<Descriptor>(
        masm, std::forward<decltype(vararg_stack_args)>(vararg_stack_args));
    PushArgumentsForBuiltin<Descriptor>(
        masm, std::forward<decltype(fixed_stack_args)>(fixed_stack_args));
  }
  // Move the register arguments into place, tracking written registers so we
  // can assert that no argument is clobbered by an earlier move.
  RegList written_registers = {};
  DoubleRegList written_double_registers = {};
  base::tuple_for_each_with_index(register_args, [&](auto&& arg, auto index) {
    using Arg = decltype(arg);
    static_assert(index < Descriptor::GetRegisterParameterCount());

    // Make sure the argument wasn't clobbered by any previous move.
    DCHECK(!ClobberedBy(written_registers, arg));
    DCHECK(!ClobberedBy(written_double_registers, arg));

    static constexpr bool use_double_register =
        IsFloatingPoint(Descriptor::GetParameterType(index).representation());
    if constexpr (use_double_register) {
      DoubleRegister target = Descriptor::GetDoubleRegisterParameter(index);
      if constexpr (std::is_same_v<Input, std::decay_t<Arg>>) {
        DCHECK_EQ(target, arg.AssignedDoubleRegister());
      } else {
        masm->Move(target, std::forward<Arg>(arg));
      }
      written_double_registers.set(target);
    } else {
      Register target = Descriptor::GetRegisterParameter(index);
      if constexpr (std::is_same_v<Input, std::decay_t<Arg>>) {
        DCHECK_EQ(target, arg.AssignedGeneralRegister());
      } else {
        masm->Move(target, std::forward<Arg>(arg));
      }
      written_registers.set(target);
    }
  });
  // Set the context last, so that no other argument move can clobber it.
  if constexpr (Descriptor::HasContextParameter()) {
    auto&& context = std::get<0>(args_tuple);
    DCHECK(!ClobberedBy(written_registers, context));
    DCHECK(!ClobberedBy(written_double_registers, context));
    if constexpr (std::is_same_v<Input, std::decay_t<decltype(context)>>) {
      DCHECK_EQ(kContextRegister, context.AssignedGeneralRegister());
    } else {
      // A raw Register is not allowed as the context argument; pass an Input
      // or a handle so that clobbering can be checked.
      static_assert(
          !std::is_same_v<Register, std::decay_t<decltype(context)>>);
      masm->Move(kContextRegister, context);
    }
  }
}

}  // namespace detail
inline void MaglevAssembler::CallBuiltin(Builtin builtin) {
  // DoubleToI is exempt from the allow-call check.
  DCHECK(allow_call() || builtin == Builtin::kDoubleToI);
  MacroAssembler::CallBuiltin(builtin);
  MaybeEmitPlaceHolderForDeopt();
}
template <Builtin kBuiltin, typename... Args>
inline void MaglevAssembler::CallBuiltin(Args&&... args) {
  ASM_CODE_COMMENT(this);
  detail::MoveArgumentsForBuiltin<kBuiltin>(this, std::forward<Args>(args)...);
  CallBuiltin(kBuiltin);
}
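// A usage sketch (illustrative; the context and input arguments are
// hypothetical). Arguments are checked against the builtin's call interface
// descriptor and moved into the descriptor's registers before the call:
//
//   masm->CallBuiltin<Builtin::kToObject>(
//       masm->native_context().object(),  // context argument
//       object_input);                    // first descriptor parameter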
inline void MaglevAssembler::SmiTagInt32AndJumpIfSuccess(
    Register dst, Register src, Label* success, Label::Distance distance) {
  SmiTagInt32AndSetFlags(dst, src);
  if (!SmiValuesAre32Bits()) {
    JumpIf(kNoOverflow, success, distance);
  } else {
    Jump(success, distance);
  }
}

// The Uint32 and IntPtr variants follow the same pattern with their
// respective tagging helpers.

inline void MaglevAssembler::UncheckedSmiTagInt32(Register dst, Register src) {
  SmiTagInt32AndSetFlags(dst, src);
  if (!SmiValuesAre32Bits()) {
    Assert(kNoOverflow, AbortReason::kInputDoesNotFitSmi);
  }
}
inline void MaglevAssembler::JumpIfStringMap(Register map, Label* target,
                                             Label::Distance distance,
                                             bool jump_if_true) {
#if V8_STATIC_ROOTS_BOOL
  // With static roots, all string maps sit at the start of read-only space,
  // so a single unsigned bound check against the last string map suffices.
  CompareInt32AndJumpIf(
      map, InstanceTypeChecker::kStringMapUpperBound,
      jump_if_true ? kUnsignedLessThanEqual : kUnsignedGreaterThan, target,
      distance);
#else
#ifdef V8_COMPRESS_POINTERS
  DecompressTagged(map, map);
#endif
  // Strings are the first instance types, so a string map is one whose
  // instance type does not exceed the last string type.
  static_assert(FIRST_STRING_TYPE == FIRST_TYPE);
  CompareInstanceTypeAndJumpIf(
      map, LAST_STRING_TYPE,
      jump_if_true ? kUnsignedLessThanEqual : kUnsignedGreaterThan, target,
      distance);
#endif
}
inline void MaglevAssembler::JumpIfString(Register heap_object, Label* target,
                                          Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
#ifdef V8_COMPRESS_POINTERS
  LoadCompressedMap(scratch, heap_object);
#else
  LoadMap(scratch, heap_object);
#endif
  JumpIfStringMap(scratch, target, distance, true);
}

inline void MaglevAssembler::JumpIfNotString(Register heap_object,
                                             Label* target,
                                             Label::Distance distance) {
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
#ifdef V8_COMPRESS_POINTERS
  LoadCompressedMap(scratch, heap_object);
#else
  LoadMap(scratch, heap_object);
#endif
  JumpIfStringMap(scratch, target, distance, false);
}
inline void MaglevAssembler::CheckJSAnyIsStringAndBranch(
    Register heap_object, Label* if_true, Label::Distance true_distance,
    bool fallthrough_when_true, Label* if_false,
    Label::Distance false_distance, bool fallthrough_when_false) {
  BranchOnObjectTypeInRange(heap_object, FIRST_STRING_TYPE, LAST_STRING_TYPE,
                            if_true, true_distance, fallthrough_when_true,
                            if_false, false_distance, fallthrough_when_false);
}
inline void MaglevAssembler::StringLength(Register result, Register string) {
  if (v8_flags.debug_code) {
    // Check that {string} is actually a string.
    AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
                            AbortReason::kUnexpectedValue);
  }
  LoadSignedField(result, FieldMemOperand(string, offsetof(String, length_)),
                  sizeof(int32_t));
}

inline void MaglevAssembler::LoadThinStringValue(Register result,
                                                 Register string) {
  if (v8_flags.debug_code) {
    // Check that {string} is actually a thin string.
    TemporaryRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    Label ok;
    LoadInstanceType(scratch, string);
    TestInt32AndJumpIfAnySet(scratch, kThinStringTagBit, &ok, Label::kNear);
    Abort(AbortReason::kUnexpectedValue);
    bind(&ok);
  }
  LoadTaggedField(result, string, offsetof(ThinString, actual_));
}
inline void MaglevAssembler::DefineExceptionHandlerPoint(NodeBase* node) {
  ExceptionHandlerInfo* info = node->exception_handler_info();
  if (!info->HasExceptionHandler()) return;
  code_gen_state()->PushHandlerInfo(node);
}
inline void MaglevAssembler::AssertElidedWriteBarrier(
    Register object, Register value, RegisterSnapshot snapshot) {
#if defined(V8_ENABLE_DEBUG_CODE) && !V8_DISABLE_WRITE_BARRIERS_BOOL
  if (!v8_flags.slow_debug_code) return;
  ZoneLabelRef ok(this);
  Label* deferred = MakeDeferredCode(
      [](MaglevAssembler* masm, ZoneLabelRef ok, Register object,
         Register value, RegisterSnapshot snapshot) {
        // Verify in the runtime that eliding the write barrier for this
        // store was safe.
        SaveRegisterStateForCall save_register_state(masm, snapshot);
        masm->set_allow_call(true);
#ifdef V8_COMPRESS_POINTERS
        masm->DecompressTagged(value, value);
#endif
        masm->Push(object, value);
        masm->CallRuntime(Runtime::kCheckNoWriteBarrierNeeded, 2);
        save_register_state.DefineSafepoint();
        masm->set_allow_call(false);
        masm->Jump(*ok);
      },
      ok, object, value, snapshot);
  // Smi stores never require a write barrier; anything else is verified out
  // of line.
  JumpIfNotSmi(value, deferred);
  bind(*ok);
#endif
}

#endif  // V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_