v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
maglev-assembler-inl.h
1// Copyright 2023 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
6#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
7
9// Include the non-inl header before the rest of the headers.
10
11#include <algorithm>
12#include <type_traits>
13
14#include "src/base/iterator.h"
17
18#ifdef V8_TARGET_ARCH_ARM
20#elif V8_TARGET_ARCH_ARM64
22#elif V8_TARGET_ARCH_RISCV64
24#elif V8_TARGET_ARCH_X64
26#elif V8_TARGET_ARCH_S390X
28#else
29#error "Maglev does not supported this architecture."
30#endif
31
32namespace v8 {
33namespace internal {
34namespace maglev {
35
36namespace detail {
37
38// Base case provides an error.
39template <typename T, typename Enable = void>
41 template <typename U>
43 static void Copy(MaglevCompilationInfo* compilation_info,
45};
46
47// Helper for copies by value.
48template <typename T, typename Enable = void>
50 static T Copy(MaglevCompilationInfo* compilation_info, T node) {
51 return node;
52 }
53};
54
55// Node pointers are copied by value.
56template <typename T>
58 T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
60// Arithmetic values and enums are copied by value.
61template <typename T>
63 T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
64 : public CopyForDeferredByValue<T> {};
65template <typename T>
67 T, typename std::enable_if<std::is_enum<T>::value>::type>
68 : public CopyForDeferredByValue<T> {};
69// MaglevCompilationInfos are copied by value.
70template <>
73// Machine registers are copied by value.
74template <>
76 : public CopyForDeferredByValue<Register> {};
77template <>
78struct CopyForDeferredHelper<std::optional<Register>>
79 : public CopyForDeferredByValue<std::optional<Register>> {};
80template <>
82 : public CopyForDeferredByValue<DoubleRegister> {};
83// Bytecode offsets are copied by value.
84template <>
86 : public CopyForDeferredByValue<BytecodeOffset> {};
87// EagerDeoptInfo pointers are copied by value.
88template <>
91// LazyDeoptInfo pointers are copied by value.
92template <>
95// ZoneLabelRef is copied by value.
96template <>
98 : public CopyForDeferredByValue<ZoneLabelRef> {};
99// MapCompare is copied by value.
100template <>
102 : public CopyForDeferredByValue<MapCompare> {};
103// RegLists are copied by value.
104template <>
106};
107// Register snapshots are copied by value.
108template <>
110 : public CopyForDeferredByValue<RegisterSnapshot> {};
111// Feedback slots are copied by value.
112template <>
114 : public CopyForDeferredByValue<FeedbackSlot> {};
115// Heap Refs are copied by value.
116template <typename T>
117struct CopyForDeferredHelper<T, typename std::enable_if<std::is_base_of<
118 compiler::ObjectRef, T>::value>::type>
119 : public CopyForDeferredByValue<T> {};
120
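Types that are cheap and safe to copy into deferred code opt in by deriving their CopyForDeferredHelper specialization from CopyForDeferredByValue, as every entry above does. A minimal sketch of such an opt-in, where SomeByValueType is a hypothetical placeholder rather than a real V8 type:

    // Hypothetical: allow SomeByValueType arguments to deferred code to be
    // copied by value when the deferred code info is created.
    template <>
    struct CopyForDeferredHelper<SomeByValueType>
        : public CopyForDeferredByValue<SomeByValueType> {};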
121template <typename T>
122T CopyForDeferred(MaglevCompilationInfo* compilation_info, T&& value) {
123 return CopyForDeferredHelper<T>::Copy(compilation_info,
124 std::forward<T>(value));
125}
126
127template <typename T>
128T CopyForDeferred(MaglevCompilationInfo* compilation_info, T& value) {
129 return CopyForDeferredHelper<T>::Copy(compilation_info, value);
130}
131
132template <typename T>
133T CopyForDeferred(MaglevCompilationInfo* compilation_info, const T& value) {
134 return CopyForDeferredHelper<T>::Copy(compilation_info, value);
135}
136
137template <typename Function>
139 : public FunctionArgumentsTupleHelper<decltype(&Function::operator())> {};
140
141template <typename C, typename R, typename... A>
142struct FunctionArgumentsTupleHelper<R (C::*)(A...) const> {
143 using FunctionPointer = R (*)(A...);
144 using Tuple = std::tuple<A...>;
145 static constexpr size_t kSize = sizeof...(A);
146};
147
148template <typename R, typename... A>
150 using FunctionPointer = R (*)(A...);
151 using Tuple = std::tuple<A...>;
152 static constexpr size_t kSize = sizeof...(A);
153};
154
155template <typename T>
157
158template <typename T1, typename... T>
159struct StripFirstTupleArg<std::tuple<T1, T...>> {
160 using Stripped = std::tuple<T...>;
161};
162
163template <typename Function>
165 public:
168 using Tuple = typename StripFirstTupleArg<
170
171 template <typename... InArgs>
173 MaglevCompilationInfo* compilation_info,
175 FunctionPointer function, InArgs&&... args)
176 : function(function),
177 args(CopyForDeferred(compilation_info, std::forward<InArgs>(args))...),
178 deferred_scratch_(deferred_scratch) {}
179
182
183 void Generate(MaglevAssembler* masm) override {
186#ifdef DEBUG
187 masm->set_allow_call(allow_call_);
188 masm->set_allow_deferred_call(allow_call_);
189 masm->set_allow_allocate(allow_allocate_);
190#endif // DEBUG
191 std::apply(function,
192 std::tuple_cat(std::make_tuple(masm), std::move(args)));
193#ifdef DEBUG
194 masm->set_allow_call(false);
195 masm->set_allow_deferred_call(false);
196 masm->set_allow_allocate(false);
197#endif // DEBUG
198 }
199
200#ifdef DEBUG
201 void set_allow_call(bool value) { allow_call_ = value; }
202 void set_allow_allocate(bool value) { allow_allocate_ = value; }
203#endif // DEBUG
204
205 private:
209
210#ifdef DEBUG
211 bool allow_call_ = false;
212 bool allow_allocate_ = false;
213#endif // DEBUG
214};
215
216} // namespace detail
217
218template <typename Function, typename... Args>
220 Args&&... args) {
221 using FunctionPointer =
223 static_assert(
224 std::is_invocable_v<FunctionPointer, MaglevAssembler*,
226 std::declval<MaglevCompilationInfo*>(),
227 std::declval<Args>()))...>,
228 "Parameters of deferred_code_gen function should match arguments into "
229 "MakeDeferredCode");
230
231 TemporaryRegisterScope scratch_scope(this);
232 using DeferredCodeInfoT = detail::DeferredCodeInfoImpl<Function>;
233 DeferredCodeInfoT* deferred_code =
234 compilation_info()->zone()->New<DeferredCodeInfoT>(
235 compilation_info(), scratch_scope.CopyForDefer(), deferred_code_gen,
236 std::forward<Args>(args)...);
237
238#ifdef DEBUG
239 deferred_code->set_allow_call(allow_deferred_call_);
240 deferred_code->set_allow_allocate(allow_allocate_);
241#endif // DEBUG
242
243 code_gen_state()->PushDeferredCode(deferred_code);
244 return &deferred_code->deferred_code_label;
245}
246
247// Note that this deliberately does not accept capturing lambdas, since state
248// may change between now and when `deferred_code_gen` is actually executed.
249// Use either a non-capturing lambda or a plain function pointer.
250template <typename Function, typename... Args>
252 Function&& deferred_code_gen,
253 Args&&... args) {
254 if (v8_flags.code_comments) {
255 RecordComment("-- Jump to deferred code");
256 }
258 std::forward<Function>(deferred_code_gen),
259 std::forward<Args>(args)...));
260}
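A typical call site, sketched under the assumption that it runs during Maglev code generation with a MaglevAssembler* named masm and a Register named result holding the node's output; the condition and names are illustrative only. The lambda is non-capturing, and everything it needs is passed as trailing arguments, which are routed through CopyForDeferred when the deferred code info is allocated:

    ZoneLabelRef done(masm);
    masm->JumpToDeferredIf(
        kOverflow,
        [](MaglevAssembler* masm, ZoneLabelRef done, Register result) {
          // Slow path; the Register and ZoneLabelRef were copied by value.
          masm->Move(result, 0);
          masm->Jump(*done);
        },
        done, result);
    masm->bind(*done);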
261
262template <typename T>
264 RegisterSnapshot register_snapshot, Register object,
265 Builtin builtin, T size_in_bytes, ZoneLabelRef done) {
266 // Remove {object} from snapshot, since it is the returned allocated
267 // HeapObject.
268 register_snapshot.live_registers.clear(object);
269 register_snapshot.live_tagged_registers.clear(object);
270 {
271 SaveRegisterStateForCall save_register_state(masm, register_snapshot);
272 using D = AllocateDescriptor;
273 masm->Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
274 masm->CallBuiltin(builtin);
275 save_register_state.DefineSafepoint();
276 masm->Move(object, kReturnRegister0);
277 }
278 masm->Jump(*done);
279}
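AllocateSlow is intended to run as deferred code: the fast path attempts an inline allocation and, on failure, jumps to a deferred stub that calls the allocation builtin and rejoins at `done`. Roughly, with the inline fast path elided and the names register_snapshot, object and size_in_bytes assumed from the surrounding code generator:

    ZoneLabelRef done(masm);
    Label* slow_path = masm->MakeDeferredCode(
        AllocateSlow<int>, register_snapshot, object,
        Builtin::kAllocateInYoungGeneration, size_in_bytes, done);
    // ... try an inline bump-pointer allocation, jumping to slow_path on
    // failure ...
    masm->bind(*done);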
280
282 AssertSmi(smi);
283 SmiUntag(smi);
284 Int32ToDouble(result, smi);
285}
286
287#if !defined(V8_TARGET_ARCH_RISCV64)
288
290 Register map, InstanceType type, Condition cond, Label* target,
291 Label::Distance distance) {
292 CompareInstanceType(map, type);
293 JumpIf(cond, target, distance);
294}
295
296template <typename NodeT>
298 Register map, Register instance_type_out, InstanceType lower_limit,
299 InstanceType higher_limit, Condition cond, DeoptimizeReason reason,
300 NodeT* node) {
301 CompareInstanceTypeRange(map, instance_type_out, lower_limit, higher_limit);
302 EmitEagerDeoptIf(cond, reason, node);
303}
304
305template <typename NodeT>
307 Register reg, RootIndex index, Condition cond, DeoptimizeReason reason,
308 NodeT* node) {
309 CompareRoot(reg, index);
310 EmitEagerDeoptIf(cond, reason, node);
311}
312
313template <typename NodeT>
315 Register reg, RootIndex index, Register scratch, Condition cond,
316 DeoptimizeReason reason, NodeT* node) {
317 CompareMapWithRoot(reg, index, scratch);
318 EmitEagerDeoptIf(cond, reason, node);
319}
320
321template <typename NodeT>
323 Register reg, RootIndex index, Condition cond, DeoptimizeReason reason,
324 NodeT* node) {
325 CompareTaggedRoot(reg, index);
326 EmitEagerDeoptIf(cond, reason, node);
327}
328
329template <typename NodeT>
331 Register reg, int imm, Condition cond, DeoptimizeReason reason,
332 NodeT* node) {
333 Cmp(reg, imm);
334 EmitEagerDeoptIf(cond, reason, node);
335}
336#endif
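Each of these helpers fuses a comparison with an eager deoptimization check via EmitEagerDeoptIf. A hypothetical call site, with placeholder register, root index and deopt reason (not taken from this file), where `node` is the Maglev node carrying the eager deopt info and the code deopts when the value is not the undefined sentinel:

    masm->CompareRootAndEmitEagerDeoptIf(value, RootIndex::kUndefinedValue,
                                         kNotEqual,
                                         DeoptimizeReason::kWrongValue, node);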
337
339 Condition cond,
340 BasicBlock* if_true,
341 BasicBlock* if_false,
342 BasicBlock* next_block) {
343 CompareInt32AndBranch(r1, value, cond, if_true->label(), Label::kFar,
344 if_true == next_block, if_false->label(), Label::kFar,
345 if_false == next_block);
346}
347
349 Condition cond,
350 BasicBlock* if_true,
351 BasicBlock* if_false,
352 BasicBlock* next_block) {
353 CompareInt32AndBranch(r1, r2, cond, if_true->label(), Label::kFar,
354 if_true == next_block, if_false->label(), Label::kFar,
355 if_false == next_block);
356}
357
359 Condition cond,
360 BasicBlock* if_true,
361 BasicBlock* if_false,
362 BasicBlock* next_block) {
363 CompareIntPtrAndBranch(r1, value, cond, if_true->label(), Label::kFar,
364 if_true == next_block, if_false->label(), Label::kFar,
365 if_false == next_block);
366}
367
369 BasicBlock* if_false,
370 BasicBlock* next_block) {
371 Branch(condition, if_true->label(), Label::kFar, if_true == next_block,
372 if_false->label(), Label::kFar, if_false == next_block);
373}
374
376 Label::Distance true_distance,
377 bool fallthrough_when_true, Label* if_false,
378 Label::Distance false_distance,
379 bool fallthrough_when_false) {
380 if (fallthrough_when_false) {
381 if (fallthrough_when_true) {
382 // If both paths are a fallthrough, do nothing.
383 DCHECK_EQ(if_true, if_false);
384 return;
385 }
386 // Jump over the false block if true, otherwise fall through into it.
387 JumpIf(condition, if_true, true_distance);
388 } else {
389 // Jump to the false block if true.
390 JumpIf(NegateCondition(condition), if_false, false_distance);
391 // Jump to the true block if it's not the next block.
392 if (!fallthrough_when_true) {
393 Jump(if_true, true_distance);
394 }
395 }
396}
397
402
407
412
418
424
426 Register object) {
427 static_assert(offsetof(HeapNumber, value_) ==
428 offsetof(Oddball, to_number_raw_));
430}
431
433 Register heap_number) {
434 StoreFloat64(FieldMemOperand(heap_number, offsetof(HeapNumber, value_)),
435 value);
436}
437
438namespace detail {
439
440#ifdef DEBUG
441inline bool ClobberedBy(RegList written_registers, Register reg) {
442 return written_registers.has(reg);
443}
444inline bool ClobberedBy(RegList written_registers, DoubleRegister reg) {
445 return false;
446}
447inline bool ClobberedBy(RegList written_registers,
448 DirectHandle<Object> handle) {
449 return false;
450}
451inline bool ClobberedBy(RegList written_registers, Tagged<Smi> smi) {
452 return false;
453}
454inline bool ClobberedBy(RegList written_registers, Tagged<TaggedIndex> index) {
455 return false;
456}
457inline bool ClobberedBy(RegList written_registers, int32_t imm) {
458 return false;
459}
460inline bool ClobberedBy(RegList written_registers, RootIndex index) {
461 return false;
462}
463inline bool ClobberedBy(RegList written_registers, const Input& input) {
464 if (!input.IsGeneralRegister()) return false;
465 return ClobberedBy(written_registers, input.AssignedGeneralRegister());
466}
467
468inline bool ClobberedBy(DoubleRegList written_registers, Register reg) {
469 return false;
470}
471inline bool ClobberedBy(DoubleRegList written_registers, DoubleRegister reg) {
472 return written_registers.has(reg);
473}
474inline bool ClobberedBy(DoubleRegList written_registers,
475 DirectHandle<Object> handle) {
476 return false;
477}
478inline bool ClobberedBy(DoubleRegList written_registers, Tagged<Smi> smi) {
479 return false;
480}
481inline bool ClobberedBy(DoubleRegList written_registers,
482 Tagged<TaggedIndex> index) {
483 return false;
484}
485inline bool ClobberedBy(DoubleRegList written_registers, int32_t imm) {
486 return false;
487}
488inline bool ClobberedBy(DoubleRegList written_registers, RootIndex index) {
489 return false;
490}
491inline bool ClobberedBy(DoubleRegList written_registers, const Input& input) {
492 if (!input.IsDoubleRegister()) return false;
493 return ClobberedBy(written_registers, input.AssignedDoubleRegister());
494}
495
496// We don't know what's inside machine registers or operands, so assume they
497// match.
498inline bool MachineTypeMatches(MachineType type, Register reg) {
499 return !IsFloatingPoint(type.representation());
500}
501inline bool MachineTypeMatches(MachineType type, DoubleRegister reg) {
502 return IsFloatingPoint(type.representation());
503}
504inline bool MachineTypeMatches(MachineType type, MemOperand reg) {
505 return true;
506}
507inline bool MachineTypeMatches(MachineType type,
508 DirectHandle<HeapObject> handle) {
509 return type.IsTagged() && !type.IsTaggedSigned();
510}
511inline bool MachineTypeMatches(MachineType type, Tagged<Smi> smi) {
512 return type.IsTagged() && !type.IsTaggedPointer();
513}
514inline bool MachineTypeMatches(MachineType type, Tagged<TaggedIndex> index) {
515 // TaggedIndex doesn't have a separate type, so check for the same type as for
516 // Smis.
517 return type.IsTagged() && !type.IsTaggedPointer();
518}
519inline bool MachineTypeMatches(MachineType type, int32_t imm) {
520 // 32-bit immediates can be used for 64-bit params -- they'll be
521 // zero-extended.
522 return type.representation() == MachineRepresentation::kWord32 ||
523 type.representation() == MachineRepresentation::kWord64;
524}
525inline bool MachineTypeMatches(MachineType type, RootIndex index) {
526 return type.IsTagged() && !type.IsTaggedSigned();
527}
528inline bool MachineTypeMatches(MachineType type, const Input& input) {
529 if (type.representation() == input.node()->GetMachineRepresentation()) {
530 return true;
531 }
532 if (type.IsTagged()) {
533 return input.node()->is_tagged();
534 }
535 return false;
536}
537
538template <typename Descriptor, typename Arg>
539void CheckArg(MaglevAssembler* masm, Arg& arg, int& i) {
540 if (i >= Descriptor::GetParameterCount()) {
541 CHECK(Descriptor::AllowVarArgs());
542 }
543 CHECK(MachineTypeMatches(Descriptor::GetParameterType(i), arg));
544 ++i;
545}
546
547template <typename Descriptor, typename Iterator>
548void CheckArg(MaglevAssembler* masm,
549 const base::iterator_range<Iterator>& range, int& i) {
550 for (auto it = range.begin(), end = range.end(); it != end; ++it, ++i) {
551 if (i >= Descriptor::GetParameterCount()) {
552 CHECK(Descriptor::AllowVarArgs());
553 }
554 CHECK(MachineTypeMatches(Descriptor::GetParameterType(i), *it));
555 }
556}
557
558template <typename Descriptor, typename... Args>
559void CheckArgs(MaglevAssembler* masm, const std::tuple<Args...>& args) {
560 int i = 0;
562 [&](auto&& arg) { CheckArg<Descriptor>(masm, arg, i); });
563 if (Descriptor::AllowVarArgs()) {
564 CHECK_GE(i, Descriptor::GetParameterCount());
565 } else {
566 CHECK_EQ(i, Descriptor::GetParameterCount());
567 }
568}
569
570#else // DEBUG
571
572template <typename Descriptor, typename... Args>
573void CheckArgs(Args&&... args) {}
574
575#endif // DEBUG
576
577template <typename Descriptor, typename... Args>
578void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<Args...> args) {
579 std::apply(
580 [&](auto&&... stack_args) {
581 if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) {
582 masm->Push(std::forward<decltype(stack_args)>(stack_args)...);
583 } else {
584 masm->PushReverse(std::forward<decltype(stack_args)>(stack_args)...);
585 }
586 },
587 args);
588}
589
590template <typename Descriptor>
591void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<> empty_args) {}
592
593template <Builtin kBuiltin, typename... Args>
596
597 // Put the args into a tuple for easier manipulation.
598 std::tuple<Args&&...> args_tuple{std::forward<Args>(args)...};
599
600 // If there is a context, the first argument is the context parameter. Use
601 // the remaining args as the actual arguments. We pass the context first
602 // instead of last to avoid ambiguity around dealing with on-stack
603 // arguments.
604 constexpr size_t context_args = Descriptor::HasContextParameter() ? 1 : 0;
605 static_assert(context_args <= std::tuple_size_v<decltype(args_tuple)>,
606 "Not enough arguments passed in to builtin (are you missing a "
607 "context argument?)");
608 auto args_tuple_without_context = base::tuple_drop<context_args>(args_tuple);
609 CheckArgs<Descriptor>(masm, args_tuple_without_context);
610
611 // Split args into register and stack args.
612 static_assert(Descriptor::GetRegisterParameterCount() <=
613 std::tuple_size_v<decltype(args_tuple_without_context)>,
614 "Not enough arguments passed in to builtin (are you missing a "
615 "context argument?)");
616 auto register_args =
618 args_tuple_without_context);
620 args_tuple_without_context);
621
622 // Split stack args into fixed and variable.
623 static_assert(
624 Descriptor::GetStackParameterCount() <=
625 std::tuple_size_v<decltype(stack_args)>,
626 "Not enough stack arguments passed in to builtin (are you missing a "
627 "context argument?)");
628 auto fixed_stack_args =
630 auto vararg_stack_args =
632
633 if constexpr (!Descriptor::AllowVarArgs()) {
634 static_assert(std::tuple_size_v<decltype(vararg_stack_args)> == 0,
635 "Too many arguments passed in to builtin that expects no "
636 "vararg stack arguments");
637 }
638
639 // First push stack arguments (if any), since some of these may be in
640 // registers and we don't want to clobber them. This supports anything
641 // `masm->Push` supports, including iterator ranges, so the tuple size may be
642 // smaller than the number of arguments actually pushed. We push fixed and
643 // vararg stack arguments separately, so that there's an appropriate amount
644 // of padding between them.
645 if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) {
647 masm, std::forward<decltype(fixed_stack_args)>(fixed_stack_args));
649 masm, std::forward<decltype(vararg_stack_args)>(vararg_stack_args));
650 } else {
652 masm, std::forward<decltype(vararg_stack_args)>(vararg_stack_args));
654 masm, std::forward<decltype(fixed_stack_args)>(fixed_stack_args));
655 }
656
657// Then, set register arguments.
658// TODO(leszeks): Use the parallel move helper to do register moves, instead
659// of detecting clobbering.
660#ifdef DEBUG
661 RegList written_registers = {};
662 DoubleRegList written_double_registers = {};
663#endif // DEBUG
664
665 base::tuple_for_each_with_index(register_args, [&](auto&& arg, auto index) {
666 using Arg = decltype(arg);
667 static_assert(index < Descriptor::GetRegisterParameterCount());
668
669 // Make sure the argument wasn't clobbered by any previous write.
670 DCHECK(!ClobberedBy(written_registers, arg));
671 DCHECK(!ClobberedBy(written_double_registers, arg));
672
673 static constexpr bool use_double_register =
674 IsFloatingPoint(Descriptor::GetParameterType(index).representation());
675 if constexpr (use_double_register) {
676 DoubleRegister target = Descriptor::GetDoubleRegisterParameter(index);
677 if constexpr (std::is_same_v<Input, std::decay_t<Arg>>) {
678 DCHECK_EQ(target, arg.AssignedDoubleRegister());
679 USE(target);
680 } else {
681 masm->Move(target, std::forward<Arg>(arg));
682 }
683#ifdef DEBUG
684 written_double_registers.set(target);
685#endif // DEBUG
686 } else {
687 Register target = Descriptor::GetRegisterParameter(index);
688 if constexpr (std::is_same_v<Input, std::decay_t<Arg>>) {
689 DCHECK_EQ(target, arg.AssignedGeneralRegister());
690 USE(target);
691 } else {
692 masm->Move(target, std::forward<Arg>(arg));
693 }
694#ifdef DEBUG
695 written_registers.set(target);
696#endif // DEBUG
697 }
698
699 // TODO(leszeks): Support iterator range for register args.
700 });
701
702 // Set the context last (to avoid clobbering).
703 if constexpr (Descriptor::HasContextParameter()) {
704 auto&& context = std::get<0>(args_tuple);
705 DCHECK(!ClobberedBy(written_registers, context));
706 DCHECK(!ClobberedBy(written_double_registers, context));
707 DCHECK(MachineTypeMatches(MachineType::AnyTagged(), context));
708
709 if constexpr (std::is_same_v<Input, std::decay_t<decltype(context)>>) {
710 DCHECK_EQ(Descriptor::ContextRegister(),
711 context.AssignedGeneralRegister());
712 } else {
713 // Don't allow raw Register here, force materialisation from a constant.
714 // This is because setting parameters could have clobbered the register.
715 // TODO(leszeks): Include the context register in the parallel moves
716 // described above.
717 static_assert(!std::is_same_v<Register, std::decay_t<decltype(context)>>);
718 masm->Move(Descriptor::ContextRegister(), context);
719 }
720 }
721}
722
723} // namespace detail
724
726 // Special case allowing calls to DoubleToI, which takes care to preserve all
727 // registers and therefore doesn't require special spill handling.
728 DCHECK(allow_call() || builtin == Builtin::kDoubleToI);
729
730 // Temporaries have to be reset before calling CallBuiltin, in case it uses
731 // temporaries that alias register parameters.
732 TemporaryRegisterScope reset_temps(this);
733 reset_temps.ResetToDefault();
734
735 // Make sure that none of the register parameters alias the default
736 // temporaries.
737#ifdef DEBUG
738 CallInterfaceDescriptor descriptor =
740 for (int i = 0; i < descriptor.GetRegisterParameterCount(); ++i) {
741 DCHECK(!reset_temps.Available().has(descriptor.GetRegisterParameter(i)));
742 }
743#endif
744
746}
747
748template <Builtin kBuiltin, typename... Args>
749inline void MaglevAssembler::CallBuiltin(Args&&... args) {
750 ASM_CODE_COMMENT(this);
751 detail::MoveArgumentsForBuiltin<kBuiltin>(this, std::forward<Args>(args)...);
752 CallBuiltin(kBuiltin);
753}
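A sketch of calling a builtin through this wrapper, assuming a descriptor with a context parameter plus one tagged register argument; the builtin and operand names are illustrative rather than taken from this file. The context is passed first (when the descriptor declares one), then the remaining arguments in descriptor order; stack arguments are pushed before register arguments are assigned, as described in MoveArgumentsForBuiltin above:

    masm->CallBuiltin<Builtin::kToNumber>(
        masm->native_context().object(),  // context parameter
        value_register);                  // the descriptor's single argument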
754
756 DCHECK(allow_call());
757 // Temporaries have to be reset before calling CallRuntime, in case it uses
758 // temporaries that alias register parameters.
759 TemporaryRegisterScope reset_temps(this);
760 reset_temps.ResetToDefault();
762}
763
765 int num_args) {
766 DCHECK(allow_call());
767 // Temporaries have to be reset before calling CallRuntime, in case it uses
768 // temporaries that alias register parameters.
769 TemporaryRegisterScope reset_temps(this);
770 reset_temps.ResetToDefault();
771 MacroAssembler::CallRuntime(fid, num_args);
772}
773
775 TemporaryRegisterScope temps(this);
776 Register scratch = temps.AcquireScratch();
777 LoadTaggedRoot(scratch, map);
779}
780
782 Register dst, Register src, Label* fail, Label::Distance distance) {
783 SmiTagInt32AndSetFlags(dst, src);
784 if (!SmiValuesAre32Bits()) {
785 JumpIf(kOverflow, fail, distance);
786 }
787}
788
790 Register reg, Label* fail, Label::Distance distance) {
791 SmiTagInt32AndJumpIfFail(reg, reg, fail, distance);
792}
793
795 Register dst, Register src, Label* success, Label::Distance distance) {
796 SmiTagInt32AndSetFlags(dst, src);
797 if (!SmiValuesAre32Bits()) {
798 JumpIf(kNoOverflow, success, distance);
799 } else {
800 jmp(success);
801 }
802}
803
805 Register reg, Label* success, Label::Distance distance) {
806 SmiTagInt32AndJumpIfSuccess(reg, reg, success, distance);
807}
808
810 SmiTagInt32AndSetFlags(dst, src);
811 if (!SmiValuesAre32Bits()) {
812 Assert(kNoOverflow, AbortReason::kInputDoesNotFitSmi);
813 }
814}
815
819
821 Register dst, Register src, Label* fail, Label::Distance distance) {
822 // Perform an unsigned comparison against Smi::kMaxValue.
824 distance);
825 SmiTagInt32AndSetFlags(dst, src);
826 if (!SmiValuesAre32Bits()) {
827 Assert(kNoOverflow, AbortReason::kInputDoesNotFitSmi);
828 }
829}
830
832 Register reg, Label* fail, Label::Distance distance) {
833 SmiTagUint32AndJumpIfFail(reg, reg, fail, distance);
834}
835
837 Register dst, Register src, Label* fail, Label::Distance distance) {
838 CheckIntPtrIsSmi(src, fail, distance);
839 // If the IntPtr is in the Smi range, we can treat it as Int32.
840 SmiTagInt32AndSetFlags(dst, src);
841 if (!SmiValuesAre32Bits()) {
842 Assert(kNoOverflow, AbortReason::kInputDoesNotFitSmi);
843 }
844}
845
847 Register dst, Register src, Label* success, Label::Distance distance) {
848 Label done;
849 SmiTagIntPtrAndJumpIfFail(dst, src, &done);
850 Jump(success, distance);
851 bind(&done);
852}
853
855 Register dst, Register src, Label* success, Label::Distance distance) {
856 Label fail;
858 Jump(success, distance);
859 bind(&fail);
860}
861
863 Register reg, Label* success, Label::Distance distance) {
864 SmiTagUint32AndJumpIfSuccess(reg, reg, success, distance);
865}
866
868 if (v8_flags.debug_code) {
869 // Perform an unsigned comparison against Smi::kMaxValue.
871 AbortReason::kInputDoesNotFitSmi);
872 }
873 SmiTagInt32AndSetFlags(dst, src);
874 if (!SmiValuesAre32Bits()) {
875 Assert(kNoOverflow, AbortReason::kInputDoesNotFitSmi);
876 }
877}
878
882
884 Label::Distance distance) {
885 // TODO(388844115): Optimize this per platform.
886 int32_t kSmiMaxValueInt32 = static_cast<int32_t>(Smi::kMaxValue);
887 int32_t kSmiMinValueInt32 = static_cast<int32_t>(Smi::kMinValue);
888 CompareIntPtrAndJumpIf(obj, kSmiMaxValueInt32, kGreaterThan, fail, distance);
889 CompareIntPtrAndJumpIf(obj, kSmiMinValueInt32, kLessThan, fail, distance);
890}
891
893 Label* fail,
894 Label::Distance distance) {
895 return SmiAddConstant(reg, reg, value, fail, distance);
896}
897
899 Label* fail,
900 Label::Distance distance) {
901 return SmiSubConstant(reg, reg, value, fail, distance);
902}
903
905 Label::Distance distance,
906 bool jump_if_true) {
907#if V8_STATIC_ROOTS_BOOL
908// All string maps are allocated at the start of the read-only heap. Thus,
909 // non-strings must have maps with larger (compressed) addresses.
911 map, InstanceTypeChecker::kStringMapUpperBound,
912 jump_if_true ? kUnsignedLessThanEqual : kUnsignedGreaterThan, target,
913 distance);
914#else
915#ifdef V8_COMPRESS_POINTERS
916 DecompressTagged(map, map);
917#endif
918 static_assert(FIRST_STRING_TYPE == FIRST_TYPE);
920 map, LAST_STRING_TYPE,
921 jump_if_true ? kUnsignedLessThanEqual : kUnsignedGreaterThan, target,
922 distance);
923#endif
924}
925
926inline void MaglevAssembler::JumpIfString(Register heap_object, Label* target,
927 Label::Distance distance) {
928 TemporaryRegisterScope temps(this);
929 Register scratch = temps.AcquireScratch();
930#ifdef V8_COMPRESS_POINTERS
931 LoadCompressedMap(scratch, heap_object);
932#else
933 LoadMap(scratch, heap_object);
934#endif
935 JumpIfStringMap(scratch, target, distance, true);
936}
937
939 Label* target,
940 Label::Distance distance) {
941 TemporaryRegisterScope temps(this);
942 Register scratch = temps.AcquireScratch();
943#ifdef V8_COMPRESS_POINTERS
944 LoadCompressedMap(scratch, heap_object);
945#else
946 LoadMap(scratch, heap_object);
947#endif
948 JumpIfStringMap(scratch, target, distance, false);
949}
950
952 Register heap_object, Label* if_true, Label::Distance true_distance,
953 bool fallthrough_when_true, Label* if_false, Label::Distance false_distance,
954 bool fallthrough_when_false) {
955 BranchOnObjectTypeInRange(heap_object, FIRST_STRING_TYPE, LAST_STRING_TYPE,
956 if_true, true_distance, fallthrough_when_true,
957 if_false, false_distance, fallthrough_when_false);
958}
959
961 if (v8_flags.debug_code) {
962 // Check if {string} is a string.
963 AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
964 AbortReason::kUnexpectedValue);
965 }
967 sizeof(int32_t));
968}
969
971 Register string) {
972 if (v8_flags.slow_debug_code) {
973 TemporaryRegisterScope temps(this);
974 Register scratch = temps.AcquireScratch();
975 LoadInstanceType(scratch, string);
976 Label ok;
979 Abort(AbortReason::kUnexpectedValue);
980 bind(&ok);
981 }
982 LoadTaggedField(result, string, offsetof(ThinString, actual_));
983}
984
986#ifdef V8_COMPRESS_POINTERS
988#else
989 MacroAssembler::LoadMap(dst, obj);
990#endif
991}
992
999
1001 ExceptionHandlerInfo* info = node->exception_handler_info();
1002 if (!info->HasExceptionHandler()) return;
1005}
1006
1008 NodeBase* node) {
1010 DefineLazyDeoptPoint(node->lazy_deopt_info());
1011}
1012
1020
1022 Register object, Register value, RegisterSnapshot snapshot) {
1023#if defined(V8_ENABLE_DEBUG_CODE) && !V8_DISABLE_WRITE_BARRIERS_BOOL
1024 if (!v8_flags.slow_debug_code) return;
1025
1026 ZoneLabelRef ok(this);
1027 Label* deferred_write_barrier_check = MakeDeferredCode(
1028 [](MaglevAssembler* masm, ZoneLabelRef ok, Register object,
1029 Register value, RegisterSnapshot snapshot) {
1030 masm->set_allow_call(true);
1031 {
1032 SaveRegisterStateForCall save_register_state(masm, snapshot);
1033#ifdef V8_COMPRESS_POINTERS
1034 masm->DecompressTagged(object, object);
1035 masm->DecompressTagged(value, value);
1036#endif
1037 masm->Push(object, value);
1038 masm->Move(kContextRegister, masm->native_context().object());
1039 masm->CallRuntime(Runtime::kCheckNoWriteBarrierNeeded, 2);
1040 }
1041 masm->set_allow_call(false);
1042 masm->Jump(*ok);
1043 },
1044 ok, object, value, snapshot);
1045
1046 JumpIfNotSmi(value, deferred_write_barrier_check);
1047 bind(*ok);
1048#endif // V8_ENABLE_DEBUG_CODE && !V8_DISABLE_WRITE_BARRIERS
1049}
1050
1051} // namespace maglev
1052} // namespace internal
1053} // namespace v8
1054
1055#endif // V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_