void SubSizeAndTagObject(MaglevAssembler* masm, Register object,
                         Register size_in_bytes) {
  __ Sub(object, object, size_in_bytes);
  // ... (add the heap object tag) ...
}
template <typename T>
void AllocateRaw(MaglevAssembler* masm, Isolate* isolate,
                 RegisterSnapshot register_snapshot, Register object,
                 T size_in_bytes, AllocationType alloc_type,
                 AllocationAlignment alignment) {
  // ...
  DCHECK(masm->allow_allocate());
  // ...
  ExternalReference top = SpaceAllocationTopAddress(isolate, alloc_type);
  ExternalReference limit = SpaceAllocationLimitAddress(isolate, alloc_type);
  // ...
  ZoneLabelRef done(masm);
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.AcquireScratch();
  // {object} and {new_top} share a register; the original {object} is
  // recovered at the end by subtracting {size_in_bytes} again.
  Register new_top = object;
  // Bump-pointer fast path: load the current top, compute the new top and
  // compare it against the space's limit.
  __ Ldr(object, __ ExternalReferenceAsOperand(top, scratch));
  __ Add(new_top, object, size_in_bytes);
  __ Ldr(scratch, __ ExternalReferenceAsOperand(limit, scratch));
  __ Cmp(new_top, scratch);
  // ... (branch to the deferred AllocateSlow call if the limit is reached) ...
  __ Move(__ ExternalReferenceAsOperand(top, scratch), new_top);
  // ... (tag {object} and bind *done) ...
}
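// Illustrative sketch (not part of the original file): the fast path above is
// a bump-pointer allocation. In plain C++ terms it behaves roughly like
//
//   char* top = *top_address;                     // __ Ldr(object, ...)
//   if (top + size_in_bytes >= *limit_address) {  // __ Cmp(new_top, limit)
//     return SlowPathAllocate(...);               // deferred AllocateSlow call
//   }
//   *top_address = top + size_in_bytes;           // __ Move(top, new_top)
//   return top;                                   // tagged by SubSizeAndTagObject()
//
// top_address, limit_address and SlowPathAllocate are placeholder names for
// the external references and the deferred slow path, not identifiers from the
// original code.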
void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
                               Register object, int size_in_bytes,
                               AllocationType alloc_type,
                               AllocationAlignment alignment) {
  AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
              alloc_type, alignment);
}

void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
                               Register object, Register size_in_bytes,
                               AllocationType alloc_type,
                               AllocationAlignment alignment) {
  AllocateRaw(this, isolate_, register_snapshot, object, size_in_bytes,
              alloc_type, alignment);
}
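// Design note (not a comment from the original file): both overloads simply
// forward to the templated AllocateRaw<T> helper above, instantiated with
// T = int for statically known sizes and T = Register for sizes computed at
// runtime.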
void MaglevAssembler::OSRPrologue(Graph* graph) {
  // ...
  CHECK(!graph->has_recursive_calls());

  uint32_t source_frame_size =
      graph->min_maglev_stackslots_for_unoptimized_frame_size();
  // Pad the source frame's slot count to an odd number (see the note below).
  if (source_frame_size % 2 == 0) source_frame_size++;
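  // Note (assumption, not a comment from the original file): the padding above
  // makes the variable slot count odd; combined with the fixed part of the
  // frame, whose slot count is odd on arm64, the total frame stays a multiple
  // of 16 bytes, which is the alignment sp must keep on this architecture.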
  TemporaryRegisterScope temps(this);
  Register scratch = temps.AcquireScratch();
  // ... (verify the incoming stack size matches the expected OSR frame) ...
  SbxCheck(eq, AbortReason::kOsrUnexpectedStackSize);
  uint32_t target_frame_size =
      graph->tagged_stack_slots() + graph->untagged_stack_slots();
  // ...
  CHECK_LE(source_frame_size, target_frame_size);
  if (source_frame_size < target_frame_size) {
    // Grow the frame: first pad the tagged part with zeroed slot pairs, ...
    uint32_t additional_tagged =
        source_frame_size < graph->tagged_stack_slots()
            ? graph->tagged_stack_slots() - source_frame_size
            : 0;
    uint32_t additional_tagged_double =
        additional_tagged / 2 + additional_tagged % 2;
    for (size_t i = 0; i < additional_tagged_double; ++i) {
      // ... (push a pair of zeroed tagged slots) ...
    }
    // ... then extend sp for whatever untagged part is still missing.
    uint32_t size_so_far = source_frame_size + additional_tagged_double * 2;
    CHECK_LE(size_so_far, target_frame_size);
    if (size_so_far < target_frame_size) {
      // ... (extend sp by the remaining slots) ...
    }
  }
}
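// Illustrative check (not part of the original file): additional_tagged_double
// above is ceil(additional_tagged / 2), i.e. the number of slot pairs needed
// to cover the extra tagged slots. For example, 5 extra slots need 3 pairs:
static_assert(5 / 2 + 5 % 2 == 3);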
void MaglevAssembler::Prologue(Graph* graph) {
  TemporaryRegisterScope temps(this);
  // ...
  temps.Include({x14, x15});
  // ...
  if (graph->has_recursive_calls()) {
    // ... (bind the entry label so recursive calls can target it) ...
  }

#ifndef V8_ENABLE_LEAPTIERING
  // Tiering support: tail-call the tiering builtin if the feedback vector says
  // this code needs processing.
  // ...
  using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
  Register flags = D::GetRegisterParameter(D::kFlags);
  Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
  // ... (DCHECK that flags and feedback_vector do not alias the JS calling
  //      convention registers) ...
  DCHECK(!temps.Available().has(flags));
  DCHECK(!temps.Available().has(feedback_vector));
  Move(feedback_vector,
       compilation_info()->toplevel_compilation_unit()->feedback().object());
  // ...
  TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot,
                  LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
                      flags, feedback_vector, CodeKind::MAGLEV));
#endif  // !V8_ENABLE_LEAPTIERING
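  // Note (assumption, not a comment from the original file): flags and
  // feedback_vector above are read from the tiering builtin's call descriptor,
  // so that when processing is needed the tail call can be made without moving
  // any registers; the DCHECKs guard against the temporary scope handing out
  // those same registers.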
  // ... (enter the MAGLEV frame and push the fixed frame slots: context,
  //      closure and argument count) ...

  // Initialize stack slots.
  if (graph->tagged_stack_slots() > 0) {
    // ...
    int tagged_two_slots_count = graph->tagged_stack_slots() / 2;
    remaining_stack_slots -= 2 * tagged_two_slots_count;
    // ...
    const int kLoopUnrollSize = 8;
    if (tagged_two_slots_count < kLoopUnrollSize) {
      for (int i = 0; i < tagged_two_slots_count; i++) {
        // ... (push a pair of zeroed tagged slots) ...
      }
    } else {
      TemporaryRegisterScope temps(this);
      Register count = temps.AcquireScratch();
      // Peel off the remainder so the loop below runs whole unrolled batches.
      int first_slots = tagged_two_slots_count % kLoopUnrollSize;
      for (int i = 0; i < first_slots; ++i) {
        // ... (push a pair of zeroed tagged slots) ...
      }
      Move(count, tagged_two_slots_count / kLoopUnrollSize);
      // The loop is entered unconditionally, so it must run at least once.
      DCHECK_GT(tagged_two_slots_count / kLoopUnrollSize, 0);
      // ... (loop head) ...
      for (int i = 0; i < kLoopUnrollSize; ++i) {
        // ... (push a pair of zeroed tagged slots) ...
      }
      Subs(count, count, Immediate(1));
      // ... (branch back to the loop head while count > 0) ...
    }
  }
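  // Illustrative check (not part of the original file): the peeled
  // "first_slots" stores plus the unrolled loop cover the whole count. For
  // example, 19 pair-stores with kLoopUnrollSize == 8 emit 19 % 8 == 3 stores
  // up front and then loop 19 / 8 == 2 times over 8 stores each:
  static_assert(19 % 8 + (19 / 8) * 8 == 19);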
  if (remaining_stack_slots > 0) {
    // Round up to an even number of slots so sp stays 16-byte aligned.
    remaining_stack_slots += (remaining_stack_slots % 2);
    // ... (extend sp by the remaining slots; they need no initialization) ...
  }
}
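// Illustrative check (not part of the original file): the rounding above bumps
// an odd remaining slot count to the next even one (e.g. 7 -> 8), so the final
// sp adjustment stays a multiple of 16 bytes:
static_assert((7 + 7 % 2) * kSystemPointerSize % 16 == 0);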
void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
                                                 Label* eager_deopt_entry,
                                                 size_t lazy_deopt_count,
                                                 Label* lazy_deopt_entry) {
  ForceConstantPoolEmissionWithoutJump();
  // ...
  size_t deopt_count = eager_deopt_count + lazy_deopt_count;
  // ... (make sure the veneer pool has room for deopt_count fixed-size
  //      exits) ...

  TemporaryRegisterScope scope(this);
  Register scratch = scope.AcquireScratch();
  if (eager_deopt_count > 0) {
    Bind(eager_deopt_entry);
    // ... (load the eager deoptimization entry builtin and jump to it) ...
  }
  if (lazy_deopt_count > 0) {
    Bind(lazy_deopt_entry);
    // ... (load the lazy deoptimization entry builtin and jump to it) ...
  }
}
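// Note (assumption, not a comment from the original file): the constant pool
// and veneer pool are flushed/checked up front so that no pool is emitted in
// the middle of the deopt exits; every exit must keep its fixed size
// (kEagerDeoptExitSize / kLazyDeoptExitSize) for the deoptimizer to be able to
// index them.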
  // LoadSingleCharacterString: in debug code, check that the char code is in
  // the one-byte range, then index the single-character string table root.
  Assert(ls, AbortReason::kUnexpectedValue);
  // ...
  LoadRoot(table, RootIndex::kSingleCharacterStringTable);
  // ...
void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
                                         Label* char_code_fits_one_byte,
                                         Register result, Register char_code,
                                         Register scratch,
                                         CharCodeMaskMode mask_mode) {
  // ...
  ZoneLabelRef done(this);
  // ... (when mask_mode requires it, mask the char code to 16 bits:)
  And(char_code, char_code, Immediate(0xFFFF));
  // Char codes above the one-byte range are handled in deferred code that
  // allocates a two-byte string.
  JumpToDeferredIf(
      hi,  // char_code > String::kMaxOneByteCharCode (unsigned)
      [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
         ZoneLabelRef done, Register result, Register char_code,
         Register scratch) {
        // If {char_code} aliases {result}, move it to the scratch register.
        if (char_code.Aliases(result)) {
          __ Move(scratch, char_code);
          char_code = scratch;
        }
        DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
        register_snapshot.live_registers.set(char_code);
        // ... (allocate a two-byte string of length 1, store the code unit
        //      and jump to *done) ...
      },
      register_snapshot, done, result, char_code, scratch);
  if (char_code_fits_one_byte != nullptr) {
    bind(char_code_fits_one_byte);
  }
  // ... (one-byte case: load from the single-character string table and
  //      bind *done) ...
}
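// Note (assumption, not a comment from the original file): char codes at or
// below String::kMaxOneByteCharCode (0xFF) are served from the pre-built
// single-character string table, so only codes in the range 0x100..0xFFFF
// reach the deferred path and allocate a fresh two-byte string of length 1.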
void MaglevAssembler::StringCharCodeOrCodePointAt(
    BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
    RegisterSnapshot& register_snapshot, Register result, Register string,
    Register index, Register scratch1, Register scratch2,
    Label* result_fits_one_byte) {
  ZoneLabelRef done(this);
  // ...
  Label* deferred_runtime_call = MakeDeferredCode(
      [](MaglevAssembler* masm, /* ... */
         RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result,
         Register string, Register index) {
        // ...
        DCHECK(!register_snapshot.live_registers.has(string));
        DCHECK(!register_snapshot.live_registers.has(index));
        {
          SaveRegisterStateForCall save_register_state(masm, register_snapshot);
          // ... (push the arguments and call the CharCodeAt or CodePointAt
          //      runtime function, depending on mode) ...
          save_register_state.DefineSafepoint();
          // ... (untag the Smi result and move it into {result}) ...
        }
        // ...
      },
      /* ... deferred arguments ... */);

  // ...
  if (v8_flags.debug_code) {
    // Check that {string} really is a string and {index} is in bounds.
    AssertObjectTypeInRange(string, FIRST_STRING_TYPE, LAST_STRING_TYPE,
                            AbortReason::kUnexpectedValue);
    // ... (load the string length into scratch1) ...
    Cmp(index.W(), scratch1.W());
    Check(lo, AbortReason::kUnexpectedValue);
  }
#if V8_STATIC_ROOTS_BOOL
  // ... (load the string's map into the scratch register used below as
  //      instance_type) ...
#else
  // ... (load the string's instance type instead) ...
#endif

#if V8_STATIC_ROOTS_BOOL
  using StringTypeRange = InstanceTypeChecker::kUniqueMapRangeOfStringType;
  // Check that the unique string maps are adjacent and in the expected order.
  static_assert(StringTypeRange::kSeqString.first == 0);
  static_assert(StringTypeRange::kSeqString.second + Map::kSize ==
                StringTypeRange::kExternalString.first);
  static_assert(StringTypeRange::kExternalString.second + Map::kSize ==
                StringTypeRange::kConsString.first);
  static_assert(StringTypeRange::kConsString.second + Map::kSize ==
                StringTypeRange::kSlicedString.first);
  static_assert(StringTypeRange::kSlicedString.second + Map::kSize ==
                StringTypeRange::kThinString.first);
  static_assert(StringTypeRange::kThinString.second ==
                InstanceTypeChecker::kStringMapUpperBound);
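  // Note (assumption, not a comment from the original file): these
  // static_asserts pin down that the unique string maps form one contiguous,
  // representation-ordered block (seq, external, cons, sliced, thin), so the
  // dispatch below can presumably classify the representation with cheap
  // unsigned range comparisons on the map instead of loading the instance
  // type.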
  // Representation dispatch.
  TemporaryRegisterScope temps(this);
  Register representation = temps.AcquireScratch().W();
  And(representation, instance_type.W(),
      /* ... (representation mask elided in this extract) ... */);
  // ... (compare against the seq/cons/sliced/thin cases and branch; anything
  //      else goes to deferred_runtime_call) ...

  bind(&sliced_string);
  {
    TemporaryRegisterScope temps(this);
    Register offset = temps.AcquireScratch();
    LoadAndUntagTaggedSignedField(offset, string,
                                  offsetof(SlicedString, offset_));
    // ... (load the parent string, add the offset to the index and retry) ...
  }

  // Cons string: only flat cons strings (second part is the empty string) are
  // handled inline; everything else goes to the runtime.
  // ...
  LoadTaggedFieldWithoutDecompressing(second_string, string,
                                      offsetof(ConsString, second_));
  CompareRoot(second_string, RootIndex::kempty_string);
  B(deferred_runtime_call, ne);
  // ... (otherwise continue with the first part and retry) ...
  // Sequential string: choose between the one-byte and the two-byte load.
  Label two_byte_string;
#if V8_STATIC_ROOTS_BOOL
  if (InstanceTypeChecker::kTwoByteStringMapBit == 0) {
    // ... (test InstanceTypeChecker::kStringMapEncodingMask in the map and
    //      branch to two_byte_string accordingly; the inverted test follows
    //      in an else branch) ...
  }
#else
  // ... (test the encoding bit of the instance type) ...
#endif
  // ... (one-byte: load the byte at {index}) ...
  B(result_fits_one_byte);

  bind(&two_byte_string);
  Lsl(scratch, index, 1);  // Byte offset is index * 2 ...
  Add(scratch, scratch,    // ... plus the two-byte data start.
      OFFSET_OF_DATA_START(SeqTwoByteString) - kHeapObjectTag);
  // ... (kCharCodeAt loads the 16-bit unit and finishes; kCodePointAt first
  //      backs up {string} when it aliases {result}:) ...
  string_backup = scratch2;
  Mov(string_backup, string);
  // kCodePointAt only: check for a lead surrogate and, if present, combine it
  // with the trailing surrogate.
  Register first_code_point = scratch;
  And(first_code_point.W(), result.W(), Immediate(0xfc00));
  // ... (if the masked value is not 0xd800 this is not a lead surrogate:
  //      done) ...
  Add(index.W(), index.W(), Immediate(1));
  // ... (bounds-check the incremented index against the string length) ...
  Register second_code_point = scratch;
  Lsl(index, index, 1);
  // ... (add the two-byte data start offset) ...
  Ldrh(second_code_point, MemOperand(string_backup, index));
  // ...
  And(scratch2.W(), second_code_point.W(), Immediate(0xfc00));
  // ... (if the masked value is not 0xdc00 it is not a trailing surrogate:
  //      done) ...
  int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
  Add(second_code_point, second_code_point, Immediate(surrogate_offset));
  // ... (combine: result = (result << 10) + second_code_point) ...
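  // Illustrative check (not part of the original file): surrogate_offset folds
  // the subtraction of both surrogate biases into one constant, so
  //   code_point == (lead << 10) + trail + surrogate_offset.
  // For U+1F600 (lead 0xD83D, trail 0xDE00):
  static_assert((0xD83D << 10) + 0xDE00 +
                    (0x10000 - (0xD800 << 10) - 0xDC00) ==
                0x1F600);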
  // Debug only: clobber {string} and {index} to catch accidental reuse.
  Mov(string, Immediate(0xdeadbeef));
  // ...
  Mov(index, Immediate(0xdeadbeef));
  // ... (bind *done) ...
}
void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
  // ... (with the JSCVT extension, a single Fjcvtzs suffices) ...
  ZoneLabelRef done(this);
  // ... (otherwise: FPU conversion plus a deferred slow path that passes the
  //      input on the stack) ...
  DCHECK_EQ(xzr.SizeInBytes(), src.SizeInBytes());
  // ...
  Mov(dst, Operand(dst.W(), UXTW));  // Zero-extend the 32-bit result.
  // ...
}
void MaglevAssembler::TryTruncateDoubleToInt32(Register dst, DoubleRegister src,
                                               Label* fail) {
  TemporaryRegisterScope temps(this);
  DoubleRegister converted_back = temps.AcquireScratchDouble();
  // Truncate, convert back, and fail unless the round trip is exact.
  Fcvtzs(dst.W(), src);
  Scvtf(converted_back, dst.W());
  Fcmp(src, converted_back);
  JumpIf(ne, fail);
  // A zero result may still come from -0.0; check the raw input bits.
  Label check_done;
  Cbnz(dst, &check_done);
  Register input_bits = temps.AcquireScratch();
  Fmov(input_bits, src);
  Cbnz(input_bits, fail);
  bind(&check_done);
}
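// Note (assumption, not a comment from the original file): the round trip
// works because Fcvtzs/Scvtf are exact for any double that is already an
// integral value in int32 range, so src == Scvtf(Fcvtzs(src)) exactly for such
// inputs. -0.0 is the one case that still passes the Fcmp (its truncation, 0,
// converts back to +0.0, which compares equal), hence the extra Fmov/Cbnz on
// the raw bit pattern.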
void MaglevAssembler::TryTruncateDoubleToUint32(
    Register dst, DoubleRegister src, Label* fail) {
  TemporaryRegisterScope temps(this);
  DoubleRegister converted_back = temps.AcquireScratchDouble();
  // Same round-trip scheme as above, with the unsigned conversions.
  Fcvtzu(dst.W(), src);
  Ucvtf(converted_back, dst);
  Fcmp(src, converted_back);
  JumpIf(ne, fail);
  // A zero result may still come from -0.0; check the raw input bits.
  Label check_done;
  Cbnz(dst, &check_done);
  Register input_bits = temps.AcquireScratch();
  Fmov(input_bits, src);
  Cbnz(input_bits, fail);
  bind(&check_done);
}
void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
                                              DoubleRegister value,
                                              Label* success, Label* fail) {
  TemporaryRegisterScope temps(this);
  DoubleRegister converted_back = temps.AcquireScratchDouble();
  // ... (truncate {value} into {result} and convert back) ...
  Fcmp(value, converted_back);
  // ... (jump to fail on mismatch, otherwise to success) ...
}
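// Note (assumption, not a comment from the original file): unlike the
// TryTruncate helpers above there is no -0.0 check here; for an array index it
// does not matter whether a 0 came from +0.0 or -0.0.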