// SubSizeAndTagObject: recover the object start from the bumped pointer and
// apply the heap-object tag.
__ SubWord(object, object, Operand(size_in_bytes));
// ...

// AllocateRaw: inline bump-pointer allocation fast path.
DCHECK(masm->allow_allocate());
// ...
// Load the current allocation top, compute the new top, and load the
// allocation limit of the target space.
__ LoadWord(object, __ ExternalReferenceAsOperand(top, scratch));
__ AddWord(new_top, object, Operand(size_in_bytes));
__ LoadWord(scratch, __ ExternalReferenceAsOperand(limit, scratch));
// ...
// If the allocation does not fit below the limit, branch to deferred code
// that calls the allocation builtin (AllocateSlow), passing:
//   register_snapshot, object, alloc_type, size_in_bytes, done
// ...
// Fast path: publish the new allocation top, then tag the object.
__ Move(__ ExternalReferenceAsOperand(top, scratch), new_top);
// ...

// The MaglevAssembler::Allocate overloads (taking an int or a Register
// size_in_bytes) both forward to AllocateRaw(..., alloc_type, alignment);
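
// Illustration (not part of the V8 source): a minimal C++ sketch of the
// bump-pointer fast path that AllocateRaw emits as machine code. The names
// (top, limit, kTag, AllocateSlow) are assumptions for the sketch; in the
// generated code they correspond to the space's allocation top/limit
// external references, kHeapObjectTag, and the deferred builtin call.
#include <cstddef>
#include <cstdint>

namespace allocation_sketch {

constexpr uintptr_t kTag = 1;  // stand-in for kHeapObjectTag

// Stand-in for the deferred slow path that calls the allocation builtin.
inline uintptr_t AllocateSlow(size_t /*size_in_bytes*/) { return 0; }

// Returns a tagged pointer to size_in_bytes of freshly bumped memory.
inline uintptr_t Allocate(uintptr_t* top, uintptr_t limit,
                          size_t size_in_bytes) {
  uintptr_t object = *top;                     // LoadWord(object, top)
  uintptr_t new_top = object + size_in_bytes;  // AddWord(new_top, object, ...)
  if (new_top > limit) {                       // compare against the limit
    return AllocateSlow(size_in_bytes);        // deferred slow path
  }
  *top = new_top;        // Move(top, new_top): publish the new top
  return object + kTag;  // SubSizeAndTagObject: tag the object start
}

}  // namespace allocation_sketch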
// OSRPrologue: grow the unoptimized frame into the Maglev frame layout.
CHECK(!graph->has_recursive_calls());

uint32_t source_frame_size =
    graph->min_maglev_stackslots_for_unoptimized_frame_size();

// Debug-only check that the incoming OSR frame has the expected size.
MaglevAssembler::TemporaryRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
int32_t expected_osr_stack_size =
    source_frame_size * kSystemPointerSize +
    StandardFrameConstants::kFixedFrameSizeFromFp;
AddWord(scratch, sp, Operand(expected_osr_stack_size));
// ... check that this matches fp ...

uint32_t target_frame_size =
    graph->tagged_stack_slots() + graph->untagged_stack_slots();
CHECK_LE(source_frame_size, target_frame_size);
if (source_frame_size < target_frame_size) {
  // Push zeroed tagged slots first, then extend sp for the rest.
  uint32_t additional_tagged =
      source_frame_size < graph->tagged_stack_slots()
          ? graph->tagged_stack_slots() - source_frame_size
          : 0;
  for (size_t i = 0; i < additional_tagged; ++i) {
    // ... push a zeroed tagged slot ...
  }
  uint32_t size_so_far = source_frame_size + additional_tagged;
  CHECK_LE(size_so_far, target_frame_size);
  if (size_so_far < target_frame_size) {
    // ... extend sp by the remaining (untagged) slots ...
  }
}
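
// Illustration (not part of the V8 source): the OSR frame-growth arithmetic
// above, restated as plain C++. Names and the helper are assumptions made
// for the sketch.
#include <cassert>
#include <cstdint>

namespace osr_sketch {

struct FrameGrowth {
  uint32_t tagged_slots_to_push;   // zero-initialized, GC-visible slots
  uint32_t untagged_slots_to_add;  // sp is just extended, no initialization
};

inline FrameGrowth ComputeFrameGrowth(uint32_t source_frame_size,
                                      uint32_t tagged_stack_slots,
                                      uint32_t untagged_stack_slots) {
  uint32_t target_frame_size = tagged_stack_slots + untagged_stack_slots;
  assert(source_frame_size <= target_frame_size);
  // Grow the tagged part of the frame first (these slots must be zeroed so
  // the GC never sees garbage), ...
  uint32_t additional_tagged = source_frame_size < tagged_stack_slots
                                   ? tagged_stack_slots - source_frame_size
                                   : 0;
  uint32_t size_so_far = source_frame_size + additional_tagged;
  assert(size_so_far <= target_frame_size);
  // ... then extend the stack pointer for whatever untagged space remains.
  return {additional_tagged, target_frame_size - size_so_far};
}

}  // namespace osr_sketch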
// Prologue: temporaries and the tiering check.
MaglevAssembler::TemporaryRegisterScope temps(this);
// Reserve registers that cannot alias the flags/feedback-vector registers
// picked by the tiering builtins below.
temps.Include({s7, s8});
// ...
if (graph->has_recursive_calls()) {
  // ... bind the entry label used for recursive calls ...
}

#ifndef V8_ENABLE_LEAPTIERING
// Tiering support: load the feedback vector flags and, if they need
// processing, tail-call the optimization builtin.
using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
Register flags = D::GetRegisterParameter(D::kFlags);
Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector);
DCHECK(!AreAliased(flags, feedback_vector, kJavaScriptCallArgCountRegister,
                   kJSFunctionRegister, kContextRegister,
                   kJavaScriptCallNewTargetRegister));
DCHECK(!temps.Available().has(flags));
DCHECK(!temps.Available().has(feedback_vector));
Move(feedback_vector,
     compilation_info()->toplevel_compilation_unit()->feedback().object());
Label needs_processing, done;
LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
    flags, feedback_vector, CodeKind::MAGLEV, &needs_processing);
// ...
bind(&needs_processing);
TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot);
// Prologue: zero-initialize the tagged stack slots.
if (graph->tagged_stack_slots() > 0) {
  // ...
  // Unroll the fill loop by a fixed factor; frames smaller than the unroll
  // window are filled with a fully unrolled sequence instead.
  const int kLoopUnrollSize = 8;
  // ...
  // Fill the remainder slots first so the main loop runs a whole number of
  // unrolled iterations.
  for (int i = 0; i < first_slots; ++i) {
    // ... push a zeroed slot ...
  }
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register count = temps.AcquireScratch();
  // ... count = tagged_slots / kLoopUnrollSize, then loop: ...
  for (int i = 0; i < kLoopUnrollSize; ++i) {
    // ... push a zeroed slot ...
  }
  Sub64(count, count, Operand(1));
  // ... branch back while count > 0 ...
}
if (graph->untagged_stack_slots() > 0) {
  // ... just extend sp; untagged slots need no initialization ...
}
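
// Illustration (not part of the V8 source): the shape of the unrolled
// zero-fill emitted above, as plain C++ over an array standing in for the
// stack slots. The unroll factor mirrors kLoopUnrollSize.
#include <cstdint>

namespace fill_sketch {

inline void ZeroTaggedSlots(uintptr_t* slots, int tagged_slots) {
  constexpr int kLoopUnrollSize = 8;
  int filled = 0;
  // Peel the remainder first so the main loop runs whole unrolled iterations.
  int first_slots = tagged_slots % kLoopUnrollSize;
  for (int i = 0; i < first_slots; ++i) slots[filled++] = 0;
  for (int count = tagged_slots / kLoopUnrollSize; count > 0; --count) {
    for (int i = 0; i < kLoopUnrollSize; ++i) slots[filled++] = 0;
  }
}

}  // namespace fill_sketch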
// MaybeEmitDeoptBuiltinsCall: emit the shared jumps to the deoptimization
// entry builtins.
void MaglevAssembler::MaybeEmitDeoptBuiltinsCall(size_t eager_deopt_count,
                                                 Label* eager_deopt_entry,
                                                 size_t lazy_deopt_count,
                                                 Label* lazy_deopt_entry) {
  // ...
  MaglevAssembler::TemporaryRegisterScope scope(this);
  Register scratch = scope.AcquireScratch();
  if (eager_deopt_count > 0) {
    bind(eager_deopt_entry);
    // ... load the eager deoptimization entry builtin and jump to it ...
  }
  if (lazy_deopt_count > 0) {
    bind(lazy_deopt_entry);
    // ... load the lazy deoptimization entry builtin and jump to it ...
  }
}
// LoadSingleCharacterString: index the single-character string table root.
LoadRoot(table, RootIndex::kSingleCharacterStringTable);
// ...
// StringFromCharCode:
void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot,
                                         Label* char_code_fits_one_byte,
                                         Register result, Register char_code,
                                         Register scratch,
                                         CharCodeMaskMode mask_mode) {
  ZeroExtendWord(char_code, char_code);
  // ...
  ZoneLabelRef done(this);
  // ...
  And(char_code, char_code, Operand(0xFFFF));
  // ...
  // Deferred code for char codes above the one-byte range: allocate a
  // two-byte string and store the code unit into it. The deferred lambda
  // receives (masm, register_snapshot, done, result, char_code, scratch):
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // If {scratch} aliases {result}, build the string in a fresh temporary and
  // copy it into {result} at the end.
  const bool need_restore_result = (scratch == result);
  Register string =
      need_restore_result ? temps.AcquireScratch() : result;
  // If {char_code} aliases {result}, move it to {scratch} first.
  if (char_code == result) {
    __ Move(scratch, char_code);
    // ...
  }
  DCHECK(char_code != string);
  DCHECK(scratch != string);
  DCHECK(!register_snapshot.live_tagged_registers.has(char_code));
  register_snapshot.live_registers.set(char_code);
  // ... allocate the two-byte string ...
  __ And(scratch, char_code, Operand(0xFFFF));
  // ... store the code unit ...
  if (need_restore_result) {
    // ... move the allocated string into {result} ...
  }
  // The deferred call is passed:
  //   register_snapshot, done, result, char_code, scratch
  // ...
  if (char_code_fits_one_byte != nullptr) {
    bind(char_code_fits_one_byte);
  }
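
// Illustration (not part of the V8 source): the decision StringFromCharCode
// encodes. Char codes that fit one byte are served from the single-character
// string table; larger ones take the deferred path above and get a freshly
// allocated two-byte string. The constant is an assumption standing in for
// String::kMaxOneByteCharCode.
#include <cstdint>

namespace char_code_sketch {

constexpr uint32_t kMaxOneByteCharCode = 0xFF;  // assumed one-byte limit

inline bool FitsOneByte(uint32_t char_code) {
  char_code &= 0xFFFF;  // only the low 16 bits are relevant
  return char_code <= kMaxOneByteCharCode;
}

}  // namespace char_code_sketch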
constexpr Register flags = MaglevAssembler::GetFlagsRegister();
Label ConditionMet, Done;
// ...
// With static roots, an instance type that has a unique map can be checked
// by comparing the compressed map word against its read-only root pointer.
std::optional<RootIndex> expected =
    UniqueMapOfInstanceType(/* instance type */);
// ...
// expected_ptr is the read-only root pointer of that map.
li(scratch2, expected_ptr);
// Shift by zero to sign-extend the 32-bit compressed value for the compare.
Sll32(scratch2, scratch2, Operand(0));
// StringCharCodeOrCodePointAt:
void MaglevAssembler::StringCharCodeOrCodePointAt(
    BuiltinStringPrototypeCharCodeOrCodePointAt::Mode mode,
    RegisterSnapshot& register_snapshot, Register result, Register string,
    Register index, Register instance_type, [[maybe_unused]] Register scratch2,
    Label* result_fits_one_byte) {
  ZoneLabelRef done(this);
  // ...
  // Deferred slow path: call the runtime for anything the inline dispatch
  // below does not handle. The lambda receives (masm, register_snapshot,
  // done, result, string, index):
  DCHECK(!register_snapshot.live_registers.has(string));
  DCHECK(!register_snapshot.live_registers.has(index));
  // ...
  SaveRegisterStateForCall save_register_state(masm, register_snapshot);
  // ... push arguments, call the runtime, move the return value into
  //     {result} ...
  save_register_state.DefineSafepoint();
  // ...
  // Debug builds verify that every string representation was handled:
  //   ... AbortReason::kUnexpectedValue);
  // Dispatch on the string representation.
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register representation = temps.AcquireScratch();
  // ...
  bind(&sliced_string);
  {
    MaglevAssembler::TemporaryRegisterScope temps(this);
    Register offset = temps.AcquireScratch();
    // Add the slice offset to the index and continue with the parent string.
    LoadAndUntagTaggedSignedField(offset, string,
                                  offsetof(SlicedString, offset_));
    // ...
    Add32(index, index, Operand(offset));
    // ...
  }
  // Cons strings: only flat cons strings are handled inline, i.e. the second
  // part (offsetof(ConsString, second_)) must be RootIndex::kempty_string;
  // anything else goes to the runtime.
  Register second_string = instance_type;
  // ...
  Label two_byte_string;
  // ...
  // One-byte (Latin-1) string: compute the address of the code unit at
  // {index} and load a byte.
  AddWord(result, string, Operand(index));
  // ...
  bind(&two_byte_string);
  // Two-byte string: scale the index by 2 and load a 16-bit code unit.
  Sll32(scaled_index, index, Operand(1));
  AddWord(result, string, Operand(scaled_index));
  // ...
  // Code-point mode: check whether the loaded code unit is a lead surrogate
  // (0xD800..0xDBFF).
  Register first_code_point = scratch;
  And(first_code_point, result, Operand(0xfc00));
  // ...
  Add32(index, index, Operand(1));
  // ...
  // Load the following code unit and check whether it is a trail surrogate
  // (0xDC00..0xDFFF).
  Register second_code_point = scratch;
  Sll32(second_code_point, index, Operand(1));
  AddWord(second_code_point, string, second_code_point);
  Lhu(second_code_point,
      /* ... operand addressing the following code unit ... */);
  // ...
  And(scratch2, second_code_point, Operand(0xfc00));
  // ...
  // Combine the lead and trail surrogates into one code point:
  //   code_point = (lead << 10) + trail + surrogate_offset
  int surrogate_offset = 0x10000 - (0xd800 << 10) - 0xdc00;
  Add32(second_code_point, second_code_point, Operand(surrogate_offset));
  // ...
  // In debug builds, clobber the input registers afterwards to catch
  // accidental reuse.
  Li(string, 0xdeadbeef);
  // ...
  Li(index, 0xdeadbeef);
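
// Illustration (not part of the V8 source): the surrogate_offset trick used
// above. (lead << 10) + trail + surrogate_offset yields the code point,
// because surrogate_offset folds the -0xD800 and -0xDC00 biases and the
// +0x10000 base into a single constant; the generated code performs the
// equivalent with Sll32/Add32 on the two code units.
#include <cassert>
#include <cstdint>

namespace surrogate_sketch {

constexpr int kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;

inline uint32_t CombineSurrogates(uint32_t lead, uint32_t trail) {
  return (lead << 10) + trail + kSurrogateOffset;
}

inline void Check() {
  // U+1F600 is encoded as the surrogate pair 0xD83D 0xDE00.
  assert(CombineSurrogates(0xD83D, 0xDE00) == 0x1F600);
}

}  // namespace surrogate_sketch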
  // TruncateDoubleToInt32: the result of the (possibly deferred) truncation
  // is zero-extended before use.
  ZoneLabelRef done(this);
  // ...
  ZeroExtendWord(dst, dst);
  // ...

  // TryTruncateDoubleToInt32: truncate, convert back and compare; if the
  // round trip fails, take the {fail} label.
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register rcmp = temps.AcquireScratch();
  // ...
  Branch(fail, eq, rcmp, Operand(zero_reg));
  // ...

  // TryTruncateDoubleToUint32 follows the same pattern.
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register rcmp = temps.AcquireScratch();
  // ...

  // TryChangeFloat64ToIndex(Register result, DoubleRegister value,
  //                         Label* success, Label* fail):
  MaglevAssembler::TemporaryRegisterScope temps(this);
  Register rcmp = temps.AcquireScratch();
  // ...
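
// Illustration (not part of the V8 source): the round-trip check that the
// TryTruncateDoubleTo* helpers above implement with FPU instructions. The
// value is truncated, converted back and compared with the input; any
// mismatch, and -0.0 (whose sign an integer cannot keep), takes the fail
// path. This is a behavioral sketch, not the emitted instruction sequence.
#include <cmath>
#include <cstdint>
#include <optional>

namespace truncate_sketch {

inline std::optional<int32_t> TryTruncateDoubleToInt32(double value) {
  // Reject NaN and values outside the int32 range up front (the generated
  // code catches these through the round-trip comparison).
  if (!(value >= -2147483648.0 && value < 2147483648.0)) return std::nullopt;
  int32_t result = static_cast<int32_t>(value);  // truncates toward zero
  // Fail if converting back does not reproduce the input (fractional value).
  if (static_cast<double>(result) != value) return std::nullopt;
  // Fail on -0.0: the sign would be lost in an int32.
  if (result == 0 && std::signbit(value)) return std::nullopt;
  return result;
}

}  // namespace truncate_sketch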