#ifndef V8_OBJECTS_JS_FUNCTION_INL_H_
#define V8_OBJECTS_JS_FUNCTION_INL_H_

#include "torque-generated/src/objects/js-function-tq-inl.inc"

  DCHECK(has_feedback_vector(cage_base));

  return code(isolate)->checks_tiering_state();

  if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {

template <typename IsolateT>
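// With V8_ENABLE_LEAPTIERING, optimized code appears to be installed through
// the function's JSDispatchHandle (with a dedicated path for
// context-specialized code); builds without leaptiering write the code
// pointer field at kCodeOffset directly.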
  DCHECK(code->is_optimized_code());
#ifdef V8_ENABLE_LEAPTIERING
  if (code->is_context_specialized()) {

  code->set_js_dispatch_handle(dispatch_handle());

                                bool keep_tiering_request) {

#ifdef V8_ENABLE_LEAPTIERING

  set_dispatch_handle(handle, mode);

  if (keep_tiering_request) {

        dispatch_handle(), TieringBuiltin::kFunctionLogNextExecution,

  WriteCodePointerField(kCodeOffset, value);

  feedback_vector()->set_log_next_execution(true);

  CHECK(!code->is_optimized_code());

  CHECK(!code->is_optimized_code());
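// Reading the installed Code back is configuration-dependent: with
// V8_ENABLE_LEAPTIERING the dispatch table entry provides it, with
// V8_ENABLE_SANDBOX the kCodeOffset field is an indirect code pointer
// (kCodeIndirectPointerTag), and otherwise it is an ordinary tagged field.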
#ifdef V8_ENABLE_LEAPTIERING

  return ReadCodePointerField(kCodeOffset, isolate);

#ifdef V8_ENABLE_LEAPTIERING

      dispatch_handle(tag));

  return ReadCodePointerField(kCodeOffset, isolate);

#if V8_ENABLE_LEAPTIERING

#elif V8_ENABLE_SANDBOX
  return RawIndirectPointerField(kCodeOffset, kCodeIndirectPointerTag)
      .Relaxed_Load(isolate);

#if V8_ENABLE_LEAPTIERING

#elif V8_ENABLE_SANDBOX
  return RawIndirectPointerField(kCodeOffset, kCodeIndirectPointerTag)
      .Acquire_Load(isolate);
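// Dispatch handle management: the handle is allocated in the process-wide
// JSDispatchTable and, as the DCHECK_EQ below suggests, is expected to match
// the one stored in the function's feedback cell; the raw handle word on the
// JSFunction is read and written with relaxed/acquire-release accessors.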
#ifdef V8_ENABLE_LEAPTIERING

  DCHECK_EQ(function->raw_feedback_cell()->dispatch_handle(),

  return AllocateAndInstallJSDispatchHandle(
      function, kDispatchHandleOffset, isolate, parameter_count, code, mode);

void JSFunction::clear_dispatch_handle() {
  WriteField<JSDispatchHandle::underlying_type>(kDispatchHandleOffset,

  Relaxed_WriteField<JSDispatchHandle::underlying_type>(kDispatchHandleOffset,

      ->js_dispatch_table()
      ->SetCodeKeepTieringRequestNoWriteBarrier(handle, new_code);

  return JSDispatchHandle(Relaxed_ReadField<JSDispatchHandle::underlying_type>(
      kDispatchHandleOffset));

  return JSDispatchHandle(Acquire_ReadField<JSDispatchHandle::underlying_type>(
      kDispatchHandleOffset));

      kSharedFunctionInfoOffset>::Relaxed_Load(cage_base, *this);
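// Tiering queries: "tiering in progress" and "log next execution" live on the
// feedback vector, while a pending tiering request under leaptiering is
// encoded in the dispatch entry itself (see the entrypoint comparisons
// below).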
#ifdef V8_ENABLE_LEAPTIERING
  if (!has_feedback_vector()) return false;
  return feedback_vector()->tiering_in_progress();

#ifdef V8_ENABLE_LEAPTIERING
  if (!has_feedback_vector()) return false;

#ifdef V8_ENABLE_LEAPTIERING

      dispatch_handle(), TieringBuiltin::kFunctionLogNextExecution, isolate);

  return feedback_vector()->log_next_execution();
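// A leaptiering tiering request repoints the dispatch entry at a tiering
// builtin, so the requested tier can be recovered by comparing the entry's
// current entrypoint against the embedded builtins' instruction starts.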
#ifdef V8_ENABLE_LEAPTIERING

  Address entrypoint = jdt->GetEntrypoint(dispatch_handle());

#define CASE(name, ...)                                                        \
  if (entrypoint == embedded_data.InstructionStartOf(Builtin::k##name)) {      \
    DCHECK(jdt->IsTieringRequested(dispatch_handle(), TieringBuiltin::k##name, \

    return TieringBuiltin::k##name !=                                          \
           TieringBuiltin::kFunctionLogNextExecution;                          \

  BUILTIN_LIST_BASE_TIERING_MAGLEV(CASE)

#ifdef V8_ENABLE_LEAPTIERING

  Address entrypoint = jdt->GetEntrypoint(dispatch_handle());

#define CASE(name, ...)                                                        \
  if (entrypoint == embedded_data.InstructionStartOf(Builtin::k##name)) {      \
    DCHECK(jdt->IsTieringRequested(dispatch_handle(), TieringBuiltin::k##name, \

    return TieringBuiltin::k##name !=                                          \
           TieringBuiltin::kFunctionLogNextExecution;                          \

  BUILTIN_LIST_BASE_TIERING_TURBOFAN(CASE)

#ifdef V8_ENABLE_LEAPTIERING

  Address entrypoint = jdt->GetEntrypoint(dispatch_handle());

  auto builtin = ([&]() -> std::optional<TieringBuiltin> {
#define CASE(name, ...)                                                        \
  if (entrypoint == embedded_data.InstructionStartOf(Builtin::k##name)) {      \
    DCHECK(jdt->IsTieringRequested(dispatch_handle(), TieringBuiltin::k##name, \

    return TieringBuiltin::k##name;                                            \

  DCHECK(!jdt->IsTieringRequested(dispatch_handle()));

    case TieringBuiltin::kOptimizeMaglevEager:

    case TieringBuiltin::kStartMaglevOptimizeJob:

    case TieringBuiltin::kOptimizeTurbofanEager:

    case TieringBuiltin::kStartTurbofanOptimizeJob:

    case TieringBuiltin::kMarkLazyDeoptimized:
    case TieringBuiltin::kMarkReoptimizeLazyDeoptimized:
    case TieringBuiltin::kFunctionLogNextExecution:

      return CodeKind::TURBOFAN_JS;

      return CodeKind::MAGLEV;

      return CodeKind::TURBOFAN_JS;

      return CodeKind::MAGLEV;
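// Tracking optimization jobs: the feedback vector records whether tiering is
// in progress (and, separately, OSR tiering), while builds without
// leaptiering keep an explicit TieringState on the feedback vector instead.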
#ifdef V8_ENABLE_LEAPTIERING

  feedback_vector()->reset_tiering_state();

  if (!has_feedback_vector()) return;
  if (osr_offset.IsNone()) {
#ifdef V8_ENABLE_LEAPTIERING

    feedback_vector()->set_tiering_in_progress(in_progress);
    if (!in_progress && was_in_progress) {

    feedback_vector()->set_tiering_state(TieringState::kInProgress);

    feedback_vector()->reset_tiering_state();

    feedback_vector()->set_osr_tiering_in_progress(in_progress);

#ifndef V8_ENABLE_LEAPTIERING

  if (!has_feedback_vector()) return TieringState::kNone;
  return feedback_vector()->tiering_state();

  DCHECK(has_feedback_vector());

  feedback_vector()->set_tiering_state(state);

  DCHECK(has_feedback_vector());
  return feedback_vector()->osr_tiering_in_progress();

  return shared(cage_base)->is_compiled() &&

  return shared()->is_compiled() &&

  return context()->global_proxy();

  return context()->native_context();
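// Prototype accessors: the prototype-or-initial-map slot holds the initial
// map, the instance prototype, or the hole; when the map is flagged as having
// a non-instance prototype, the value exposed as "prototype" is fetched from
// the map instead.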
                  kPrototypeOrInitialMapOffset,

  return map(cage_base)->has_prototype_slot();

  return IsMap(prototype_or_initial_map(cage_base, kAcquireLoad), cage_base);

  return has_initial_map(cage_base) ||
         !IsTheHole(prototype_or_initial_map(cage_base, kAcquireLoad));

  return map(cage_base)->has_non_instance_prototype() ||
         has_instance_prototype(cage_base);

  return !has_prototype_property(cage_base) ||
         map(cage_base)->has_non_instance_prototype();

  DCHECK(has_instance_prototype(cage_base));
  if (has_initial_map(cage_base)) {
    return initial_map(cage_base)->prototype(cage_base);

  DCHECK(has_prototype(cage_base));

  if (map->has_non_instance_prototype()) {
    return map->GetNonInstancePrototype(cage_base);

  return instance_prototype(cage_base);

  return code(isolate, kAcquireLoad)->builtin_id() != Builtin::kCompileLazy &&
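// Code flushing: the GC may flush a function's bytecode or baseline code
// while the JSFunction still references it; the checks below appear to detect
// that case so ResetIfCodeFlushed can drop the function back to CompileLazy.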
  if (!IsSharedFunctionInfo(maybe_shared)) return false;

  if (!IsCode(maybe_code)) return false;

  return !shared->is_compiled() &&
         (code->builtin_id() != Builtin::kCompileLazy ||

  return code(isolate)->kind() == CodeKind::BASELINE &&
         !shared()->HasBaselineCode();

        gc_notify_updated_slot) {
  const bool kBytecodeCanFlush =

  const bool kBaselineCodeCanFlush =

  if (!kBytecodeCanFlush && !kBaselineCodeCanFlush) return;

                              kBaselineCodeCanFlush);