v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
feedback-vector-inl.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
#define V8_OBJECTS_FEEDBACK_VECTOR_INL_H_

#include "src/objects/feedback-vector.h"
// Include the non-inl header before the rest of the headers.

#include <optional>

#include "src/common/globals.h"
#include "src/objects/smi.h"
#include "src/objects/tagged.h"
#include "src/roots/roots-inl.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8::internal {

#include "torque-generated/src/objects/feedback-vector-tq-inl.inc"

TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackVector)
OBJECT_CONSTRUCTORS_IMPL(FeedbackMetadata, HeapObject)

NEVER_READ_ONLY_SPACE_IMPL(FeedbackVector)
NEVER_READ_ONLY_SPACE_IMPL(ClosureFeedbackCellArray)

INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)

INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
                kCreateClosureSlotCountOffset)

int32_t FeedbackMetadata::slot_count(AcquireLoadTag) const {
  return ACQUIRE_READ_INT32_FIELD(*this, kSlotCountOffset);
}

int32_t FeedbackMetadata::create_closure_slot_count(AcquireLoadTag) const {
  return ACQUIRE_READ_INT32_FIELD(*this, kCreateClosureSlotCountOffset);
}

int32_t FeedbackMetadata::get(int index) const {
  CHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(word_count()));
  int offset = kHeaderSize + index * kInt32Size;
  return ReadField<int32_t>(offset);
}

void FeedbackMetadata::set(int index, int32_t value) {
  DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(word_count()));
  int offset = kHeaderSize + index * kInt32Size;
  WriteField<int32_t>(offset, value);
}
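
// Illustrative arithmetic for the two accessors above (the kHeaderSize value
// is assumed for the example, not taken from this header): with a
// hypothetical kHeaderSize of 8 bytes and kInt32Size of 4, get(3) reads the
// int32 word at byte offset 8 + 3 * 4 = 20 from the start of the
// FeedbackMetadata object, and set(3, v) writes that same word.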

#ifndef V8_ENABLE_LEAPTIERING
// static
constexpr uint32_t FeedbackVector::FlagMaskForNeedsProcessingCheckFrom(
    CodeKind code_kind) {
  DCHECK(CodeKindCanTierUp(code_kind));
  uint32_t flag_mask = FeedbackVector::kFlagsTieringStateIsAnyRequested |
                       FeedbackVector::kFlagsLogNextExecution |
                       FeedbackVector::kFlagsMaybeHasMaglevCode;
  if (code_kind != CodeKind::MAGLEV) {
    flag_mask |= FeedbackVector::kFlagsMaybeHasTurbofanCode;
  }
  return flag_mask;
}
#endif  // !V8_ENABLE_LEAPTIERING

bool FeedbackMetadata::is_empty() const {
  return slot_count() == 0;
}

bool FeedbackVector::is_empty() const { return length() == 0; }

DEF_GETTER(FeedbackVector, metadata, Tagged<FeedbackMetadata>) {
  return shared_function_info(cage_base)->feedback_metadata(cage_base);
}

DEF_ACQUIRE_GETTER(FeedbackVector, metadata, Tagged<FeedbackMetadata>) {
  return shared_function_info(cage_base)->feedback_metadata(cage_base,
                                                            kAcquireLoad);
}

RELAXED_INT32_ACCESSORS(FeedbackVector, invocation_count,
                        kInvocationCountOffset)

void FeedbackVector::clear_invocation_count(RelaxedStoreTag tag) {
  set_invocation_count(0, tag);
}

RELAXED_UINT8_ACCESSORS(FeedbackVector, invocation_count_before_stable,
                        kInvocationCountBeforeStableOffset)

int FeedbackVector::osr_urgency() const {
  return OsrUrgencyBits::decode(osr_state());
}

void FeedbackVector::set_osr_urgency(int urgency) {
  DCHECK(0 <= urgency && urgency <= FeedbackVector::kMaxOsrUrgency);
  static_assert(FeedbackVector::kMaxOsrUrgency <= OsrUrgencyBits::kMax);
  set_osr_state(OsrUrgencyBits::update(osr_state(), urgency));
}

void FeedbackVector::reset_osr_state() { set_osr_state(0); }

bool FeedbackVector::maybe_has_maglev_osr_code() const {
  return MaybeHasMaglevOsrCodeBit::decode(osr_state());
}

bool FeedbackVector::maybe_has_turbofan_osr_code() const {
  return MaybeHasTurbofanOsrCodeBit::decode(osr_state());
}

void FeedbackVector::set_maybe_has_optimized_osr_code(bool value,
                                                      CodeKind code_kind) {
  if (code_kind == CodeKind::MAGLEV) {
    CHECK(v8_flags.maglev_osr);
    set_osr_state(MaybeHasMaglevOsrCodeBit::update(osr_state(), value));
  } else {
    CHECK_EQ(code_kind, CodeKind::TURBOFAN_JS);
    set_osr_state(MaybeHasTurbofanOsrCodeBit::update(osr_state(), value));
  }
}

bool FeedbackVector::interrupt_budget_reset_by_ic_change() const {
  return InterruptBudgetResetByIcChangeBit::decode(flags());
}

void FeedbackVector::set_interrupt_budget_reset_by_ic_change(bool value) {
  set_flags(InterruptBudgetResetByIcChangeBit::update(flags(), value));
}

bool FeedbackVector::was_once_deoptimized() const {
  return invocation_count_before_stable(kRelaxedLoad) ==
         kInvocationCountBeforeStableDeoptSentinel;
}

#ifdef V8_ENABLE_LEAPTIERING

bool FeedbackVector::tiering_in_progress() const {
  return TieringInProgressBit::decode(flags());
}

#else

TieringState FeedbackVector::tiering_state() const {
  return TieringStateBits::decode(flags());
}

void FeedbackVector::reset_tiering_state() {
  set_tiering_state(TieringState::kNone);
}

bool FeedbackVector::log_next_execution() const {
  return LogNextExecutionBit::decode(flags());
}

void FeedbackVector::set_log_next_execution(bool value) {
  set_flags(LogNextExecutionBit::update(flags(), value));
}

Tagged<Code> FeedbackVector::optimized_code(IsolateForSandbox isolate) const {
  Tagged<MaybeWeak<HeapObject>> slot = maybe_optimized_code();
  DCHECK(slot.IsWeakOrCleared());
  Tagged<HeapObject> heap_object;
  Tagged<Code> code;
  if (slot.GetHeapObject(&heap_object)) {
    code = Cast<CodeWrapper>(heap_object)->code(isolate);
  }
  // It is possible that the maybe_optimized_code slot is cleared but the flags
  // haven't been updated yet. We update them when we execute the function next
  // time / when we create new closure.
  DCHECK_IMPLIES(!code.is_null(),
                 maybe_has_maglev_code() || maybe_has_turbofan_code());
  DCHECK_IMPLIES(!code.is_null() && code->is_maglevved(),
                 maybe_has_maglev_code());
  DCHECK_IMPLIES(!code.is_null() && code->is_turbofanned(),
                 maybe_has_turbofan_code());
  return code;
}

bool FeedbackVector::has_optimized_code() const {
  bool is_cleared = maybe_optimized_code().IsCleared();
  DCHECK_IMPLIES(!is_cleared,
                 maybe_has_maglev_code() || maybe_has_turbofan_code());
  return !is_cleared;
}

bool FeedbackVector::maybe_has_maglev_code() const {
  return MaybeHasMaglevCodeBit::decode(flags());
}

void FeedbackVector::set_maybe_has_maglev_code(bool value) {
  set_flags(MaybeHasMaglevCodeBit::update(flags(), value));
}

bool FeedbackVector::maybe_has_turbofan_code() const {
  return MaybeHasTurbofanCodeBit::decode(flags());
}

void FeedbackVector::set_maybe_has_turbofan_code(bool value) {
  set_flags(MaybeHasTurbofanCodeBit::update(flags(), value));
}

#endif  // V8_ENABLE_LEAPTIERING

std::optional<Tagged<Code>> FeedbackVector::GetOptimizedOsrCode(
    Isolate* isolate, FeedbackSlot slot) {
  Tagged<MaybeObject> maybe_code = Get(isolate, slot);
  if (maybe_code.IsCleared()) return {};

  Tagged<Code> code =
      Cast<CodeWrapper>(maybe_code.GetHeapObject())->code(isolate);
  if (code->marked_for_deoptimization()) {
    // Clear the cached Code object if deoptimized.
    // TODO(jgruber): Add tracing.
    Set(slot, ClearedValue(isolate));
    return {};
  }

  return code;
}

// Conversion from an integer index to either a slot or an ic slot.
// static
FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
  if (index == static_cast<intptr_t>(FeedbackSlot::Invalid().ToInt())) {
    return FeedbackSlot();
  }
  DCHECK_LE(static_cast<uintptr_t>(index),
            static_cast<uintptr_t>(std::numeric_limits<int>::max()));
  return FeedbackSlot(static_cast<int>(index));
}

#ifdef DEBUG
// Instead of FixedArray, the Feedback and the Extra should contain
// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
bool FeedbackVector::IsOfLegacyType(Tagged<MaybeObject> value) {
  Tagged<HeapObject> heap_object;
  if (value.GetHeapObject(&heap_object)) {
    return IsFixedArray(heap_object) && !IsHashTable(heap_object);
  }
  return false;
}
#endif  // DEBUG

Tagged<MaybeObject> FeedbackVector::Get(FeedbackSlot slot) const {
  Tagged<MaybeObject> value = raw_feedback_slots(GetIndex(slot), kRelaxedLoad);
  DCHECK(!IsOfLegacyType(value));
  return value;
}

Tagged<MaybeObject> FeedbackVector::Get(PtrComprCageBase cage_base,
                                        FeedbackSlot slot) const {
  Tagged<MaybeObject> value =
      raw_feedback_slots(cage_base, GetIndex(slot), kRelaxedLoad);
  DCHECK(!IsOfLegacyType(value));
  return value;
}

DirectHandle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(
    Isolate* isolate, int index) const {
  DCHECK_GE(index, 0);
  return direct_handle(closure_feedback_cell_array()->get(index), isolate);
}

Tagged<FeedbackCell> FeedbackVector::closure_feedback_cell(int index) const {
  DCHECK_GE(index, 0);
  return closure_feedback_cell_array()->get(index);
}

Tagged<MaybeObject> FeedbackVector::SynchronizedGet(FeedbackSlot slot) const {
  const int i = slot.ToInt();
  DCHECK_LT(static_cast<unsigned>(i), static_cast<unsigned>(this->length()));
  const int offset = kRawFeedbackSlotsOffset + i * kTaggedSize;
  Tagged<MaybeObject> value =
      TaggedField<MaybeObject>::Acquire_Load(*this, offset);
  DCHECK(!IsOfLegacyType(value));
  return value;
}

void FeedbackVector::SynchronizedSet(FeedbackSlot slot,
                                     Tagged<MaybeObject> value,
                                     WriteBarrierMode mode) {
  DCHECK(!IsOfLegacyType(value));
  const int i = slot.ToInt();
  DCHECK_LT(static_cast<unsigned>(i), static_cast<unsigned>(this->length()));
  const int offset = kRawFeedbackSlotsOffset + i * kTaggedSize;
  TaggedField<MaybeObject>::Release_Store(*this, offset, value);
  CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}

void FeedbackVector::Set(FeedbackSlot slot, Tagged<MaybeObject> value,
                         WriteBarrierMode mode) {
  DCHECK(!IsOfLegacyType(value));
  set_raw_feedback_slots(GetIndex(slot), value, kRelaxedStore, mode);
}

MaybeObjectSlot FeedbackVector::slots_start() {
  return RawMaybeWeakField(OffsetOfElementAt(0));
}

// Helper function to transform the feedback to BinaryOperationHint.

// Helper function to transform the feedback to CompareOperationHint.
template <CompareOperationFeedback::Type Feedback>
bool Is(int type_feedback) {
  return !(type_feedback & ~Feedback);
}
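
// Worked example of the subset check above (the constants are assumed to
// follow the usual CompareOperationFeedback bit encoding, where kNumber's
// mask contains kSignedSmall's bits): in that case
//   Is<CompareOperationFeedback::kNumber>(CompareOperationFeedback::kSignedSmall)
// is true, because the feedback sets no bit outside kNumber, while feedback
// that additionally carries a non-number bit makes the same check false.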

CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
  if (Is<CompareOperationFeedback::kNone>(type_feedback)) {
    return CompareOperationHint::kNone;
  }

  if (Is<CompareOperationFeedback::kSignedSmall>(type_feedback)) {
    return CompareOperationHint::kSignedSmall;
  } else if (Is<CompareOperationFeedback::kNumber>(type_feedback)) {
    return CompareOperationHint::kNumber;
  } else if (Is<CompareOperationFeedback::kNumberOrBoolean>(type_feedback)) {
    return CompareOperationHint::kNumberOrBoolean;
  }

  if (Is<CompareOperationFeedback::kInternalizedString>(type_feedback)) {
    return CompareOperationHint::kInternalizedString;
  } else if (Is<CompareOperationFeedback::kString>(type_feedback)) {
    return CompareOperationHint::kString;
  }

  if (Is<CompareOperationFeedback::kReceiver>(type_feedback)) {
    return CompareOperationHint::kReceiver;
  } else if (Is<CompareOperationFeedback::kReceiverOrNullOrUndefined>(
                 type_feedback)) {
    return CompareOperationHint::kReceiverOrNullOrUndefined;
  }

  if (Is<CompareOperationFeedback::kBigInt64>(type_feedback)) {
    return CompareOperationHint::kBigInt64;
  } else if (Is<CompareOperationFeedback::kBigInt>(type_feedback)) {
    return CompareOperationHint::kBigInt;
  }

  if (Is<CompareOperationFeedback::kSymbol>(type_feedback)) {
    return CompareOperationHint::kSymbol;
  }

  DCHECK(Is<CompareOperationFeedback::kAny>(type_feedback));
  return CompareOperationHint::kAny;
}

// Helper function to transform the feedback to ForInHint.
ForInHint ForInHintFromFeedback(ForInFeedback type_feedback) {
  switch (type_feedback) {
    case ForInFeedback::kNone:
      return ForInHint::kNone;
    case ForInFeedback::kEnumCacheKeys:
      return ForInHint::kEnumCacheKeys;
    case ForInFeedback::kEnumCacheKeysAndIndices:
      return ForInHint::kEnumCacheKeysAndIndices;
    default:
      return ForInHint::kAny;
  }
  UNREACHABLE();
}

DirectHandle<Symbol> FeedbackVector::UninitializedSentinel(Isolate* isolate) {
  return isolate->factory()->uninitialized_symbol();
}

Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
  return isolate->factory()->megamorphic_symbol();
}

DirectHandle<Symbol> FeedbackVector::MegaDOMSentinel(Isolate* isolate) {
  return isolate->factory()->mega_dom_symbol();
}

Tagged<Symbol> FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
  return ReadOnlyRoots(isolate).uninitialized_symbol();
}

bool FeedbackMetadataIterator::HasNext() const {
  return next_slot_.ToInt() < metadata()->slot_count();
}

template <typename T>
Handle<T> NexusConfig::NewHandle(Tagged<T> object) const {
  if (mode() == Mode::MainThread) {
    return handle(object, isolate_);
  }
  DCHECK_EQ(mode(), Mode::BackgroundThread);
  return handle(object, local_heap_);
}

Tagged<MaybeObject> NexusConfig::GetFeedback(Tagged<FeedbackVector> vector,
                                             FeedbackSlot slot) const {
  return vector->SynchronizedGet(slot);
}

void NexusConfig::SetFeedback(Tagged<FeedbackVector> vector, FeedbackSlot slot,
                              Tagged<MaybeObject> feedback,
                              WriteBarrierMode mode) const {
  DCHECK(can_write());
  vector->SynchronizedSet(slot, feedback, mode);
}

MaybeObjectHandle FeedbackNexus::ToHandle(Tagged<MaybeObject> value) const {
  return value.IsCleared() ? MaybeObjectHandle()
                           : MaybeObjectHandle(config()->NewHandle(value));
}

Tagged<MaybeObject> FeedbackNexus::GetFeedback() const {
  auto pair = GetFeedbackPair();
  return pair.first;
}

Tagged<MaybeObject> FeedbackNexus::GetFeedbackExtra() const {
  auto pair = GetFeedbackPair();
  return pair.second;
}

std::pair<Tagged<MaybeObject>, Tagged<MaybeObject>>
FeedbackNexus::GetFeedbackPair() const {
  if (config()->mode() == NexusConfig::BackgroundThread &&
      feedback_cache_.has_value()) {
    return std::make_pair(FromHandle(feedback_cache_->first),
                          FromHandle(feedback_cache_->second));
  }
  auto pair = FeedbackMetadata::GetSlotSize(kind()) == 2
                  ? config()->GetFeedbackPair(vector(), slot())
                  : std::make_pair(config()->GetFeedback(vector(), slot()),
                                   Tagged<MaybeObject>());
  if (config()->mode() == NexusConfig::BackgroundThread &&
      !feedback_cache_.has_value()) {
    feedback_cache_ =
        std::make_pair(ToHandle(pair.first), ToHandle(pair.second));
  }
  return pair;
}

template <typename FeedbackType>
void FeedbackNexus::SetFeedback(Tagged<FeedbackType> feedback,
                                WriteBarrierMode mode) {
  config()->SetFeedback(vector(), slot(), feedback, mode);
}

template <typename FeedbackType, typename FeedbackExtraType>
void FeedbackNexus::SetFeedback(Tagged<FeedbackType> feedback,
                                WriteBarrierMode mode,
                                Tagged<FeedbackExtraType> feedback_extra,
                                WriteBarrierMode mode_extra) {
  config()->SetFeedbackPair(vector(), slot(), feedback, mode, feedback_extra,
                            mode_extra);
}

template <typename F>
void FeedbackNexus::IterateMapsWithUnclearedHandler(F function) const {
  // We don't need DisallowGarbageCollection here: accessing it.map() and
  // it.handle() is safe between it.Advance() and a potential GC call in
  // function(). The iterator itself is not invalidated, since it holds the
  // polymorphic array by handle.
  // TODO(370727490): Make the FeedbackIterator GC safe (e.g. look up
  // map/handler in the feedback array on-demand).
  for (FeedbackIterator it(this); !it.done(); it.Advance()) {
    DirectHandle<Map> map = config()->NewHandle(it.map());
    if (!it.handler().IsCleared()) {
      function(map);
    }
  }
}

}  // namespace v8::internal

#include "src/objects/object-macros-undef.h"

#endif  // V8_OBJECTS_FEEDBACK_VECTOR_INL_H_