v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
heap-object-header.h
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_

#include <stdint.h>

#include <atomic>

#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/internal/member-storage.h"
#include "include/cppgc/internal/name-trait.h"
#include "src/base/bit-field.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h"

#if defined(CPPGC_CAGED_HEAP)
#include "src/heap/cppgc/caged-heap.h"
#endif  // defined(CPPGC_CAGED_HEAP)

namespace cppgc {

class Visitor;

namespace internal {

// HeapObjectHeader contains meta data per object and is prepended to each
// object.
//
// +-----------------+------+------------------------------------------+
// | name            | bits |                                          |
// +-----------------+------+------------------------------------------+
// | padding         |  32  | Only present on 64-bit platform.         |
// +-----------------+------+------------------------------------------+
// | GCInfoIndex     |  14  |                                          |
// | unused          |   1  |                                          |
// | in construction |   1  | In construction encoded as |false|.      |
// +-----------------+------+------------------------------------------+
// | size            |  15  | 17 bits because allocations are aligned. |
// | mark bit        |   1  |                                          |
// +-----------------+------+------------------------------------------+
//
// Notes:
// - See |GCInfoTable| for constraints on GCInfoIndex.
// - |size| for regular objects is encoded with 15 bits but can actually
//   represent sizes up to |kBlinkPageSize| (2^17) because allocations are
//   always 4 byte aligned (see kAllocationGranularity) on 32bit. 64bit uses
//   8 byte aligned allocations which leaves 1 bit unused.
// - |size| for large objects is encoded as 0. The size of a large object is
//   stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit
//   halves to allow potentially accessing them non-atomically.
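//
// Worked example (added for exposition; values follow the layout above): on a
// 64-bit build with 8-byte allocation granularity, a regular 48-byte
// allocation is stored in |encoded_low_| as 48 / 8 = 6 shifted past the mark
// bit, i.e. 0b1100; decoding reverses this as (0b1100 >> 1) * 8 = 48. The
// mark bit in bit 0 can be set and cleared without touching the size bits.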
class HeapObjectHeader {
 public:
  static constexpr size_t kSizeLog2 = 17;
  static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
  static constexpr uint16_t kLargeObjectSizeInHeader = 0;

  inline static HeapObjectHeader& FromObject(void* address);
  inline static const HeapObjectHeader& FromObject(const void* address);

  inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);

  // The object starts directly after the HeapObjectHeader.
  inline Address ObjectStart() const;
  template <AccessMode mode = AccessMode::kNonAtomic>
  inline Address ObjectEnd() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline GCInfoIndex GetGCInfoIndex() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t AllocatedSize() const;
  inline void SetAllocatedSize(size_t size);

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline size_t ObjectSize() const;

  template <AccessMode mode = AccessMode::kNonAtomic>
  inline bool IsLargeObject() const;

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsInConstruction() const;
  V8_EXPORT_PRIVATE void MarkAsFullyConstructed();
  // Use MarkObjectAsFullyConstructed() to mark an object as being constructed.

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsMarked() const;
  template <AccessMode = AccessMode::kNonAtomic>
  void Unmark();
  inline bool TryMarkAtomic();

  inline void MarkNonAtomic();

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsYoung() const;

  template <AccessMode = AccessMode::kNonAtomic>
  bool IsFree() const;

  inline bool IsFinalizable() const;
  void Finalize();

#if defined(CPPGC_CAGED_HEAP)
  inline void SetNextUnfinalized(HeapObjectHeader* next);
  inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
#endif  // defined(CPPGC_CAGED_HEAP)

  // Default version will retrieve `HeapObjectNameForUnnamedObject` as it is
  // configured at runtime.
  V8_EXPORT_PRIVATE HeapObjectName GetName() const;
  // Override for verifying and testing where we always want to pass the naming
  // option explicitly.
  V8_EXPORT_PRIVATE HeapObjectName
  GetName(HeapObjectNameForUnnamedObject) const;

  template <AccessMode = AccessMode::kNonAtomic>
  void TraceImpl(Visitor*) const;

 private:
  enum class EncodedHalf : uint8_t { kLow, kHigh };

  // Used in |encoded_high_|.
  using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
  using UnusedField1 = FullyConstructedField::Next<bool, 1>;
  using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
  // Used in |encoded_low_|.
  using MarkBitField = v8::base::BitField16<bool, 0, 1>;
  using SizeField =
      MarkBitField::Next<size_t, 15>;  // Use EncodeSize/DecodeSize instead.

  static constexpr size_t DecodeSize(uint16_t encoded) {
    // Essentially, gets optimized to << 1.
    return SizeField::decode(encoded) * kAllocationGranularity;
  }

  static constexpr uint16_t EncodeSize(size_t size) {
    // Essentially, gets optimized to >> 1.
    return SizeField::encode(size / kAllocationGranularity);
  }

  V8_EXPORT_PRIVATE void CheckApiConstants();

  template <AccessMode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline uint16_t LoadEncoded() const;
  template <AccessMode mode, EncodedHalf part,
            std::memory_order memory_order = std::memory_order_seq_cst>
  inline void StoreEncoded(uint16_t bits, uint16_t mask);

#if defined(V8_HOST_ARCH_64_BIT)
  // If the cage is enabled, to save on space required by sweeper metadata, we
  // store the list of to-be-finalized objects inlined in HeapObjectHeader.
#if defined(CPPGC_CAGED_HEAP)
  uint32_t next_unfinalized_ = 0;
#else   // !defined(CPPGC_CAGED_HEAP)
  uint32_t padding_ = 0;
#endif  // !defined(CPPGC_CAGED_HEAP)
#endif  // defined(V8_HOST_ARCH_64_BIT)
  uint16_t encoded_high_;
  uint16_t encoded_low_;
};

static_assert(kAllocationGranularity == sizeof(HeapObjectHeader),
              "sizeof(HeapObjectHeader) must match allocation granularity to "
              "guarantee alignment");

// static
HeapObjectHeader& HeapObjectHeader::FromObject(void* object) {
  return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(object) -
                                              sizeof(HeapObjectHeader));
}

// static
const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
  return *reinterpret_cast<const HeapObjectHeader*>(
      static_cast<ConstAddress>(object) - sizeof(HeapObjectHeader));
}
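// Illustrative note (added for exposition, not part of the V8 source): the
// header is prepended to the object payload, so FromObject() simply steps
// back sizeof(HeapObjectHeader) bytes. For any header |h| of a live object,
//   &HeapObjectHeader::FromObject(h.ObjectStart()) == &h
// holds by construction.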

HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_HOST_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
  USE(padding_);
#endif  // defined(V8_HOST_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
  DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
  DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
  DCHECK_GE(kMaxSize, size);
  encoded_low_ = EncodeSize(size);
  // Objects may get published to the marker without any other synchronization
  // (e.g., write barrier) in which case the in-construction bit is read
  // concurrently which requires reading encoded_high_ atomically. It is ok if
  // this write is not observed by the marker, since the sweeper sets the
  // in-construction bit to 0 and we can rely on that to guarantee a correct
  // answer when checking if objects are in-construction.
  v8::base::AsAtomicPtr(&encoded_high_)
      ->store(GCInfoIndexField::encode(gc_info_index),
              std::memory_order_relaxed);
  DCHECK(IsInConstruction());
#ifdef DEBUG
  CheckApiConstants();
#endif  // DEBUG
}

Address HeapObjectHeader::ObjectStart() const {
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         sizeof(HeapObjectHeader);
}

template <AccessMode mode>
Address HeapObjectHeader::ObjectEnd() const {
  DCHECK(!IsLargeObject<mode>());
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         AllocatedSize<mode>();
}

template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return GCInfoIndexField::decode(encoded);
}

template <AccessMode mode>
size_t HeapObjectHeader::AllocatedSize() const {
  // Size is immutable after construction while either marking or sweeping
  // is running, so a relaxed load (if mode == kAtomic) is enough.
  uint16_t encoded_low_value =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  const size_t size = DecodeSize(encoded_low_value);
  return size;
}

void HeapObjectHeader::SetAllocatedSize(size_t size) {
#if !defined(CPPGC_YOUNG_GENERATION)
  // With sticky bits, marked objects correspond to old objects.
  // TODO(bikineev:1029379): Consider disallowing old/marked objects to be
  // resized.
  DCHECK(!IsMarked());
#endif
  // The object may be marked (i.e. old, in case young generation is enabled).
  // Make sure to not overwrite the mark bit.
  encoded_low_ &= ~SizeField::encode(SizeField::kMax);
  encoded_low_ |= EncodeSize(size);
}

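// Illustrative note (added for exposition): because SetAllocatedSize() clears
// only the SizeField bits, resizing a marked 48-byte object to 64 bytes turns
// EncodeSize(48) | 1 into EncodeSize(64) | 1; the mark bit in bit 0 survives.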
template <AccessMode mode>
size_t HeapObjectHeader::ObjectSize() const {
  // The following DCHECK also fails for large objects.
  DCHECK_GT(AllocatedSize<mode>(), sizeof(HeapObjectHeader));
  return AllocatedSize<mode>() - sizeof(HeapObjectHeader);
}

template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
  return AllocatedSize<mode>() == kLargeObjectSizeInHeader;
}

template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return !FullyConstructedField::decode(encoded);
}

template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  return MarkBitField::decode(encoded);
}

template <AccessMode mode>
void HeapObjectHeader::Unmark() {
  DCHECK(IsMarked<mode>());
  StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
      MarkBitField::encode(false), MarkBitField::kMask);
}

bool HeapObjectHeader::TryMarkAtomic() {
  auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
  uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
  const uint16_t new_value = old_value | MarkBitField::encode(true);
  if (new_value == old_value) {
    return false;
  }
  return atomic_encoded->compare_exchange_strong(old_value, new_value,
                                                 std::memory_order_relaxed);
}

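// Illustrative note (added for exposition): TryMarkAtomic() returns true only
// for the thread whose compare-exchange actually sets the bit, so concurrent
// markers can use it to claim an object for the marking worklist exactly once.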
void HeapObjectHeader::MarkNonAtomic() {
  DCHECK(!IsMarked());
  encoded_low_ |= MarkBitField::encode(true);
}

template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
  return !IsMarked<mode>();
}

template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
  return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}

bool HeapObjectHeader::IsFinalizable() const {
  const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
  return gc_info.finalize;
}

#if defined(CPPGC_CAGED_HEAP)
void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
#if defined(CPPGC_POINTER_COMPRESSION)
  next_unfinalized_ = CompressedPointer::Compress(next);
#else   // !defined(CPPGC_POINTER_COMPRESSION)
  next_unfinalized_ = CagedHeap::OffsetFromAddress<uint32_t>(next);
#endif  // !defined(CPPGC_POINTER_COMPRESSION)
}

HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
    uintptr_t cage_base_or_mask) const {
  DCHECK(cage_base_or_mask);
#if defined(CPPGC_POINTER_COMPRESSION)
  DCHECK_EQ(
      api_constants::kCagedHeapReservationAlignment - 1,
      CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base_or_mask)));
  return reinterpret_cast<HeapObjectHeader*>(
      CompressedPointer::Decompress(next_unfinalized_, cage_base_or_mask));
#else   // !defined(CPPGC_POINTER_COMPRESSION)
  DCHECK(!CagedHeap::OffsetFromAddress(
      reinterpret_cast<void*>(cage_base_or_mask)));
  return next_unfinalized_ ? reinterpret_cast<HeapObjectHeader*>(
                                 cage_base_or_mask + next_unfinalized_)
                           : nullptr;
#endif  // !defined(CPPGC_POINTER_COMPRESSION)
}
#endif  // defined(CPPGC_CAGED_HEAP)

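// Illustrative note (added for exposition): without pointer compression, the
// next pointer is stored as a 32-bit offset from the cage base, and
// GetNextUnfinalized() rebuilds it as cage_base + offset; offset 0 doubles as
// the list terminator because no header lives at the cage base itself.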
template <AccessMode mode>
void HeapObjectHeader::TraceImpl(Visitor* visitor) const {
  const GCInfo& gc_info =
      GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex<mode>());
  return gc_info.trace(visitor, ObjectStart());
}

template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
  const uint16_t& half =
      part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) return half;
  return v8::base::AsAtomicPtr(&half)->load(memory_order);
}

template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
  // Caveat: Not all changes to HeapObjectHeader's bitfields go through
  // StoreEncoded. The following have their own implementations and need to be
  // kept in sync:
  // - HeapObjectHeader::TryMarkAtomic
  // - MarkObjectAsFullyConstructed (API)
  DCHECK_EQ(0u, bits & ~mask);
  uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) {
    half = (half & ~mask) | bits;
    return;
  }
  // No CAS loop is needed here, assuming that no other state sharing the same
  // encoded half changes at the same time.
  auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
  uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
  value = (value & ~mask) | bits;
  atomic_encoded->store(value, memory_order);
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
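For intuition, here is a minimal, self-contained sketch of the |encoded_low_| packing described above. It mirrors the bit layout (mark bit in bit 0, size in allocation-granularity units in bits 1-15) but is illustrative only; none of the names below are V8 code.

// Standalone sketch of the |encoded_low_| packing. Illustrative only; names
// mirror the header above but nothing here is taken from the V8 sources.
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace {

constexpr size_t kAllocationGranularity = 8;  // 64-bit granularity.

// The mark bit occupies bit 0; the size (in granularity units) bits 1..15.
constexpr uint16_t kMarkBitMask = 1u << 0;

constexpr uint16_t EncodeSize(size_t size) {
  return static_cast<uint16_t>((size / kAllocationGranularity) << 1);
}

constexpr size_t DecodeSize(uint16_t encoded) {
  return static_cast<size_t>(encoded >> 1) * kAllocationGranularity;
}

}  // namespace

int main() {
  uint16_t encoded_low = EncodeSize(48);  // Regular 48-byte allocation.
  assert(DecodeSize(encoded_low) == 48);

  encoded_low |= kMarkBitMask;            // Setting the mark bit...
  assert(DecodeSize(encoded_low) == 48);  // ...does not disturb the size.

  // Large objects store 0 in the header; their size lives on the page.
  assert(DecodeSize(EncodeSize(0)) == 0);
  return 0;
}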