#ifndef INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
#define INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_

#include <atomic>
#include <climits>
#include <cstddef>
#include <cstdint>

#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/logging.h"
#include "cppgc/sentinel-pointer.h"
#include "v8config.h"  // NOLINT(build/include_directory)

namespace cppgc {
namespace internal {

enum class WriteBarrierSlotType {
  kCompressed,
  kUncompressed,
};
#if defined(CPPGC_POINTER_COMPRESSION)

#if defined(__clang__)
// Attribute const allows the compiler to assume that CageBaseGlobal::g_base_
// doesn't change (e.g. across calls) and avoid redundant loads of it.
#define CPPGC_CONST __attribute__((const))
#define CPPGC_REQUIRE_CONSTANT_INIT \
  __attribute__((require_constant_initialization))
#else  // defined(__clang__)
#define CPPGC_CONST
#define CPPGC_REQUIRE_CONSTANT_INIT
#endif  // defined(__clang__)

class V8_EXPORT CageBaseGlobal final {
 public:
  V8_INLINE CPPGC_CONST static uintptr_t Get() {
    CPPGC_DCHECK(IsBaseConsistent());
    return g_base_.base;
  }

  V8_INLINE CPPGC_CONST static bool IsSet() {
    CPPGC_DCHECK(IsBaseConsistent());
    return (g_base_.base & ~kLowerHalfWordMask) != 0;
  }

 private:
  // The lower halfword of the base is kept as all ones to speed up
  // decompression.
  static constexpr uintptr_t kLowerHalfWordMask =
      (api_constants::kCagedHeapReservationAlignment - 1);

  static union alignas(api_constants::kCachelineSize) Base {
    uintptr_t base;
    char cache_line[api_constants::kCachelineSize];
  } g_base_ CPPGC_REQUIRE_CONSTANT_INIT;

  CageBaseGlobal() = delete;

  V8_INLINE static bool IsBaseConsistent() {
    return kLowerHalfWordMask == (g_base_.base & kLowerHalfWordMask);
  }

  friend class CageBaseGlobalUpdater;
};

#undef CPPGC_REQUIRE_CONSTANT_INIT
#undef CPPGC_CONST
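// Note added for exposition (not part of the original header): with pointer
// compression enabled, a Member slot stores a 32-bit value obtained by
// shifting the full pointer right by api_constants::kPointerCompressionShift
// and truncating it. Decompression relies on CageBaseGlobal: its base value
// has all ones in the lower halfword, so a single AND of the sign-extended,
// shifted-back value with the base restores regular pointers while keeping
// nullptr and the sentinel unchanged.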
class CompressedPointer final {
 public:
  struct AtomicInitializerTag {};

  using IntegralType = uint32_t;
  static constexpr auto kWriteBarrierSlotType =
      WriteBarrierSlotType::kCompressed;

  V8_INLINE explicit CompressedPointer(const void* value,
                                       AtomicInitializerTag) {
    StoreAtomic(value);
  }
  V8_INLINE explicit CompressedPointer(const void* ptr)
      : value_(Compress(ptr)) {}
  V8_INLINE explicit CompressedPointer(SentinelPointer)
      : value_(kCompressedSentinel) {}
  V8_INLINE const void* Load() const { return Decompress(value_); }
  V8_INLINE const void* LoadAtomic() const {
    return Decompress(
        reinterpret_cast<const std::atomic<IntegralType>&>(value_).load(
            std::memory_order_relaxed));
  }

  V8_INLINE void Store(const void* ptr) { value_ = Compress(ptr); }
  V8_INLINE void StoreAtomic(const void* value) {
    reinterpret_cast<std::atomic<IntegralType>&>(value_).store(
        Compress(value), std::memory_order_relaxed);
  }
  V8_INLINE bool IsSentinel() const { return value_ == kCompressedSentinel; }

  V8_INLINE friend bool operator==(CompressedPointer a, CompressedPointer b) {
    return a.value_ == b.value_;
  }
  V8_INLINE friend bool operator!=(CompressedPointer a, CompressedPointer b) {
    return a.value_ != b.value_;
  }
  V8_INLINE friend bool operator<(CompressedPointer a, CompressedPointer b) {
    return a.value_ < b.value_;
  }
  V8_INLINE friend bool operator<=(CompressedPointer a, CompressedPointer b) {
    return a.value_ <= b.value_;
  }
  V8_INLINE friend bool operator>(CompressedPointer a, CompressedPointer b) {
    return a.value_ > b.value_;
  }
  V8_INLINE friend bool operator>=(CompressedPointer a, CompressedPointer b) {
    return a.value_ >= b.value_;
  }
  static V8_INLINE IntegralType Compress(const void* ptr) {
    static_assert(SentinelPointer::kSentinelValue ==
                      1 << api_constants::kPointerCompressionShift,
                  "The compression scheme relies on the sentinel encoded as 1 "
                  "<< kPointerCompressionShift");
    static constexpr size_t kGigaCageMask =
        ~(api_constants::kCagedHeapReservationAlignment - 1);
    static constexpr size_t kPointerCompressionShiftMask =
        (1 << api_constants::kPointerCompressionShift) - 1;

    CPPGC_DCHECK(CageBaseGlobal::IsSet());
    const uintptr_t base = CageBaseGlobal::Get();
    CPPGC_DCHECK(!ptr || ptr == kSentinelPointer ||
                 (base & kGigaCageMask) ==
                     (reinterpret_cast<uintptr_t>(ptr) & kGigaCageMask));
    CPPGC_DCHECK(
        (reinterpret_cast<uintptr_t>(ptr) & kPointerCompressionShiftMask) == 0);

    const auto uptr = reinterpret_cast<uintptr_t>(ptr);
    // Shift the pointer and truncate it to 32 bits.
    auto compressed = static_cast<IntegralType>(
        uptr >> api_constants::kPointerCompressionShift);
    // Normal compressed pointers must have the most significant bit set.
    CPPGC_DCHECK((!compressed || compressed == kCompressedSentinel) ||
                 (compressed & (1 << 31)));
    return compressed;
  }
  static V8_INLINE void* Decompress(IntegralType ptr) {
    CPPGC_DCHECK(CageBaseGlobal::IsSet());
    const uintptr_t base = CageBaseGlobal::Get();
    return Decompress(ptr, base);
  }
  static V8_INLINE void* Decompress(IntegralType ptr, uintptr_t base) {
    CPPGC_DCHECK(base == CageBaseGlobal::Get());
    // Sign-extend the compressed value and shift it back. This yields all ones
    // in the base part of the address for regular pointers; shifting the
    // unsigned value avoids undefined behavior.
    const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr))
                          << api_constants::kPointerCompressionShift;
    // ANDing with the base restores the base part for regular pointers, while
    // nullptr and the sentinel (which carry no base bits) stay as-is.
    return reinterpret_cast<void*>(mask & base);
  }
  // For a given memory `address`, passes every pointer value that can
  // reasonably be recovered under the current compression scheme to
  // `callback`.
  template <typename Callback>
  static V8_INLINE void VisitPossiblePointers(const void* address,
                                              Callback callback);

 private:
  static constexpr IntegralType kCompressedSentinel =
      SentinelPointer::kSentinelValue >>
      api_constants::kPointerCompressionShift;
  IntegralType value_;
};
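// Illustrative sketch (not part of the original header), assuming `object` is
// a suitably aligned pointer into the caged heap and the cage base is set:
//
//   const auto compressed = CompressedPointer::Compress(object);
//   void* round_tripped = CompressedPointer::Decompress(compressed);
//   CPPGC_DCHECK(round_tripped == object);
//
// Compress() drops the cage-base bits; Decompress() restores them from
// CageBaseGlobal::Get(), so the round trip is lossless within the cage.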
// static
template <typename Callback>
void CompressedPointer::VisitPossiblePointers(const void* address,
                                              Callback callback) {
  const uintptr_t base = CageBaseGlobal::Get();
  CPPGC_DCHECK(base);
  // Random compressed pointers may live in either halfword of `address`
  // (e.g. due to inlined collections), so decompress both halves.
  const uint32_t compressed_low =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(address));
  callback(CompressedPointer::Decompress(compressed_low, base));
  const uint32_t compressed_high = static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(address) >> (sizeof(uint32_t) * CHAR_BIT));
  callback(CompressedPointer::Decompress(compressed_high, base));
  // Also consider the intermediate decompression value (see `Decompress()`):
  // restore the sign-extension bits that may not be present in `address`.
  // This case conveniently also recovers the full, uncompressed pointer.
  static constexpr uintptr_t kBitForIntermediateValue =
      (sizeof(uint32_t) * CHAR_BIT) + api_constants::kPointerCompressionShift;
  static constexpr uintptr_t kSignExtensionMask =
      ~((uintptr_t{1} << kBitForIntermediateValue) - 1);
  const uintptr_t intermediate_sign_extended =
      reinterpret_cast<uintptr_t>(address) | kSignExtensionMask;
  callback(reinterpret_cast<void*>(intermediate_sign_extended & base));
}
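// Illustrative sketch (not part of the original header): a conservative
// scanner can feed an arbitrary machine word to VisitPossiblePointers and
// filter the decompressed candidates itself. `MaybeRecordCandidate` is a
// hypothetical callback, not an existing cppgc API:
//
//   CompressedPointer::VisitPossiblePointers(
//       reinterpret_cast<const void*>(stack_word),
//       [](void* candidate) { /* MaybeRecordCandidate(candidate); */ });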
#endif  // defined(CPPGC_POINTER_COMPRESSION)

class RawPointer final {
 public:
  struct AtomicInitializerTag {};

  using IntegralType = uintptr_t;
  static constexpr auto kWriteBarrierSlotType =
      WriteBarrierSlotType::kUncompressed;

  V8_INLINE explicit RawPointer(const void* ptr, AtomicInitializerTag) {
    StoreAtomic(ptr);
  }
  V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}

  V8_INLINE const void* Load() const { return ptr_; }
  V8_INLINE const void* LoadAtomic() const {
    return reinterpret_cast<const std::atomic<const void*>&>(ptr_).load(
        std::memory_order_relaxed);
  }

  V8_INLINE void Store(const void* ptr) { ptr_ = ptr; }
  V8_INLINE void StoreAtomic(const void* ptr) {
    reinterpret_cast<std::atomic<const void*>&>(ptr_).store(
        ptr, std::memory_order_relaxed);
  }

  V8_INLINE bool IsSentinel() const { return ptr_ == kSentinelPointer; }

  V8_INLINE uintptr_t GetAsInteger() const {
    return reinterpret_cast<uintptr_t>(ptr_);
  }

  V8_INLINE friend bool operator==(RawPointer a, RawPointer b) {
    return a.ptr_ == b.ptr_;
  }
  V8_INLINE friend bool operator!=(RawPointer a, RawPointer b) {
    return a.ptr_ != b.ptr_;
  }
  V8_INLINE friend bool operator<(RawPointer a, RawPointer b) {
    return a.ptr_ < b.ptr_;
  }
  V8_INLINE friend bool operator<=(RawPointer a, RawPointer b) {
    return a.ptr_ <= b.ptr_;
  }
  V8_INLINE friend bool operator>(RawPointer a, RawPointer b) {
    return a.ptr_ > b.ptr_;
  }
  V8_INLINE friend bool operator>=(RawPointer a, RawPointer b) {
    return a.ptr_ >= b.ptr_;
  }

  template <typename Callback>
  static V8_INLINE void VisitPossiblePointers(const void* address,
                                              Callback callback) {
    // Without compression there is only one candidate: the pointer itself.
    return callback(const_cast<void*>(address));
  }

 private:
  const void* ptr_;
};
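// Illustrative sketch (not part of the original header): Member-style slots
// select their storage via the DefaultMemberStorage alias defined below, so
// the same code works against either representation:
//
//   DefaultMemberStorage slot{object};        // compressed or raw storage
//   const void* loaded = slot.LoadAtomic();   // relaxed atomic read
//   slot.StoreAtomic(other_object);           // relaxed atomic write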
#if defined(CPPGC_POINTER_COMPRESSION)
using DefaultMemberStorage = CompressedPointer;
#else   // !defined(CPPGC_POINTER_COMPRESSION)
using DefaultMemberStorage = RawPointer;
#endif  // defined(CPPGC_POINTER_COMPRESSION)

}  // namespace internal
}  // namespace cppgc

#endif  // INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_