#ifndef V8_COMPILER_TURBOSHAFT_STORE_STORE_ELIMINATION_REDUCER_INL_H_
#define V8_COMPILER_TURBOSHAFT_STORE_STORE_ELIMINATION_REDUCER_INL_H_
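The reducer classifies every pending store into one of three states, printed by the operator<< fragment below: Unobservable (a later store to the same field overwrites the value before anything can read it), GCObservable (only a GC safepoint could still see it), and Observable (a later load or call may read it). When control-flow paths merge, each field keeps the most conservative of its successor states, which is what the std::max_element calls further down compute. A minimal standalone sketch of that merge rule; the numeric enum values and the MergeSuccessors helper are illustrative assumptions, not the V8 definitions:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative stand-in for the reducer's three-state lattice; the ordering
// of the enumerators is what makes "merge = maximum" meaningful.
enum class StoreObservability : uint8_t {
  kUnobservable = 0,
  kGCObservable = 1,
  kObservable = 2,
};

// Merge the states seen on all successor paths: a pending store is only as
// dead as its most observable successor allows.
StoreObservability MergeSuccessors(
    const std::vector<StoreObservability>& successors) {
  return *std::max_element(successors.begin(), successors.end());
}

int main() {
  std::vector<StoreObservability> succ = {StoreObservability::kUnobservable,
                                          StoreObservability::kGCObservable};
  // Prints 1 (kGCObservable): on one path the store is dead, on the other a
  // GC safepoint might still see it, so the conservative answer wins.
  std::cout << static_cast<int>(MergeSuccessors(succ)) << "\n";
}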
switch (observability) {
  case StoreObservability::kUnobservable:
    return os << "Unobservable";
  case StoreObservability::kGCObservable:
    return os << "GCObservable";
  case StoreObservability::kObservable:
    return os << "Observable";
}
class MaybeRedundantStoresTable
    : public ChangeTrackingSnapshotTable<MaybeRedundantStoresTable,
                                         StoreObservability,
                                         MaybeRedundantStoresKeyData> {
  // ...
  // In BeginBlock(): the analysis runs backwards, so a block starts from the
  // merged snapshots of its successors.
    for (const Block* s : successors) {
      std::optional<Snapshot> s_snapshot =
          block_to_snapshot_mapping_[s->index()];
      // A loop header's back-edge successor has no snapshot yet when the loop
      // is visited for the first time.
      if (!s_snapshot.has_value()) continue;
      successor_snapshots_.push_back(*s_snapshot);
    }
    StartNewSnapshot(
        base::VectorOf(successor_snapshots_),
        [](Key, base::Vector<const StoreObservability> successors) {
          return *std::max_element(successors.begin(), successors.end());
        });
  // ...
  // In MarkStoreAsUnobservable(): a store that writes fewer bytes than the
  // store recorded under this key cannot make it redundant.
    if (size < key.data().size) return;
  // Seals the snapshot of the current block and stores it in the block ->
  // snapshot mapping. If {snapshot_has_changed} is given, it is set to
  // whether the sealed snapshot differs from the one previously recorded for
  // this block.
  void Seal(bool* snapshot_has_changed = nullptr) {
    // ...
    auto& snapshot = block_to_snapshot_mapping_[current_block_->index()];
    if (!snapshot_has_changed) {
      // The caller is not interested in changes; just record the snapshot.
      // ...
    } else if (!snapshot.has_value()) {
      // No snapshot had been recorded for this block yet.
      *snapshot_has_changed = true;
      // ...
    } else {
      *snapshot_has_changed = false;
      // The old and the new snapshot are compared key by key while merging
      // them; any differing entry marks the snapshot as changed.
      // ...
          if (successors[0] != successors[1]) *snapshot_has_changed = true;
          return *std::max_element(successors.begin(), successors.end());
      // ...
    }
    // ...
  }
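The change flag produced by Seal is what drives loop handling: blocks are visited in reverse order, and when sealing a loop header shows a snapshot that differs from the previous round, the loop's blocks are processed again (the Run fragment below resets its counter to the back-edge block for exactly that purpose). A compact standalone model of such a backwards fixed-point; Block, BackwardsFixpoint and the integer states are illustrative stand-ins, and unlike the real analysis it simply repeats whole reverse passes until nothing changes:

#include <algorithm>
#include <vector>

// Illustrative model: each block's state is a single integer that must cover
// (be >= to) the states of its successors plus a local contribution,
// mirroring how observability states only ever become more conservative.
struct Block {
  std::vector<int> successors;  // indices of successor blocks
  int local = 0;                // contribution of the block itself
};

std::vector<int> BackwardsFixpoint(const std::vector<Block>& blocks) {
  std::vector<int> state(blocks.size(), 0);
  bool changed = true;
  while (changed) {  // loop back edges may require several reverse passes
    changed = false;
    for (int b = static_cast<int>(blocks.size()) - 1; b >= 0; --b) {
      int merged = blocks[b].local;
      for (int s : blocks[b].successors) merged = std::max(merged, state[s]);
      if (merged != state[b]) {  // the analogue of *snapshot_has_changed
        state[b] = merged;
        changed = true;
      }
    }
  }
  return state;
}

int main() {
  // A two-block loop: block 0 is the header, block 1 jumps back to it.
  std::vector<Block> g = {{{1}, 0}, {{0}, 2}};
  return BackwardsFixpoint(g)[0];  // both blocks stabilize at state 2
}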
  void Print(std::ostream& os, const char* sep = "\n") const {
    bool first = true;
    for (Key key : active_keys_) {
      os << (first ? "" : sep) << key.data().base.id() << "@"
         << key.data().offset << ": " << Get(key);
      first = false;
    }
  }

  // ...

  // Accessor that lets the intrusive set of active keys find the index slot
  // stored inside each key's data.
  struct GetActiveKeysIndex {
    IntrusiveSetIndex& operator()(Key key) const {
      return key.data().active_keys_index;
    }
  };
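Behind these member functions sits a snapshot table keyed by (base, offset) pairs (key_mapping_ in the member list further down) that also remembers the byte size recorded for each key, so a narrower store cannot pretend to shadow a wider one. A minimal map-based model of the operations used by the analysis; the class, its method bodies and the exact size rule are illustrative assumptions, not the V8 implementation:

#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>

enum class StoreObservability : uint8_t {
  kUnobservable = 0,
  kGCObservable = 1,
  kObservable = 2,
};

// Toy stand-in for MaybeRedundantStoresTable: tracks, per (base, offset),
// whether a store written at the current (backwards) program point could
// still be observed later, together with the size of the tracked store.
class ToyRedundantStoresTable {
 public:
  using BaseId = uint32_t;  // stands in for the OpIndex of the store's base

  StoreObservability GetObservability(BaseId base, int32_t offset,
                                      uint8_t size) const {
    auto it = entries_.find({base, offset});
    // Nothing recorded yet, or the recorded store is narrower than this one:
    // be conservative and treat the store as observable.
    if (it == entries_.end() || it->second.size < size)
      return StoreObservability::kObservable;
    return it->second.state;
  }

  // A store of {size} bytes to (base, offset) was seen; stores further up
  // that write at most {size} bytes there become unobservable.
  void MarkStoreAsUnobservable(BaseId base, int32_t offset, uint8_t size) {
    Entry& e = entries_[{base, offset}];
    if (size < e.size) return;  // cannot shadow a wider recorded store
    e = {size, StoreObservability::kUnobservable};
  }

  // An allocation (potential GC) keeps unobservable stores GC-observable.
  void MarkAllStoresAsGCObservable() {
    for (auto& entry : entries_)
      entry.second.state =
          std::max(entry.second.state, StoreObservability::kGCObservable);
  }

  // Anything that may read heap memory makes every pending store observable.
  void MarkAllStoresAsObservable() {
    for (auto& entry : entries_)
      entry.second.state = StoreObservability::kObservable;
  }

 private:
  struct Entry {
    uint8_t size = 0;
    StoreObservability state = StoreObservability::kUnobservable;
  };
  std::map<std::pair<BaseId, int32_t>, Entry> entries_;
};

Judging from the OpEffects queries in the reference list further down (can_read_mutable_memory, requires_consistent_heap), the two bulk operations correspond to operations that may read heap memory and to operations that may trigger a GC, respectively.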
  // Blocks are visited in reverse order; so are the operations of each block.
  void Run(ZoneSet<OpIndex>& eliminable_stores,
           ZoneMap<OpIndex, uint64_t>& mergeable_store_pairs) {
    for (uint32_t processed = graph_.block_count(); processed > 0;) {
      // {processed} is decremented as blocks are visited back to front.
      // ...
      if (block.IsLoop()) {
        bool needs_revisit = false;
        table_.Seal(&needs_revisit);
        // Loop header changed: revisit the loop from its back-edge block.
        if (needs_revisit) {
          // ...
          processed = back_edge->index().id() + 1;
        }
      }
      // ...
    }
  }

  void ProcessBlock(const Block& block) {
    auto op_range = graph_.OperationIndices(block);
    for (auto it = op_range.end(); it != op_range.begin();) {
      // ...
        case Opcode::kStore: {
          const StoreOp& store = op.Cast<StoreOp>();
          // ...
          const bool is_field_store = !store.index().valid();
          const uint8_t size = store.stored_rep.SizeInBytes();
          // Only field stores into tagged, on-heap objects are tracked.
          if (is_on_heap_store && is_field_store) {
            bool is_eliminable_store = false;
            // Unobservable: shadowed by a later store to the same field.
            // ...
            is_eliminable_store = true;
            // GC-observable: droppable only if it is not an initializing or
            // transitioning store, since the GC must never see an
            // uninitialized field or a missing map transition.
            if (store.maybe_initializing_or_transitioning) {
              // ...
            } else {
              // ...
              is_eliminable_store = true;
            }
            // ...
            // Adjacent initializing stores of compressible tagged constants
            // can be merged into a single 64-bit store:
            if (COMPRESS_POINTERS_BOOL && /* ... */
                store.maybe_initializing_or_transitioning &&
                // ...
                store.stored_rep.IsCompressibleTagged()) {
                // c0 and c1 are the constant values stored by the two
                // adjacent field-initializing stores; their compressed
                // pointers become the two halves of one 64-bit immediate.
                uint32_t high = static_cast<uint32_t>(c1->handle()->ptr());
                uint32_t low = static_cast<uint32_t>(c0->handle()->ptr());
#if V8_TARGET_BIG_ENDIAN
                // On big-endian targets the two halves trade places before
                // being packed with make_uint64().
                // ...
#endif
                // ...
        case Opcode::kLoad: {
          const LoadOp& load = op.Cast<LoadOp>();
          // ...
          const bool is_field_load = !load.index().valid();
          // A load from an on-heap field makes pending stores that may alias
          // it observable again.
          if (is_on_heap_load && is_field_load) {
            // ...
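Taken together, the Store and Load cases boil the per-store decision down to a small rule over the tracked state and the store's flags. The following restatement is illustrative (the function and its parameters are not part of the V8 sources), assuming the semantics described above:

#include <cstdint>

enum class StoreObservability : uint8_t {
  kUnobservable,
  kGCObservable,
  kObservable,
};

// Can the store at the current (backwards) program point be removed?
bool IsEliminableStore(StoreObservability observability,
                       bool maybe_initializing_or_transitioning) {
  switch (observability) {
    case StoreObservability::kUnobservable:
      // A later store fully shadows this one and nothing reads it in between.
      return true;
    case StoreObservability::kGCObservable:
      // Only a GC safepoint sits between this store and the one shadowing it.
      // A plain store can go; an initializing or transitioning store must
      // stay, or the GC could scan an uninitialized field or an object whose
      // map does not match its fields yet.
      return !maybe_initializing_or_transitioning;
    case StoreObservability::kObservable:
      // A later load or call may read the value; the store must stay.
      return false;
  }
  return false;  // unreachable; silences non-exhaustive-switch warnings
}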
        // In REDUCE_INPUT_GRAPH(Store): a merged pair is re-emitted as one
        // 64-bit store of the combined constant, eliminated stores are
        // dropped, and every other store falls through to the default path.
        __ Store(__ MapToNewGraph(store.base()), value,
                 /* ..., MemoryRepresentation::Uint64(), ... */);
        // ...
        return Next::ReduceInputGraphStore(ig_index, store);
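When the analysis finds such a pair of adjacent 32-bit initializing stores of constant compressed pointers, it records the pair in mergeable_store_pairs_ together with the combined 64-bit immediate, and the reducer emits a single MemoryRepresentation::Uint64() store for it. The packing has to respect the target's byte order, which is what the V8_TARGET_BIG_ENDIAN check in the analysis is about. A standalone sketch of that packing; make_uint64 mirrors the helper referenced below, while MergedStoreValue and its parameters are purely illustrative:

#include <cstdint>
#include <iostream>

// Mirrors the make_uint64(high, low) helper referenced by the reducer.
uint64_t make_uint64(uint32_t high, uint32_t low) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

// Pack the compressed 32-bit values of two adjacent field initializations
// into the single 64-bit immediate that replaces them. {low_addr_value} is
// stored at the lower offset, {high_addr_value} four bytes above it;
// {big_endian} stands in for V8_TARGET_BIG_ENDIAN.
uint64_t MergedStoreValue(uint32_t low_addr_value, uint32_t high_addr_value,
                          bool big_endian) {
  // A little-endian store puts the low half of the 64-bit word at the lower
  // address; on big-endian targets the halves trade places.
  return big_endian ? make_uint64(low_addr_value, high_addr_value)
                    : make_uint64(high_addr_value, low_addr_value);
}

int main() {
  // Two hypothetical compressed pointers for fields at offsets 4 and 8.
  std::cout << std::hex << MergedStoreValue(0x1111, 0x2222, false) << "\n";
  // Prints 222200001111: the single 64-bit store writes 0x1111 at offset 4
  // and 0x2222 at offset 8 on a little-endian target.
}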
#define REDUCE_INPUT_GRAPH(operation)
constexpr T * begin() const
constexpr T * end() const
static V8_INLINE bool InReadOnlySpace(Tagged< HeapObject > object)
Block * LastPredecessor() const
void Set(Key key, StoreObservability new_value)
Key NewKey(MaybeRedundantStoresKeyData data, StoreObservability initial_value=StoreObservability{})
void StartNewSnapshot(base::Vector< const Snapshot > predecessors)
void Print(std::ostream &os, const char *sep="\n") const
void MarkPotentiallyAliasingStoresAsObservable(OpIndex base, int32_t offset)
ZoneAbslFlatHashMap< std::pair< OpIndex, int32_t >, Key > key_mapping_
ZoneIntrusiveSet< Key, GetActiveKeysIndex > active_keys_
const Block * current_block_
void OnNewKey(Key key, StoreObservability value)
Key map_to_key(OpIndex base, int32_t offset, uint8_t size)
GrowingBlockSidetable< std::optional< Snapshot > > block_to_snapshot_mapping_
void BeginBlock(const Block *block)
StoreObservability GetObservability(OpIndex base, int32_t offset, uint8_t size)
void MarkAllStoresAsGCObservable()
void MarkStoreAsUnobservable(OpIndex base, int32_t offset, uint8_t size)
ZoneVector< Snapshot > successor_snapshots_
MaybeRedundantStoresTable(const Graph &graph, Zone *zone)
void MarkAllStoresAsObservable()
void Seal(bool *snapshot_has_changed=nullptr)
void OnValueChange(Key key, StoreObservability old_value, StoreObservability new_value)
static constexpr MemoryRepresentation Uint64()
static constexpr OpIndex Invalid()
constexpr bool valid() const
constexpr bool valid() const
ZoneSet< OpIndex > * eliminable_stores_
void Run(ZoneSet< OpIndex > &eliminable_stores, ZoneMap< OpIndex, uint64_t > &mergeable_store_pairs)
RedundantStoreAnalysis(const Graph &graph, Zone *phase_zone)
void ProcessBlock(const Block &block)
OpIndex last_field_initialization_store_
ZoneMap< OpIndex, uint64_t > * mergeable_store_pairs_
MaybeRedundantStoresTable table_
ZoneMap< OpIndex, uint64_t > mergeable_store_pairs_
OpIndex REDUCE_INPUT_GRAPH(Store)(OpIndex ig_index, const StoreOp &store)
ZoneSet< OpIndex > eliminable_stores_
RedundantStoreAnalysis analysis_
#define COMPRESS_POINTERS_BOOL
#define TURBOSHAFT_REDUCER_BOILERPLATE(Name)
constexpr Vector< T > VectorOf(T *start, size_t size)
std::ostream & operator<<(std::ostream &os, PaddingSpace padding)
V8_INLINE const Operation & Get(const Graph &graph, OpIndex index)
base::SmallVector< Block *, 4 > SuccessorBlocks(const Block &block, const Graph &graph)
#define DCHECK_LE(v1, v2)
#define DCHECK_NOT_NULL(val)
#define DCHECK_IMPLIES(v1, v2)
#define DCHECK_NE(v1, v2)
#define DCHECK_GE(v1, v2)
#define DCHECK(condition)
#define DCHECK_EQ(v1, v2)
uint64_t make_uint64(uint32_t high, uint32_t low)
IndirectHandle< i::HeapObject > handle() const
static constexpr Kind TaggedBase()
IntrusiveSetIndex active_keys_index
IntrusiveSetIndex & operator()(Key key) const
bool can_read_mutable_memory() const
bool requires_consistent_heap() const
underlying_operation_t< Op > & Cast()
OpEffects Effects() const
OptionalOpIndex index() const