#ifndef V8_COMPILER_TURBOSHAFT_MEMORY_OPTIMIZATION_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_MEMORY_OPTIMIZATION_REDUCER_H_
  if (isolate->roots_table().IsRootHandle(constant->handle(),

  const Operation& input_op = graph->Get(input);
  if (input_op.Is<PhiOp>()) return true;
  return ValueNeedsWriteBarrier(graph, input_op, isolate);
  op = &graph->Get(bitcast->input());
  op = &graph->Get(binop->left());
  if (allocation == nullptr) return false;
  if (state.last_allocation == nullptr) return false;
  if (state.last_allocation == allocation) return true;
  return it->second == state.last_allocation;
  if (v8_flags.disable_write_barriers) return true;
  std::stringstream str;
  str << "MemoryOptimizationReducer could not remove write barrier for "
      << input_graph.Index(store) << ": " << store.ToString() << "\n";
  FATAL("%s", str.str().c_str());
#if V8_ENABLE_WEBASSEMBLY
    bool is_wasm = info->IsWasm() || info->IsWasmBuiltin();
#else
    bool is_wasm = false;
#endif
    info->allocation_folding()
    return Next::ReduceInputGraphStore(ig_index, store);

    if (analyzer_->skipped_write_barriers.count(ig_index)) {
      __ Store(__ MapToNewGraph(store.base()), __ MapToNewGraph(store.index()),
               __ MapToNewGraph(store.value()), store.kind, store.stored_rep,
               WriteBarrierKind::kNoWriteBarrier, store.offset,
               store.element_size_log2,
               store.maybe_initializing_or_transitioning,
               store.indirect_pointer_tag());
      return V<None>::Invalid();
    }
    return Next::ReduceInputGraphStore(ig_index, store);
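// Note on the structure above: write-barrier elision is split into two phases.
// The MemoryAnalyzer walks the input graph first and records, in
// skipped_write_barriers, the input-graph indices of stores whose barrier is
// provably unnecessary; the reducer then re-emits exactly those stores with
// the barrier dropped and lets every other store go through the default
// reduction via Next::ReduceInputGraphStore.
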
      top_address = __ ExternalConstant(
          type == AllocationType::kYoung
              ? ExternalReference::new_space_allocation_top_address(isolate_)
              : ExternalReference::old_space_allocation_top_address(isolate_));

#if V8_ENABLE_WEBASSEMBLY
      int top_address_offset =
          type == AllocationType::kYoung
              ? WasmTrustedInstanceData::kNewAllocationTopAddressOffset
              : WasmTrustedInstanceData::kOldAllocationTopAddressOffset;
    if (analyzer_->IsFoldedAllocation(__ current_operation_origin())) {
      // Fast path: this allocation was folded into a previous one, so the
      // space has already been reserved. Just bump the cached top pointer.
      __ SetVariable(top(type), __ WordPtrAdd(__ GetVariable(top(type)), size));
      __ StoreOffHeap(top_address, __ GetVariable(top(type)),
                      MemoryRepresentation::UintPtr());
      return __ BitcastWordPtrToHeapObject(

    __ SetVariable(top(type), __ LoadOffHeap(top_address,
                                             MemoryRepresentation::UintPtr()));
    // Selecting the allocation builtin for the requested space:
    __ BuiltinCode(Builtin::kAllocateInYoungGeneration, isolate_);
    __ BuiltinCode(Builtin::kAllocateInOldGeneration, isolate_);

#if V8_ENABLE_WEBASSEMBLY
    builtin = Builtin::kWasmAllocateInYoungGeneration;
    builtin = Builtin::kWasmAllocateInOldGeneration;
    static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
    __ BuiltinCode(Builtin::kWasmAllocateInYoungGeneration, isolate_);
    __ BuiltinCode(Builtin::kWasmAllocateInOldGeneration, isolate_);
    Block* call_runtime = __ NewBlock();

    // Bump-pointer fast path with a runtime-call fallback block:
    uint64_t constant_size{};
    if (!__ matcher().MatchIntegralWordConstant(
    if (!constant_size) {
    __ BitcastWordPtrToHeapObject(__ WordPtrAdd(
    __ GotoIfNot(LIKELY(__ UintPtrLessThan(new_top, limit)), call_runtime);
    __ SetVariable(top(type), new_top);
    if (constant_size || __ Bind(call_runtime)) {
    __ BindReachable(done);
    if (auto c = analyzer_->ReservedSize(__ current_operation_origin())) {
      reservation_size = __ UintPtrConstant(*c);
    } else {
      reservation_size = size;
    }

    __ GotoIfNot(__ UintPtrLessThan(
    __ Branch(__ UintPtrLessThan(
                  __ WordPtrAdd(__ GetVariable(top(type)), reservation_size),

    if (__ Bind(call_runtime)) {
      __ SetVariable(top(type),
                     __ WordPtrSub(__ BitcastHeapObjectToWordPtr(allocated),

    __ BindReachable(done);
    __ SetVariable(top(type), __ WordPtrAdd(__ GetVariable(top(type)), size));
    __ StoreOffHeap(top_address, __ GetVariable(top(type)),
                    MemoryRepresentation::UintPtr());
    return __ BitcastWordPtrToHeapObject(
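
// The Allocate reduction above emits a bump-pointer fast path as Turboshaft
// operations. Below is an illustrative, self-contained sketch of that scheme
// in plain C++ (hypothetical names such as toy::SlowPathAllocate, not the V8
// API): bump the allocation top if the object fits below the limit, otherwise
// fall back to a slow path that sets up a fresh linear allocation area.
#include <cstdint>
#include <cstdlib>

namespace toy {

inline uintptr_t top = 0;    // current allocation top of the linear area
inline uintptr_t limit = 0;  // end of the current linear allocation area

// Stand-in for the AllocateInYoungGeneration/AllocateInOldGeneration builtins:
// carve out a fresh linear area and place the object at its start.
inline uintptr_t SlowPathAllocate(size_t size) {
  const size_t kAreaSize = 64 * 1024;
  size_t area = size > kAreaSize ? size : kAreaSize;
  uintptr_t base = reinterpret_cast<uintptr_t>(std::malloc(area));
  top = base + size;
  limit = base + area;
  return base;
}

// Fast path mirroring the UintPtrLessThan(new_top, limit) check above.
inline uintptr_t BumpPointerAllocate(size_t size) {
  uintptr_t new_top = top + size;
  if (!(new_top < limit)) return SlowPathAllocate(size);
  uintptr_t result = top;  // object starts at the old top
  top = new_top;           // reserve [result, new_top)
  return result;
}

}  // namespace toy
// With allocation folding, a group of allocations reserves reservation_size
// bytes up front through the slow path once, and each folded allocation then
// only bumps the cached top.
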
#ifdef V8_ENABLE_SANDBOX
    // DecodeExternalPointer: locate the (possibly shared) external pointer
    // table for this isolate and load its base address.
    shared_external_pointer_table_address_address(
        : __ ExternalConstant(
              ExternalReference::external_pointer_table_address(
    table = __ LoadOffHeap(table_address,

#if V8_ENABLE_WEBASSEMBLY
    IsolateData::shared_external_pointer_table_offset());
    IsolateData::external_pointer_table_offset() +
    V<Word32> index =
        __ Word32ShiftRightLogical(handle, kExternalPointerIndexShift);
    V<Word64> pointer = __ LoadOffHeap(table, __ ChangeUint32ToUint64(index), 0,
                                       MemoryRepresentation::Uint64());
    if (tag_range.Size() == 1) {
      V<Word32> tag = __ TruncateWord64ToWord32(tag_bits);
      V<Word32> expected_tag = __ Word32Constant(tag_range.first);

    __ BindReachable(done);
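
// Illustrative, self-contained sketch of what DecodeExternalPointer computes
// above (hypothetical constants and table layout; the real shift, mask and
// tag encoding are defined by the sandbox configuration): a 32-bit handle is
// turned into a table index, the 64-bit entry is loaded, its tag bits are
// compared against the expected tag, and the remaining payload bits form the
// raw pointer. The real code compares a single tag when tag_range.Size() == 1
// and otherwise checks that the tag falls inside the allowed range.
#include <cstdint>

namespace toy {

constexpr uint32_t kToyIndexShift = 6;  // hypothetical
constexpr uint64_t kToyTagShift = 48;   // hypothetical
constexpr uint64_t kToyPayloadMask = (uint64_t{1} << kToyTagShift) - 1;

inline uint64_t DecodeExternalPointer(const uint64_t* table, uint32_t handle,
                                      uint64_t expected_tag) {
  uint32_t index = handle >> kToyIndexShift;  // handle -> table index
  uint64_t entry = table[index];              // 64-bit table entry
  uint64_t tag = entry >> kToyTagShift;       // upper bits carry the tag
  if (tag != expected_tag) return 0;          // tag check failed
  return entry & kToyPayloadMask;             // payload bits = raw pointer
}

}  // namespace toy
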
  std::optional<Variable> top_[2];

  top_[static_cast<int>(type)].emplace(
      limit_address = __ ExternalConstant(
          type == AllocationType::kYoung
              ? ExternalReference::new_space_allocation_limit_address(isolate_)
              : ExternalReference::old_space_allocation_limit_address(
                    isolate_));

#if V8_ENABLE_WEBASSEMBLY
      int limit_address_offset =
          type == AllocationType::kYoung
              ? WasmTrustedInstanceData::kNewAllocationLimitAddressOffset
              : WasmTrustedInstanceData::kOldAllocationLimitAddressOffset;
    return limit_address;
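
// Note: the reducer caches one `top` Variable per AllocationType (young and
// old, hence the two-element top_ array above) and pairs it with the matching
// allocation-limit address returned here; this is what lets consecutive
// allocations into the same space be folded into a single reservation whose
// interior is then handed out by bumping the cached top.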