#define FLAG_MODE_DEFINE_DEFAULTS
#define DEFINE_IMPLICATION(whenflag, thenflag)
#define DEFINE_WEAK_IMPLICATION(whenflag, thenflag)
#define DEFINE_WEAK_NEG_IMPLICATION(whenflag, thenflag)
#define DEFINE_NEG_IMPLICATION(whenflag, thenflag)
#define DEFINE_NEG_NEG_IMPLICATION(whenflag, thenflag)
#define FLAG_FULL(ftype, ctype, nam, def, cmt)
#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
#define FLAG_ALIAS(ftype, ctype, alias, nam)
#define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value)
#define DEFINE_WEAK_VALUE_IMPLICATION(whenflag, thenflag, value)
#define DEFINE_GENERIC_IMPLICATION(whenflag, statement)
#define DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, value)
#define DEFINE_NEG_VALUE_VALUE_IMPLICATION(whenflag, whenvalue, thenflag, thenvalue)
#define DEFINE_MIN_VALUE_IMPLICATION(flag, min_value)
#define DEFINE_DISABLE_FLAG_IMPLICATION(whenflag, thenflag)
#define DEFINE_REQUIREMENT(statement)
#define ENABLE_SPARKPLUG_BY_DEFAULT false
#define ARM_ARCH_DEFAULT "armv8"
#define ENABLE_LOG_COLOUR true
#define DEFINE_BOOL(nam, def, cmt)
#define DEFINE_BOOL_READONLY(nam, def, cmt)
#define DEFINE_MAYBE_BOOL(nam, cmt)
#define DEFINE_INT(nam, def, cmt)
#define DEFINE_UINT(nam, def, cmt)
#define DEFINE_UINT_READONLY(nam, def, cmt)
#define DEFINE_UINT64(nam, def, cmt)
#define DEFINE_FLOAT(nam, def, cmt)
#define DEFINE_SIZE_T(nam, def, cmt)
#define DEFINE_STRING(nam, def, cmt)
#define DEFINE_ALIAS_BOOL(alias, nam)
#define DEFINE_ALIAS_INT(alias, nam)
#define DEFINE_ALIAS_FLOAT(alias, nam)
#define DEFINE_ALIAS_SIZE_T(alias, nam)
#define DEFINE_ALIAS_STRING(alias, nam)
#define DEFINE_DEBUG_BOOL DEFINE_BOOL_READONLY
#define FLAG FLAG_FULL
#define DEFINE_EXPERIMENTAL_FEATURE(nam, cmt)
#define HARMONY_INPROGRESS_BASE(V)
#define JAVASCRIPT_INPROGRESS_FEATURES_BASE(V)
#define HARMONY_INPROGRESS(V)
#define JAVASCRIPT_INPROGRESS_FEATURES(V)
#define HARMONY_STAGED_BASE(V)
#define JAVASCRIPT_STAGED_FEATURES_BASE(V)
#define HARMONY_STAGED(V)
#define JAVASCRIPT_STAGED_FEATURES(V)
#define HARMONY_SHIPPING_BASE(V)
#define JAVASCRIPT_SHIPPING_FEATURES_BASE(V)
#define HARMONY_SHIPPING(V)
#define JAVASCRIPT_SHIPPING_FEATURES(V)
#define FLAG_INPROGRESS_FEATURES(id, description)
#define FLAG_STAGED_FEATURES(id, description)
#define FLAG_SHIPPING_FEATURES(id, description)
#define V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL false
#define V8_LAZY_SOURCE_POSITIONS_BOOL false
#define V8_LITE_MODE_BOOL false
#define V8_ALLOCATION_FOLDING_BOOL false
#define V8_DISABLE_WRITE_BARRIERS_BOOL false
#define V8_ENABLE_UNCONDITIONAL_WRITE_BARRIERS_BOOL false
#define V8_SINGLE_GENERATION_BOOL false
#define V8_ENABLE_CONSERVATIVE_STACK_SCANNING_BOOL false
#define V8_ENABLE_DIRECT_HANDLE_BOOL false
#define V8_ENABLE_LOCAL_OFF_STACK_CHECK_BOOL false
#define FUTURE_BOOL false
#define V8_JITLESS_BOOL false
#define V8_ALLOCATION_SITE_TRACKING_BOOL false
#define DEFAULT_SCAVENGER_MAX_NEW_SPACE_CAPACITY_MB 32
#define FLAG FLAG_READONLY
#define FLAG FLAG_FULL
#define V8_ENABLE_TURBOFAN_BOOL false
#define DEFINE_SLOW_TRACING_BOOL DEFINE_BOOL_READONLY
#define DEFAULT_MAX_POLYMORPHIC_MAP_COUNT 4
#define V8_SHORT_BUILTIN_CALLS_BOOL false
#define REGEXP_PEEPHOLE_OPTIMIZATION_BOOL true
#define V8_CET_SHADOW_STACK_BOOL false
#define V8_ENABLE_BLACK_ALLOCATED_PAGES_BOOL false
#define V8_ENABLE_STICKY_MARK_BITS_BOOL false
#define V8_MINOR_MS_CONCURRENT_MARKING_MIN_CAPACITY_DEFAULT 8
#define FLAG FLAG_READONLY
#define FLAG FLAG_READONLY
#define FLAG FLAG_FULL
#define DEFAULT_PROF_SAMPLING_INTERVAL 1000
#define DEFINE_PERF_PROF_BOOL(nam, cmt)
#define DEFINE_PERF_PROF_IMPLICATION(...)
#define DEFAULT_PERF_BASIC_PROF_PATH "/tmp"
#define DEFAULT_PERF_PROF_PATH "."
#define FLAG FLAG_READONLY
#define FLAG FLAG_FULL
#define FLAG FLAG_READONLY
#define LOG_FLAGS(V)
#define SET_IMPLICATIONS(V)
#define FLAG_MODE_META
#define ADD_JS_INPROGRESS_FLAG(name, desc)
#define ADD_JS_STAGED_FLAG(name, desc)
#define ADD_JS_SHIPPING_FLAG(name, desc)
#define FLAG_MODE_DEFINE_IMPLICATIONS
#define CONTRADICTION(flag1, flag2)
#define RESET_WHEN_FUZZING(flag)
#define RESET_WHEN_CORRECTNESS_FUZZING(flag)
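The three FLAG_MODE_* markers above (defaults, meta, implications) point at the usual multiple-inclusion, or X-macro, setup: one central flag list is expanded several times, with the DEFINE_*/FLAG macros redefined each time to emit flag defaults, flag metadata, or implication rules. Below is a minimal, self-contained sketch of that pattern; FLAG_LIST, Flags, PrintHelp, and the two example flags are illustrative names, not V8's actual definitions.

// Minimal sketch of the X-macro pattern suggested by the FLAG_MODE_* markers.
#include <cstdio>

// One central list of flags: V(type, name, default, comment).
#define FLAG_LIST(V)                                                    \
  V(bool, lite_mode, false, "trade performance for memory savings")     \
  V(int, max_inlined_bytecode_size, 460, "max bytecode size to inline")

// Expansion 1: storage with defaults (cf. FLAG_MODE_DEFINE_DEFAULTS).
#define DECLARE_FLAG(type, name, def, cmt) type name = def;
struct Flags {
  FLAG_LIST(DECLARE_FLAG)
};
#undef DECLARE_FLAG

// Expansion 2: metadata such as help text (cf. FLAG_MODE_META).
#define PRINT_FLAG(type, name, def, cmt) std::printf("--%s: %s\n", #name, cmt);
void PrintHelp() { FLAG_LIST(PRINT_FLAG) }
#undef PRINT_FLAG

int main() {
  Flags flags;   // holds the generated defaults
  PrintHelp();   // prints one help line per flag in the list
  return flags.max_inlined_bytecode_size > 0 ? 0 : 1;
}

An implication mode (cf. FLAG_MODE_DEFINE_IMPLICATIONS) would just be one more expansion of the same list.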
v8::internal::DEFINE_BOOL (experimental, false, "Indicates that V8 is running with experimental features enabled. This flag is typically not set explicitly but instead enabled as an implication of other flags which enable experimental features.")
v8::internal::DEFINE_BOOL (abort_on_contradictory_flags, …, "Disallow flags or implications overriding each other.")
v8::internal::DEFINE_BOOL (exit_on_contradictory_flags, false, "Exit with return code 0 on contradictory flags.")
v8::internal::DEFINE_WEAK_IMPLICATION (exit_on_contradictory_flags, abort_on_contradictory_flags)
v8::internal::DEFINE_BOOL (allow_overwriting_for_next_flag, false, "temporary disable flag contradiction to allow overwriting just the next flag")
v8::internal::DEFINE_BOOL (builtin_subclassing, …, "subclassing support in built-in methods")
v8::internal::DEFINE_BOOL (enable_sharedarraybuffer_per_context, false, "enable the SharedArrayBuffer constructor per context")
v8::internal::DEFINE_BOOL (stress_snapshot, …)
v8::internal::DEFINE_BOOL (lite_mode, V8_LITE_MODE_BOOL, "enables trade-off of performance for memory savings")
v8::internal::DEFINE_BOOL_READONLY (enable_allocation_folding, …)
v8::internal::DEFINE_BOOL_READONLY (disable_write_barriers, V8_DISABLE_WRITE_BARRIERS_BOOL, "disable write barriers when GC is non-incremental and heap contains single generation.")
v8::internal::DEFINE_BOOL_READONLY (enable_unconditional_write_barriers, …)
v8::internal::DEFINE_BOOL_READONLY (single_generation, V8_SINGLE_GENERATION_BOOL, "allocate all objects from young generation to old generation")
v8::internal::DEFINE_BOOL_READONLY (conservative_stack_scanning, …, "use conservative stack scanning")
v8::internal::DEFINE_IMPLICATION (conservative_stack_scanning, scavenger_conservative_object_pinning)
v8::internal::DEFINE_BOOL_READONLY (direct_handle, …, "use direct handles with conservative stack scanning")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (scavenger_conservative_object_pinning, "Objects reachable from the native stack during scavenge will be pinned and won't move.")
v8::internal::DEFINE_BOOL (stress_scavenger_conservative_object_pinning, …, "Treat some precise references as conservative references to stress test object pinning in Scavenger")
v8::internal::DEFINE_IMPLICATION (stress_scavenger_conservative_object_pinning, scavenger_conservative_object_pinning)
v8::internal::DEFINE_NEG_IMPLICATION (stress_scavenger_conservative_object_pinning, minor_gc_task)
v8::internal::DEFINE_VALUE_IMPLICATION (stress_scavenger_conservative_object_pinning, scavenger_max_new_space_capacity_mb, 1u)
v8::internal::DEFINE_BOOL (stress_scavenger_conservative_object_pinning_random, …, "Enables random stressing of object pinning, such that each GC would randomly pick a subset of the precise references to treat conservatively")
v8::internal::DEFINE_IMPLICATION (stress_scavenger_conservative_object_pinning_random, stress_scavenger_conservative_object_pinning)
v8::internal::DEFINE_BOOL (scavenger_precise_object_pinning, …, "Objects reachable from handles during scavenge will be pinned and won't move")
v8::internal::DEFINE_BOOL (precise_object_pinning, false, "Objects reachable from handles during GC will be pinned and won't move.")
v8::internal::DEFINE_BOOL (scavenger_promote_quarantined_pages, …)
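Several of the entries above are implication rules rather than flags: conservative_stack_scanning implies scavenger_conservative_object_pinning, and the stress pinning flag negatively implies minor_gc_task. The sketch below shows what such rules amount to when flag values are finalized; the Flags struct and ApplyImplications helper are illustrative, not V8 code.

struct Flags {
  bool conservative_stack_scanning = false;
  bool scavenger_conservative_object_pinning = false;
  bool stress_scavenger_conservative_object_pinning = false;
  bool minor_gc_task = true;
};

void ApplyImplications(Flags& f) {
  // DEFINE_IMPLICATION(when, then): enabling `when` also enables `then`.
  if (f.conservative_stack_scanning) f.scavenger_conservative_object_pinning = true;
  // DEFINE_NEG_IMPLICATION(when, then): enabling `when` forces `then` off.
  if (f.stress_scavenger_conservative_object_pinning) f.minor_gc_task = false;
}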
v8::internal::DEFINE_BOOL_READONLY (local_off_stack_check, V8_ENABLE_LOCAL_OFF_STACK_CHECK_BOOL, "check for off-stack allocation of v8::Local")
v8::internal::DEFINE_BOOL (future, …, "Implies all staged features that we want to ship in the not too far future")
v8::internal::DEFINE_BOOL (force_emit_interrupt_budget_checks, false, "force emit tier-up logic from all non-turbofan code, even if it is the top enabled tier")
v8::internal::DEFINE_BOOL_READONLY (maglev_future, …, "enable maglev features that we want to ship in the not too far future")
v8::internal::DEFINE_BOOL_READONLY (optimize_on_next_call_optimizes_to_maglev, false, "make OptimizeFunctionOnNextCall optimize to maglev instead of turbofan")
v8::internal::DEFINE_BOOL (maglev_inlining, …, "enable inlining in the maglev optimizing compiler")
v8::internal::DEFINE_BOOL (maglev_loop_peeling, true, "enable loop peeling in the maglev optimizing compiler")
v8::internal::DEFINE_BOOL (maglev_optimistic_peeled_loops, …, "enable aggressive optimizations for loops (loop SPeeling) in the maglev optimizing compiler")
v8::internal::DEFINE_INT (maglev_loop_peeling_max_size, …, "max loop size for loop peeling in the maglev optimizing compiler")
v8::internal::DEFINE_INT (maglev_loop_peeling_max_size_cumulative, 900, "max cumulative size for loop peeling in the maglev optimizing compiler")
v8::internal::DEFINE_BOOL (maglev_deopt_data_on_background, …, "Generate deopt data on background thread")
v8::internal::DEFINE_BOOL (maglev_build_code_on_background, true, "Generate code on background thread")
v8::internal::DEFINE_WEAK_IMPLICATION (maglev_build_code_on_background, maglev_deopt_data_on_background)
v8::internal::DEFINE_BOOL (maglev_destroy_on_background, true, "Destroy compilation jobs on background thread")
v8::internal::DEFINE_BOOL (maglev_inline_api_calls, …, "Inline CallApiCallback builtin into generated code")
v8::internal::DEFINE_BOOL (maglev_cons_string_elision, false, "Native support for cons strings and their elision in maglev.")
v8::internal::DEFINE_BOOL (maglev_pretenure_store_values, …, "Recursively pretenure values which are stored into pretenured allocation sites")
v8::internal::DEFINE_UINT (concurrent_maglev_max_threads, 2, "max number of threads that concurrent Maglev can use (0 for unbounded)")
v8::internal::DEFINE_BOOL (concurrent_maglev_high_priority_threads, …, "use high priority compiler threads for concurrent Maglev")
v8::internal::DEFINE_INT (max_maglev_inline_depth, 1, "max depth of functions that Maglev will inline excl. small functions")
v8::internal::DEFINE_INT (max_maglev_hard_inline_depth, …, "max depth of functions that Maglev will inline incl. small functions")
v8::internal::DEFINE_INT (max_maglev_inlined_bytecode_size, 460, "maximum size of bytecode for a single inlining")
v8::internal::DEFINE_INT (max_maglev_inlined_bytecode_size_cumulative, …, "maximum cumulative size of bytecode considered for inlining excl. small functions")
v8::internal::DEFINE_INT (max_maglev_inlined_bytecode_size_small, 27, "maximum size of bytecode considered for small function inlining")
v8::internal::DEFINE_FLOAT (min_maglev_inlining_frequency, …, "minimum frequency for inlining")
v8::internal::DEFINE_WEAK_VALUE_IMPLICATION (turbofan, max_maglev_inlined_bytecode_size_cumulative, 920)
v8::internal::DEFINE_BOOL (maglev_reuse_stack_slots, …, "reuse stack slots in the maglev optimizing compiler")
v8::internal::DEFINE_BOOL (maglev_untagged_phis, true, "enable phi untagging in the maglev optimizing compiler")
v8::internal::DEFINE_BOOL (maglev_hoist_osr_value_phi_untagging, …, "enable phi untagging to hoist untagging of osr values")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (maglev_speculative_hoist_phi_untagging, "enable phi untagging to hoist untagging of loop phi inputs (could still cause deopt loops)")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (maglev_non_eager_inlining, "enable Maglev non-eager inlining")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (turbolev_non_eager_inlining, "enable Turbolev non-eager inlining")
v8::internal::DEFINE_BOOL (maglev_inlining_following_eager_order, …)
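The Maglev flags above are normally passed on the command line; embedders can also hand them to V8 as a flag string before initialization. A small sketch, assuming the public v8::V8::SetFlagsFromString entry point from v8-initialization.h (the exact header and the set of available flags depend on the V8 version):

#include <v8-initialization.h>

void ConfigureMaglevFlags() {
  // Dashes in the command-line spelling map onto the underscored names above.
  v8::V8::SetFlagsFromString("--maglev-inlining --max-maglev-inline-depth=2");
  v8::V8::SetFlagsFromString("--no-maglev-loop-peeling");
}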
v8::internal::… (…, "…; other heap size flags (e.g. initial_heap_size) take precedence")
v8::internal::DEFINE_SIZE_T (max_shared_heap_size, …, "max size of the shared heap (in Mbytes) …")
v8::internal::… (…, "generate builtins concurrently on separate threads in mksnapshot")
v8::internal::DEFINE_BOOL (concurrent_recompilation, true, "optimizing hot functions asynchronously on a separate thread")
v8::internal::DEFINE_BOOL (trace_concurrent_recompilation, …, "track concurrent recompilation")
v8::internal::DEFINE_INT (concurrent_recompilation_queue_length, 8, "the length of the concurrent compilation queue")
v8::internal::DEFINE_INT (concurrent_recompilation_delay, …, "artificial compilation delay in ms")
v8::internal::DEFINE_BOOL (concurrent_recompilation_front_running, true, "move compile jobs to the front if recompilation is requested multiple times")
v8::internal::DEFINE_UINT (concurrent_turbofan_max_threads, …, "max number of threads that concurrent Turbofan can use (0 for unbounded)")
v8::internal::DEFINE_BOOL (stress_concurrent_inlining, …, "create additional concurrent optimization jobs but throw away result")
v8::internal::DEFINE_WEAK_VALUE_IMPLICATION (stress_concurrent_inlining, invocation_count_for_turbofan, 150)
v8::internal::DEFINE_BOOL (maglev_overwrite_budget, …, "whether maglev resets the interrupt budget")
v8::internal::DEFINE_WEAK_VALUE_IMPLICATION (maglev_overwrite_budget, invocation_count_for_turbofan, 10000)
v8::internal::DEFINE_BOOL (maglev_overwrite_osr_budget, …, "whether maglev resets the OSR interrupt budget")
v8::internal::DEFINE_WEAK_VALUE_IMPLICATION (maglev_overwrite_osr_budget, invocation_count_for_osr, 800)
v8::internal::DEFINE_BOOL (stress_concurrent_inlining_attach_code, …, "create additional concurrent optimization jobs")
v8::internal::DEFINE_IMPLICATION (stress_concurrent_inlining_attach_code, stress_concurrent_inlining)
v8::internal::DEFINE_INT (max_serializer_nesting, …, "maximum levels for nesting child serializers")
v8::internal::DEFINE_BOOL (trace_heap_broker_verbose, false, "trace the heap broker verbosely (all reports)")
v8::internal::DEFINE_BOOL (trace_heap_broker, …, "trace the heap broker (reports on missing data only)")
v8::internal::DEFINE_INT (deopt_every_n_times, …)
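Both strong and weak value implications appear above, for example stress_concurrent_inlining weakly setting invocation_count_for_turbofan to 150. Read this way, a weak value implication is a default-changing hint that an explicit user setting overrides, while DEFINE_VALUE_IMPLICATION pins the value unconditionally. A sketch of that distinction under those assumptions; TrackedFlag and the helpers are illustrative:

struct TrackedFlag {
  int value;
  bool set_by_user = false;  // true once the flag was given on the command line
};

// DEFINE_VALUE_IMPLICATION(when, then, v): unconditionally pin the value.
void StrongValueImplication(bool when, TrackedFlag& then, int v) {
  if (when) then.value = v;
}

// DEFINE_WEAK_VALUE_IMPLICATION(when, then, v): only adjust the default;
// an explicit user setting wins.
void WeakValueImplication(bool when, TrackedFlag& then, int v) {
  if (when && !then.set_by_user) then.value = v;
}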
v8::internal::DEFINE_BOOL (stress_turbo_late_spilling, false, "optimize placement of all spill instructions, not just loop-top phis")
v8::internal::DEFINE_BOOL (turbo_wasm_address_reassociation, …, "refactor address components for immediate indexing")
v8::internal::DEFINE_BOOL (concurrent_turbo_tracing, false, "allow concurrent compilation to happen in combination with trace-turbo* flags")
v8::internal::DEFINE_BOOL (optimize_maglev_optimizes_to_turbofan, …, "make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev")
v8::internal::DEFINE_STRING (trace_turbo_path, nullptr, "directory to dump generated TurboFan IR to")
v8::internal::DEFINE_STRING (trace_turbo_filter, …, "filter for tracing turbofan compilation")
v8::internal::DEFINE_STRING (trace_turbo_file_prefix, "turbo", "trace turbo graph to a file with given prefix")
v8::internal::DEFINE_STRING (trace_turbo_cfg_file, …, "trace turbo cfg graph (for C1 visualizer) to a given file name")
v8::internal::DEFINE_SLOW_TRACING_BOOL (trace_turbo_trimming, …, "trace TurboFan's graph trimmer")
v8::internal::DEFINE_SLOW_TRACING_BOOL (trace_turbo_jt, false, "trace TurboFan's jump threading")
v8::internal::DEFINE_SLOW_TRACING_BOOL (trace_turbo_ceq, …, "trace TurboFan's control equivalence")
v8::internal::DEFINE_SLOW_TRACING_BOOL (trace_turbo_loop, false, "trace TurboFan's loop optimizations")
v8::internal::DEFINE_SLOW_TRACING_BOOL (trace_turbo_alloc, …, "trace TurboFan's register allocator")
v8::internal::DEFINE_SLOW_TRACING_BOOL (trace_representation, false, "trace representation types")
v8::internal::DEFINE_BOOL (trace_turbo_stack_accesses, …, "trace stack load/store counters for optimized code in run-time (x64 only)")
v8::internal::DEFINE_BOOL (fuzzing_and_concurrent_recompilation, …, "fuzzing && concurrent_recompilation")
v8::internal::DEFINE_NEG_NEG_IMPLICATION (concurrent_recompilation, fuzzing_and_concurrent_recompilation)
v8::internal::DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo)
v8::internal::DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo_graph)
v8::internal::DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo_scheduled)
v8::internal::DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo_reduction)
v8::internal::DEFINE_DISABLE_FLAG_IMPLICATION (fuzzing_and_concurrent_recompilation, trace_turbo_stack_accesses)
v8::internal::DEFINE_STRING (turbo_verify_machine_graph, nullptr, "verify TurboFan machine graph before instruction selection")
v8::internal::DEFINE_BOOL_READONLY (verify_csa, …, "verify TurboFan machine graph of code stubs")
v8::internal::DEFINE_STRING (csa_trap_on_node, nullptr, "trigger break point when a node with given id is created in given stub. The format is: StubName,NodeId")
v8::internal::DEFINE_BOOL_READONLY (fixed_array_bounds_checks, …, "enable FixedArray bounds checks")
v8::internal::DEFINE_BOOL (turbo_stats_nvp, false, "print TurboFan statistics in machine-readable format")
v8::internal::DEFINE_BOOL (turbo_stats_wasm, …, "print TurboFan statistics of wasm compilations")
v8::internal::DEFINE_INT (max_inlined_bytecode_size, 460, "maximum size of bytecode for a single inlining")
v8::internal::DEFINE_INT (max_inlined_bytecode_size_cumulative, …, "maximum cumulative size of bytecode considered for inlining")
v8::internal::DEFINE_INT (max_inlined_bytecode_size_absolute, 4600, "maximum absolute size of bytecode considered for inlining")
v8::internal::DEFINE_FLOAT (reserve_inline_budget_scale_factor, …, "scale factor of bytecode size used to calculate the inlining budget")
v8::internal::DEFINE_INT (max_inlined_bytecode_size_small, 27, "maximum size of bytecode considered for small function inlining")
v8::internal::DEFINE_INT (max_optimized_bytecode_size, …, "… too high values may cause the compiler to hit (release) assertions")
v8::internal::DEFINE_BOOL (stress_inline, …, "set high thresholds for inlining to as much as possible")
v8::internal::DEFINE_VALUE_IMPLICATION (stress_inline, max_inlined_bytecode_size_cumulative, 999999)
v8::internal::DEFINE_VALUE_IMPLICATION (stress_inline, …)
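The inlining limits above come in three flavours: a per-callee cap (max_inlined_bytecode_size), a cumulative budget (max_inlined_bytecode_size_cumulative), and a small-function threshold (max_inlined_bytecode_size_small). The sketch below shows how such thresholds could gate an inlining decision; it is an illustration of the knobs only, not TurboFan's actual heuristic, which also involves the absolute limit and budget scale factor listed above.

// Illustrative gate using the three thresholds documented above; a
// simplification, not TurboFan's real inlining policy.
bool ShouldInline(int callee_bytecode_size, int max_single, int max_small,
                  int max_cumulative, int* inlined_so_far) {
  if (callee_bytecode_size <= max_small) return true;   // small functions: always attractive
  if (callee_bytecode_size > max_single) return false;  // per-callee cap
  if (*inlined_so_far + callee_bytecode_size > max_cumulative) return false;  // budget cap
  *inlined_so_far += callee_bytecode_size;
  return true;
}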
v8::internal::DEFINE_BOOL (turbo_inline_array_builtins, true, "inline array builtins in TurboFan code")
v8::internal::DEFINE_BOOL (maglev_escape_analysis, …, "avoid inlined allocation of objects that cannot escape")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (maglev_object_tracking, "track object changes to avoid escaping them")
v8::internal::DEFINE_BOOL (trace_maglev_object_tracking, …, "trace load/stores from virtual maglev objects")
v8::internal::DEFINE_WEAK_IMPLICATION (trace_maglev_graph_building, trace_maglev_object_tracking)
v8::internal::DEFINE_BOOL_READONLY (turbo_string_builder, …, "use TurboFan fast string builder")
v8::internal::DEFINE_BOOL (log_or_trace_osr, false, "internal helper flag, please use --trace-osr instead.")
v8::internal::DEFINE_BOOL (analyze_environment_liveness, …, "analyze liveness of environment slots and zap dead values")
v8::internal::DEFINE_BOOL (trace_environment_liveness, false, "trace liveness of local variable slots")
v8::internal::DEFINE_BOOL (trace_turbo_load_elimination, …, "trace TurboFan load elimination")
v8::internal::DEFINE_BOOL (turbo_profiling_verbose, false, "enable basic block profiling in TurboFan, and include each function's schedule and disassembly in the output")
v8::internal::DEFINE_STRING (turbo_profiling_output, …, "emit data about basic block usage in builtins to this file (requires that V8 was built with v8_enable_builtins_profiling=true)")
v8::internal::DEFINE_BOOL (reorder_builtins, …, "enable builtin reordering when run mksnapshot")
v8::internal::DEFINE_BOOL (abort_on_bad_builtin_profile_data, false, "flag for mksnapshot, abort if builtins profile can't be applied")
v8::internal::DEFINE_BOOL (warn_about_builtin_profile_data, …, "flag for emit warnings when applying builtin profile data")
v8::internal::DEFINE_STRING (dump_builtins_hashes_to_file, nullptr, "flag for mksnapshot, dump CSA builtins graph hashes to this file")
v8::internal::DEFINE_BOOL (turbo_verify_allocation, …, "verify register allocation in TurboFan")
v8::internal::DEFINE_BOOL (turbo_instruction_scheduling, false, "enable instruction scheduling in TurboFan")
v8::internal::DEFINE_BOOL (turbo_stress_instruction_scheduling, …, "randomly schedule instructions to stress dependency tracking")
v8::internal::DEFINE_IMPLICATION (turbo_stress_instruction_scheduling, turbo_instruction_scheduling)
v8::internal::DEFINE_BOOL (turbo_store_elimination, …, "enable store-store elimination in TurboFan")
v8::internal::DEFINE_BOOL_READONLY (turbo_typer_hardening, true, "extra bounds checks to protect against some known typer mismatch exploit techniques (best effort)")
v8::internal::DEFINE_BOOL_READONLY (turbo_rewrite_far_jumps, …, "rewrite far to near jumps (ia32, x64)")
v8::internal::DEFINE_BOOL (stress_gc_during_compilation, …, "simulate GC/compiler thread race related to …")
v8::internal::… (…, "allow float parameters to be passed in simulator mode")
v8::internal::DEFINE_BOOL_READONLY (turbo_compress_frame_translations, false, "compress deoptimization frame translations (experimental)")
v8::internal::DEFINE_BOOL (turbo_inline_js_wasm_calls, …, "inline JS Wasm calls (specifically: inline JS-to-Wasm wrappers and then the body of the Wasm function, if applicable)")
v8::internal::DEFINE_BOOL (turbo_optimize_inlined_js_wasm_wrappers, …, "Run additional optimizations (especially load-elimination) on inlined JS-to-Wasm wrappers")
v8::internal::DEFINE_NEG_NEG_IMPLICATION (turbo_inline_js_wasm_calls, turbo_optimize_inlined_js_wasm_wrappers)
v8::internal::DEFINE_BOOL (turbo_optimize_math_minmax, true, "optimize call math.min/max with double array")
v8::internal::DEFINE_BOOL (turbo_collect_feedback_in_generic_lowering, …, "enable experimental feedback collection in generic lowering")
v8::internal::DEFINE_BOOL (turboshaft_enable_debug_features, false, "enables Turboshaft's DebugPrint, StaticAssert and CheckTurboshaftTypeOf operations")
v8::internal::DEFINE_BOOL (turboshaft_wasm_load_elimination, …, "enable Turboshaft's WasmLoadElimination")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (turboshaft_wasm_in_js_inlining, "inline Wasm code into JS functions via Turboshaft (instead of via TurboFan). Only the Wasm code is inlined in Turboshaft, the JS-to-Wasm wrappers are still inlined in TurboFan. For controlling whether to inline at all, see --turbo-inline-js-wasm-calls.")
v8::internal::DEFINE_BOOL (turboshaft_load_elimination, …, "enable Turboshaft's low-level load elimination for JS")
v8::internal::DEFINE_BOOL (turboshaft_loop_unrolling, true, "enable Turboshaft's loop unrolling")
v8::internal::DEFINE_BOOL (turboshaft_string_concat_escape_analysis, …, "enable Turboshaft's escape analysis for string concatenation")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (turboshaft_typed_optimizations, "enable an additional Turboshaft phase that performs optimizations based on type information")
v8::internal::DEFINE_BOOL (turbolev, …, "use Turbolev (≈ Maglev+Turboshaft combined) as the 4th tier compiler instead of Turbofan")
v8::internal::DEFINE_EXPERIMENTAL_FEATURE (turbolev_future, "enable Turbolev features that we want to ship in the not too far future")
v8::internal::DEFINE_BOOL (typed_array_length_loading, true, "Enable specializing loading the TypedArray length in Maglev / Turbofan")
v8::internal::DEFINE_BOOL_READONLY (turboshaft_trace_reduction, …, "trace individual Turboshaft reduction steps")
v8::internal::DEFINE_BOOL_READONLY (turboshaft_trace_emitted, false, "trace emitted Turboshaft instructions")
v8::internal::DEFINE_BOOL_READONLY (turboshaft_trace_intermediate_reductions, …)
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps | v8::internal::DEFINE_BOOL (profile_guided_optimization_for_empty_feedback_vector, true, "profile guided optimization for empty feedback vector") DEFINE_INT(invocation_count_for_early_optimization |
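Most of the Turboshaft and Turbolev flags above are ordinary boolean flags that an embedder can toggle before V8 starts up. The following is a minimal sketch, assuming a recent V8 and a standard embedder setup against the public headers; the particular flag choices are only illustrative, and DEFINE_BOOL_READONLY flags cannot be changed this way.

#include <memory>
#include <libplatform/libplatform.h>
#include <v8.h>

int main(int argc, char* argv[]) {
  // Flags must be set before V8 is initialized. Dashed and underscored
  // spellings are equivalent (--turboshaft-loop-unrolling == turboshaft_loop_unrolling).
  v8::V8::SetFlagsFromString(
      "--turboshaft-loop-unrolling --no-turboshaft-string-concat-escape-analysis");
  // Alternatively, forward flags passed on the command line (e.g. --turbolev).
  v8::V8::SetFlagsFromCommandLine(&argc, argv, /*remove_flags=*/true);

  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
  // ... create an Isolate and run scripts here ...
  v8::V8::Dispose();
  v8::V8::DisposePlatform();
  return 0;
}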
|
| v8::internal::DEFINE_INT (invocation_count_for_early_optimization, …, "invocation count threshold for early optimization") |

| v8::internal::DEFINE_INT (invocation_count_for_maglev_with_delay, 600, "invocation count for maglev for functions which according to profile_guided_optimization are likely to deoptimize before reaching this invocation count") |

| v8::internal::DEFINE_BOOL (optimize_for_size, …, "Enables optimizations which favor memory size over execution speed") |

| v8::internal::DEFINE_BOOL (reopt_after_lazy_deopts, true, "Immediately re-optimize code after some lazy deopts") |

| v8::internal::DEFINE_INT (stress_sampling_allocation_profiler, …, "Enables sampling allocation profiler with X as a sample interval") |

| v8::internal::DEFINE_BOOL (lazy_new_space_shrinking, false, "Enables the lazy new space shrinking strategy") |
|
| v8::internal::DEFINE_SIZE_T (min_semi_space_size, …, "min size of a semi-space (in MBytes), the new space consists of two semi-spaces") |

| v8::internal::DEFINE_SIZE_T (max_semi_space_size, 0, "max size of a semi-space (in MBytes), the new space consists of two semi-spaces") |

| v8::internal::DEFINE_SIZE_T (max_heap_size, …, "max size of the …") |

| v8::internal::DEFINE_INT (random_gc_interval, …, "Collect garbage after random(0, X) V8 allocations. It overrides gc_interval.") |

| v8::internal::DEFINE_INT (cppgc_random_gc_interval, …, "Collect garbage after …") |

| v8::internal::DEFINE_INT (retain_maps_for_n_gc, …, "keeps maps alive for <n> old space garbage collections") |

| v8::internal::DEFINE_BOOL (trace_gc, false, "print one trace line following each garbage collection") |
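The semi-space and heap sizing flags above take values in MB, as their comments state. A minimal sketch of applying them through the same public flag-string API, before the heap is created; the concrete sizes are illustrative, not recommendations.

#include <v8.h>

// Sketch only: apply the heap sizing flags before v8::V8::Initialize().
void ConfigureHeapFlagsForTesting() {
  // Values are in MB, per the flag comments above.
  v8::V8::SetFlagsFromString(
      "--min-semi-space-size=1 --max-semi-space-size=16 --max-heap-size=512");
  // trace_gc prints one line per collection, which is handy when experimenting
  // with these sizes.
  v8::V8::SetFlagsFromString("--trace-gc");
}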
|
| v8::internal::DEFINE_BOOL (trace_gc_nvp, …, "print one detailed trace line in …") |

| v8::internal::DEFINE_BOOL (incremental_marking_start_user_visible, true, "Starts incremental marking with kUserVisible priority.") |

| v8::internal::DEFINE_INT (incremental_marking_soft_trigger, …, "threshold for starting incremental marking via a task in percent of available …") |

| v8::internal::DEFINE_INT (incremental_marking_hard_trigger, …, "threshold for starting incremental marking immediately in percent of available …") |

| v8::internal::DEFINE_BOOL (…, "Use a single schedule for determining a marking schedule between JS and C++ objects") |

| v8::internal::DEFINE_UINT (minor_gc_task_trigger, 80, "minor GC task trigger in percent of the current heap limit") |

| v8::internal::DEFINE_BOOL (minor_gc_task_with_lower_priority, …, "schedules the minor GC task with kUserVisible priority") |

| v8::internal::DEFINE_EXPERIMENTAL_FEATURE (cppgc_young_generation, "run young generation garbage collections in Oilpan") |

| v8::internal::DEFINE_INT (concurrent_marking_max_worker_num, …, "max worker number of concurrent … for NumberOfWorkerThreads") |

| v8::internal::DEFINE_BOOL (concurrent_array_buffer_sweeping, true, "concurrently sweep array buffers") |
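minor_gc_task_trigger is expressed as a percentage of the current heap limit. A small stand-alone helper (illustrative, not V8 code) showing how such a percent trigger is typically evaluated:

#include <cstddef>

// Returns true once the current size crosses trigger_percent of the limit,
// e.g. 80 for the default minor_gc_task_trigger value above.
bool CrossedPercentTrigger(size_t current_size_bytes, size_t limit_bytes,
                           int trigger_percent) {
  return current_size_bytes * 100 >=
         limit_bytes * static_cast<size_t>(trigger_percent);
}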
|
| v8::internal::DEFINE_BOOL (stress_concurrent_allocation, …, "start background threads that allocate memory") |

| v8::internal::DEFINE_INT (ephemeron_fixpoint_iterations, 10, "number of fixpoint iterations it takes to switch to linear ephemeron algorithm") |

| v8::internal::DEFINE_NEG_NEG_IMPLICATION (concurrent_sweeping, concurrent_array_buffer_sweeping) |

| v8::internal::DEFINE_BOOL (parallel_pointer_update, true, "use parallel pointer update during compaction") |

| v8::internal::DEFINE_BOOL (parallel_weak_ref_clearing, …, "use parallel threads to clear weak refs in the atomic pause") |

| v8::internal::DEFINE_BOOL (detect_ineffective_gcs_near_heap_limit, true, "trigger out-of-memory failure to avoid GC storm near heap limit") |
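The DEFINE_NEG_NEG_IMPLICATION entry above couples concurrent_array_buffer_sweeping to concurrent_sweeping: disabling the latter also disables the former. A plain-C++ model of that rule (not the actual macro expansion) might look like this:

struct GcFlags {
  bool concurrent_sweeping = true;
  bool concurrent_array_buffer_sweeping = true;
};

// Hypothetical helper mirroring how a negative implication is resolved after
// flag parsing: if the whenflag is off, the thenflag is forced off as well.
void ApplyNegNegImplications(GcFlags& flags) {
  if (!flags.concurrent_sweeping) {
    flags.concurrent_array_buffer_sweeping = false;
  }
}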
|
| v8::internal::DEFINE_BOOL (trace_incremental_marking, …, "trace progress of the incremental marking") |

| v8::internal::DEFINE_BOOL (track_gc_object_stats, false, "track object counts and memory usage") |

| v8::internal::DEFINE_BOOL (trace_gc_object_stats, …, "trace object counts and memory usage") |

| v8::internal::DEFINE_GENERIC_IMPLICATION (trace_zone_stats, TracingFlags::zone_stats.store(v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) |

| v8::internal::DEFINE_SIZE_T (zone_stats_tolerance, …, "report a tick only when allocated zone memory changes by this amount") |

| v8::internal::DEFINE_GENERIC_IMPLICATION (trace_zone_type_stats, TracingFlags::zone_stats.store(v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) |

| v8::internal::DEFINE_GENERIC_IMPLICATION (track_gc_object_stats, TracingFlags::gc_stats.store(v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) |

| v8::internal::DEFINE_GENERIC_IMPLICATION (trace_gc_object_stats, TracingFlags::gc_stats.store(…)) |

| v8::internal::DEFINE_BOOL (track_detached_contexts, …, "track native contexts that are expected to be garbage collected") |

| v8::internal::DEFINE_BOOL (trace_detached_contexts, false, "trace native contexts that are expected to be garbage collected") |
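A DEFINE_GENERIC_IMPLICATION entry attaches a statement to a flag; when the flag is enabled, the statement runs during flag processing, here forcing the zone_stats / gc_stats tracing categories on. A rough stand-alone model, with stand-in names for V8's TracingFlags and TracingCategoryObserver:

#include <atomic>

constexpr unsigned kEnabledByNative = 1;   // stand-in for ENABLED_BY_NATIVE
std::atomic<unsigned> gc_stats{0};         // stand-in for TracingFlags::gc_stats

// Illustrative only: both object-stats flags imply that the gc_stats tracing
// category is force-enabled, mirroring the generic implications above.
void ApplyTracingImplications(bool track_gc_object_stats,
                              bool trace_gc_object_stats) {
  if (track_gc_object_stats) gc_stats.store(kEnabledByNative);
  if (trace_gc_object_stats) gc_stats.store(kEnabledByNative);
}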
|
| v8::internal::DEFINE_BOOL_READONLY (verify_heap, …, "verify heap pointers before and after GC") |

| v8::internal::DEFINE_BOOL (memory_reducer_respects_frozen_state, false, "don't schedule another GC when we are frozen") |

| v8::internal::DEFINE_BOOL (memory_reducer_favors_memory, …, "memory reducer runs GC with ReduceMemoryFootprint flag") |

| v8::internal::DEFINE_BOOL (memory_reducer_for_small_heaps, true, "use memory reducer for small heaps") |

| v8::internal::DEFINE_INT (memory_reducer_gc_count, …, "Maximum number of memory reducer GCs scheduled") |

| v8::internal::DEFINE_BOOL (external_memory_accounted_in_global_limit, false, "External memory limits are computed as part of global limits in v8 Heap.") |

| v8::internal::DEFINE_BOOL (gc_speed_uses_counters, …, "Old gen GC speed is computed directly from gc tracer counters") |

| v8::internal::DEFINE_INT (heap_growing_percent, 0, "specifies heap growing factor as (1 + heap_growing_percent/100)") |
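As the heap_growing_percent comment states, the growing factor is derived as 1 + heap_growing_percent/100, so for example a value of 40 corresponds to a factor of 1.4:

// Worked example of the formula in the flag comment above.
double HeapGrowingFactor(int heap_growing_percent) {
  return 1.0 + heap_growing_percent / 100.0;  // e.g. 40 -> 1.4
}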
|
| v8::internal::DEFINE_BOOL (compact, …, "Perform compaction on full GCs based on V8's default heuristics") |

| v8::internal::DEFINE_BOOL (compact_code_space, true, "Perform code space compaction on full collections.") |

| v8::internal::DEFINE_BOOL (compact_on_every_full_gc, …, "Perform compaction on every full GC") |

| v8::internal::DEFINE_BOOL (compact_with_stack, true, "Perform compaction when finalizing a full GC with stack") |

| v8::internal::DEFINE_BOOL (compact_code_space_with_stack, …, "Perform code space compaction when finalizing a full GC with stack") |

| v8::internal::DEFINE_BOOL (shortcut_strings_with_stack, true, "Shortcut Strings during GC with stack") |

| v8::internal::DEFINE_BOOL (stress_compaction, …, "Stress GC compaction to flush out bugs with moving objects") |

| v8::internal::DEFINE_BOOL (stress_compaction_random, false, "Stress GC compaction by selecting random percent of pages as evacuation candidates. Overrides stress_compaction.") |

| v8::internal::DEFINE_BOOL (flush_baseline_code, …) |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently | v8::internal::DEFINE_BOOL (flush_bytecode, true, "flush of bytecode when it has not been executed recently") DEFINE_BOOL(flush_code_based_on_time |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age | v8::internal::DEFINE_BOOL (flush_code_based_on_tab_visibility, false, "Flush code when tab goes into the background.") DEFINE_BOOL(use_marking_progress_bar |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active | v8::internal::DEFINE_BOOL (stress_per_context_marking_worklist, false, "Use per-context worklist for marking") DEFINE_BOOL(stress_incremental_marking |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often | v8::internal::DEFINE_BOOL (fuzzer_gc_analysis, false, "prints number of allocations and enables analysis mode for gc " "fuzz testing, e.g. --stress-marking, --stress-scavenge") DEFINE_INT(stress_marking |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and | v8::internal::X (inclusive) percent " "of the regular marking start limit") DEFINE_INT(stress_scavenge |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible | v8::internal::DEFINE_BOOL (parallel_reclaim_unmodified_wrappers, true, "reclaim wrapper objects in parallel") DEFINE_BOOL(gc_experiment_less_compaction |
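These GC tuning and stress-testing flags are consumed at process start-up. As a hedged illustration (the header paths, the choice of flags, and the surrounding set-up are assumptions of a typical embedding, not part of this file), an embedder might set a few of them through the public flag API before initializing V8:

#include <libplatform/libplatform.h>
#include <v8.h>

#include <memory>

int main() {
  // Flags must be set before V8::Initialize(); flag names accept either
  // dashes or underscores, and "--no-" negates a boolean flag.
  v8::V8::SetFlagsFromString(
      "--stress-compaction --flush-bytecode --no-compact-code-space");

  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  // ... create isolates and contexts, run JavaScript ...

  v8::V8::Dispose();
  // DisposePlatform() in recent V8; older releases call this ShutdownPlatform().
  v8::V8::DisposePlatform();
  return 0;
}

The d8 shell accepts the same spellings directly on its command line (for example --stress-marking and --stress-scavenge, as referenced by fuzzer_gc_analysis above).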
|
v8::internal::DEFINE_INT(gc_memory_reducer_start_delay_ms, 8000, "Delay before memory reducer start")
v8::internal::DEFINE_BOOL(concurrent_marking_high_priority_threads, …, "use high priority threads for concurrent Marking")
v8::internal::DEFINE_BOOL(randomize_all_allocations, false, "randomize virtual memory reservations by ignoring any hints passed when allocating pages")
v8::internal::DEFINE_BOOL(manual_evacuation_candidates_selection, …, "Test mode only flag. It allows an unit test to select evacuation candidates pages (requires --stress_compaction).")
v8::internal::DEFINE_BOOL(cppheap_incremental_marking, …, "use incremental marking for CppHeap")
v8::internal::DEFINE_BOOL(cppheap_concurrent_marking, false, "use concurrent marking for CppHeap")
v8::internal::DEFINE_NEG_NEG_IMPLICATION(cppheap_incremental_marking, cppheap_concurrent_marking)
v8::internal::DEFINE_BOOL(memory_balancer, false, "use membalancer, a new heap limit balancing algorithm")
v8::internal::DEFINE_FLOAT(memory_balancer_c_value, …, "c value for membalancer. A special constant to balance between memory and space tradeoff. The smaller, the more memory it uses.")
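The DEFINE_NEG_NEG_IMPLICATION entry ties the two CppHeap marking flags together: as I read it, turning the first flag off also forces the second one off. The sketch below is only a conceptual model of that rule, not V8's actual macro expansion; the struct and function names are invented for illustration, and the defaults shown are placeholders.

// Conceptual model of DEFINE_NEG_NEG_IMPLICATION(cppheap_incremental_marking,
// cppheap_concurrent_marking): disabling the "when" flag also disables the
// "then" flag.
struct CppHeapMarkingFlags {
  bool cppheap_incremental_marking = true;   // placeholder default
  bool cppheap_concurrent_marking = false;   // default per the entry above
};

// Applied after command-line parsing, before the flag values are consumed.
inline void ApplyCppHeapImplications(CppHeapMarkingFlags& flags) {
  if (!flags.cppheap_incremental_marking) {
    // --no-cppheap-incremental-marking implies --no-cppheap-concurrent-marking.
    flags.cppheap_concurrent_marking = false;
  }
}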
v8::internal::DEFINE_BOOL(enable_sse4_1, true, "enable use of SSE4.1 instructions if available")
v8::internal::DEFINE_BOOL(enable_sse4_2, …, "enable use of SSE4.2 instructions if available")
v8::internal::DEFINE_BOOL(enable_sahf, true, "enable use of SAHF instruction if available (X64 only)")
v8::internal::DEFINE_BOOL(enable_avx_vnni, …, "enable use of AVX-VNNI instructions if available")
v8::internal::DEFINE_BOOL(enable_avx_vnni_int8, true, "enable use of AVX-VNNI-INT8 instructions if available")
v8::internal::DEFINE_BOOL(enable_popcnt, …, "enable use of POPCNT instruction if available")
v8::internal::DEFINE_STRING(arm_arch, ARM_ARCH_DEFAULT, "generate instructions for the selected ARM architecture if available: armv6, armv7, armv7+sudiv or armv8")
v8::internal::DEFINE_BOOL(force_long_branches, …, …)
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long | v8::internal::mode (MIPS/PPC only)") DEFINE_BOOL(partial_constant_pool |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant | v8::internal::pools (x64 only)") DEFINE_STRING(sim_arm64_optional_features |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs | v8::internal::DEFINE_BOOL (enable_source_at_csa_bind, false, "Include source information in the binary at CSA bind locations.") DEFINE_BOOL(enable_regexp_unaligned_accesses |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine | v8::internal::DEFINE_BOOL (stress_background_compile, false, "stress test parsing on background") DEFINE_BOOL(concurrent_cache_deserialization |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background | v8::internal::DEFINE_BOOL (merge_background_deserialized_script_with_compilation_cache, true, "After deserializing code cache data on a background thread, merge it into " "an existing Script if one is found in the Isolate compilation cache") DEFINE_BOOL(experimental_embedder_instance_types |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on instance types provided by the embedder | v8::internal::DEFINE_STRING (expose_gc_as, nullptr, "expose gc extension under the specified name") DEFINE_BOOL(expose_externalize_string |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on instance types provided by the embedder expose externalize string extension | v8::internal::DEFINE_BOOL (expose_ignition_statistics, false, "expose ignition-statistics extension (requires building with " "v8_enable_ignition_dispatch_counting)") DEFINE_BOOL(builtins_in_stack_traces |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on instance types provided by the embedder expose externalize string extension show built in functions in stack traces | v8::internal::DEFINE_BOOL (experimental_stack_trace_frames, false, "enable experimental frames (API/Builtins) and stack trace layout") DEFINE_BOOL(disallow_code_generation_from_strings |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on instance types provided by the embedder expose externalize string extension show built in functions in stack traces disallow eval and friends | v8::internal::DEFINE_STRING (expose_cputracemark_as, nullptr, "expose cputracemark extension under the specified name") DEFINE_BOOL(experimental_report_exceptions_from_callbacks |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on instance types provided by the embedder expose externalize string extension show built in functions in stack traces disallow eval and friends Notify Api callback about exceptions thrown in Api callbacks | v8::internal::DEFINE_BOOL (allow_unsafe_function_constructor, false, "allow invoking the function constructor without security checks") DEFINE_BOOL(test_small_max_function_context_stub_size |
|
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible less compaction in non memory reducing mode use high priority threads for concurrent Marking Test mode only flag It allows an unit test to select evacuation candidates use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses 
enable use of SSE4 instructions if available enable use of AVX VNNI instructions if available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant enable optional features on the simulator for enable mitigation for Intel JCC erratum on affected CPUs enable unaligned accesses for the regexp engine enable deserializing code caches on background enable type checks based on instance types provided by the embedder expose externalize string extension show built in functions in stack traces disallow eval and friends Notify Api callback about exceptions thrown in Api callbacks enable testing the function context size overflow path by making the maximum size smaller | v8::internal::DEFINE_INT (switch_table_spread_threshold, 3, "allow the jump table used for switch statements to span a range " "of integers roughly equal to this number times the number of " "clauses in the switch") DEFINE_INT(switch_table_min_cases |
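Each DEFINE_BOOL/DEFINE_STRING/DEFINE_INT row above declares one runtime flag by name. As a minimal embedder-side sketch (not V8-internal code), such flags are typically set through the public initialization API before the platform and isolate are created; the flag names below are taken from the listing, and the exact include and setup sequence depend on the build:

```cpp
// Embedder-side sketch: set flags declared with the DEFINE_* macros by name.
// Dashes and underscores in flag names are generally interchangeable.
#include <v8.h>

void ConfigureV8Flags(int* argc, char** argv) {
  // Set individual flags from a string before creating the platform/isolate.
  v8::V8::SetFlagsFromString("--expose_gc --no-stress-background-compile");
  // Or let V8 consume any recognized --flags directly from the command line.
  v8::V8::SetFlagsFromCommandLine(argc, argv, /*remove_flags=*/true);
}
```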
|
V8_EXPORT_PRIVATE base::Vector< Flag > | v8::internal::Flags () |
|
V8_EXPORT_PRIVATE Flag * | v8::internal::FindImplicationFlagByName (const char *name) |
|
V8_EXPORT_PRIVATE Flag * | v8::internal::FindFlagByName (const char *name) |
|
Flag * | v8::internal::FindFlagByPointer (const void *ptr) |
|
static const char * | v8::internal::Type2String (Flag::FlagType type) |
|
std::ostream & | v8::internal::operator<< (std::ostream &os, PrintFlagValue flag_value) |
|
std::ostream & | v8::internal::operator<< (std::ostream &os, const Flag &flag) |
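The lookup and printing helpers above live in v8::internal and are not part of the embedder API. A hedged, hypothetical sketch of how they fit together is shown below; the flag name passed in is purely illustrative and the include path is an assumption about where the Flag declarations reside in-tree:

```cpp
// Hypothetical internal-only sketch using the helpers declared above.
#include <iostream>
#include "src/flags/flags-impl.h"  // assumed header for Flag, Flags(), FindFlagByName

void DumpFlag(const char* name) {
  v8::internal::Flag* flag = v8::internal::FindFlagByName(name);
  if (flag == nullptr) return;  // unknown flag name
  std::cout << *flag << "\n";   // uses operator<<(std::ostream&, const Flag&)
}

void DumpAllFlags() {
  // Flags() returns a vector over every registered Flag record.
  for (v8::internal::Flag& flag : v8::internal::Flags()) {
    std::cout << flag << "\n";
  }
}
```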
|
uint32_t | v8::internal::ComputeFlagListHash () |
|
static void | v8::internal::SplitArgument (const char *arg, char *buffer, int buffer_size, const char **name, const char **value, bool *negated) |
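SplitArgument is a file-local helper in flags.cc, so it cannot be called from outside that translation unit; its out-parameters nevertheless suggest what one command-line token is decomposed into. The standalone sketch below mirrors the usual --flag / --no-flag / --flag=value syntax and is an assumption about the intended decomposition, not the actual implementation:

```cpp
// Illustration only: a simplified splitter mirroring V8's flag syntax.
// This is NOT the flags.cc implementation, just a sketch of what the
// out-parameters of SplitArgument (name, value, negated) are expected to carry.
#include <cstring>
#include <string>

struct SplitResult {
  std::string name;   // flag name without leading dashes or "no-" prefix
  const char* value;  // points past '=' in the original argument, or nullptr
  bool negated;       // true for --no-foo style arguments
};

SplitResult SplitForIllustration(const char* arg) {
  SplitResult r{"", nullptr, false};
  if (arg[0] == '-' && arg[1] == '-') arg += 2;  // strip leading "--"
  if (std::strncmp(arg, "no-", 3) == 0 || std::strncmp(arg, "no_", 3) == 0) {
    r.negated = true;
    arg += 3;
  }
  const char* eq = std::strchr(arg, '=');
  if (eq != nullptr) {
    r.name.assign(arg, eq - arg);
    r.value = eq + 1;
  } else {
    r.name = arg;
  }
  return r;
}
```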
|
template<typename T > |
bool | v8::internal::TryParseUnsigned (Flag *flag, const char *arg, const char *value, char **endp, T *out_val) |
|
static char * | v8::internal::SkipWhiteSpace (char *p) |
|
static char * | v8::internal::SkipBlackSpace (char *p) |
|