// If Karatsuba is the best supported algorithm, it must check for
// termination requests; when more advanced algorithms exist for larger
// inputs, Karatsuba only runs on chunks small enough that checking is
// unnecessary.
#if V8_ADVANCED_BIGINT_ALGORITHMS
#define MAYBE_TERMINATE
#else
#define MAYBE_TERMINATE \
  if (should_terminate()) return;
#endif
// The Karatsuba algorithm sometimes finishes faster when the input length
// is rounded up a bit; the heuristics below were found experimentally.
int RoundUpLen(int len) {
  if (len <= 36) return RoundUp(len, 2);
  // Keep the 4 or 5 most significant non-zero bits.
  int shift = BitLength(len) - 5;
  if ((len >> shift) >= 0x18) {
    shift++;
  }
  // Don't round up when len is less than a quarter step above a multiple.
  int additive = ((1 << shift) - 1);
  if (shift >= 2 && (len & additive) < (1 << (shift - 2))) {
    return len;
  }
  return ((len + additive) >> shift) << shift;
}
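// --- Illustrative aside (not part of mul-karatsuba.cc) ---
// A standalone sketch of the rounding heuristic above. BitLength and
// RoundUp are re-implemented here for the demo; they are stand-ins for
// the library's helpers, not its actual definitions.
#include <cstdio>
#include <initializer_list>

namespace demo {
constexpr int RoundUp(int x, int y) { return (x + y - 1) / y * y; }
int BitLength(int n) {
  int bits = 0;
  while (n > 0) { n >>= 1; bits++; }
  return bits;
}
int RoundUpLen(int len) {
  if (len <= 36) return RoundUp(len, 2);
  int shift = BitLength(len) - 5;
  if ((len >> shift) >= 0x18) shift++;
  int additive = ((1 << shift) - 1);
  if (shift >= 2 && (len & additive) < (1 << (shift - 2))) return len;
  return ((len + additive) >> shift) << shift;
}
}  // namespace demo

int main() {
  // Prints 38, 104, 1024, 1025: granularity grows with the input, and
  // 1025 is left alone because it is just above a multiple of 64.
  for (int len : {37, 100, 1000, 1025}) {
    std::printf("RoundUpLen(%d) = %d\n", len, demo::RoundUpLen(len));
  }
  return 0;
}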
// Chooses the working length for the recursion: round up, then halve
// until the value drops to the schoolbook range, and scale back up.
int KaratsubaLength(int n) {
  n = RoundUpLen(n);
  int i = 0;
  while (n > kKaratsubaThreshold) {
    n >>= 1;
    i++;
  }
  return n << i;
}
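// --- Illustrative aside (not part of mul-karatsuba.cc) ---
// The halving behavior of KaratsubaLength, with RoundUpLen stubbed out as
// the identity. kKaratsubaThreshold's value is an assumption in this demo
// (34 in V8's bigint-internal.h at the time of writing).
#include <cstdio>
#include <initializer_list>

namespace demo {
constexpr int kKaratsubaThreshold = 34;  // assumed value, see lead-in
int KaratsubaLengthNoRounding(int n) {
  int i = 0;
  while (n > kKaratsubaThreshold) {
    n >>= 1;  // halve until the base case fits the schoolbook range...
    i++;
  }
  return n << i;  // ...then scale back up by the same power of two.
}
}  // namespace demo

int main() {
  // 100 -> 25 << 2 == 100, but 70 -> 17 << 2 == 68: the working length k
  // can end up below the input length, which KaratsubaStart must handle.
  for (int n : {40, 70, 100, 1000}) {
    std::printf("k(%d) = %d\n", n, demo::KaratsubaLengthNoRounding(n));
  }
  return 0;
}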
// Performs the subtraction required by KaratsubaMain below: writes |X - Y|
// to result and flips *sign if the operands had to be swapped.
void KaratsubaSubtractionHelper(RWDigits result, Digits X, Digits Y,
                                int* sign) {
  X.Normalize();
  Y.Normalize();
  digit_t borrow = 0;
  int i = 0;
  if (!GreaterThanOrEqual(X, Y)) {
    *sign = -(*sign);
    std::swap(X, Y);
  }
  for (; i < Y.len(); i++) {
    result[i] = digit_sub2(X[i], Y[i], borrow, &borrow);
  }
  for (; i < X.len(); i++) {
    result[i] = digit_sub(X[i], borrow, &borrow);
  }
  DCHECK(borrow == 0);
  for (; i < result.len(); i++) result[i] = 0;
}
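// --- Illustrative aside (not part of mul-karatsuba.cc) ---
// The helper computes |X - Y| and records the sign externally, because
// Karatsuba's middle factors (X1 - X0) and (Y0 - Y1) can be negative but
// digit arrays are unsigned. A scalar analogue:
#include <cassert>
#include <cstdint>
#include <utility>

int64_t AbsDiff(int64_t a, int64_t b, int* sign) {
  if (a < b) {
    *sign = -(*sign);  // mirror of the *sign = -(*sign) branch above
    std::swap(a, b);
  }
  return a - b;
}

int main() {
  int sign = 1;
  assert(AbsDiff(9, 4, &sign) == 5 && sign == 1);   // no swap, no flip
  assert(AbsDiff(4, 9, &sign) == 5 && sign == -1);  // swapped, sign flips
  assert(AbsDiff(3, 8, &sign) == 5 && sign == 1);   // two flips cancel
  return 0;
}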
void ProcessorImpl::MultiplyKaratsuba(RWDigits Z, Digits X, Digits Y) {
  int k = KaratsubaLength(Y.len());
  int scratch_len = 4 * k;
  ScratchDigits scratch(scratch_len);
  KaratsubaStart(Z, X, Y, scratch, k);
}
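// --- Illustrative aside (not part of mul-karatsuba.cc) ---
// Why 4 * k digits of scratch suffice: each level keeps 2n digits for its
// own partial products and hands the upper half of the buffer to its
// recursive calls, which run one at a time at size n/2. The recurrence
// S(n) = 2n + S(n/2) stays within 4n.
#include <cassert>

int ScratchNeeded(int n) {
  if (n <= 1) return 2 * n;
  return 2 * n + ScratchNeeded(n / 2);
}

int main() {
  for (int n = 1; n <= (1 << 20); n *= 2) {
    assert(ScratchNeeded(n) <= 4 * n);  // in fact S(n) == 4n - 2 here
  }
  return 0;
}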
// Entry point for Karatsuba multiplication; chops unequal-length inputs
// into length-k chunks.
void ProcessorImpl::KaratsubaStart(RWDigits Z, Digits X, Digits Y,
                                   RWDigits scratch, int k) {
  KaratsubaMain(Z, X, Y, scratch, k);
  MAYBE_TERMINATE
  for (int i = 2 * k; i < Z.len(); i++) Z[i] = 0;
  if (k < Y.len() || X.len() != Y.len()) {
    ScratchDigits T(2 * k);
    // Add X0 * Y1 * b.
    Digits X0(X, 0, k);
    Digits Y1 = Y + std::min(k, Y.len());
    if (Y1.len() > 0) {
      KaratsubaChunk(T, X0, Y1, scratch);
      MAYBE_TERMINATE
      AddAndReturnOverflow(Z + k, T);  // Can't overflow.
    }
    // Add Xi * Y0 << i and Xi * Y1 * b << (i + k).
    Digits Y0(Y, 0, k);
    for (int i = k; i < X.len(); i += k) {
      Digits Xi(X, i, k);
      KaratsubaChunk(T, Xi, Y0, scratch);
      MAYBE_TERMINATE
      AddAndReturnOverflow(Z + i, T);  // Can't overflow.
      if (Y1.len() > 0) {
        KaratsubaChunk(T, Xi, Y1, scratch);
        MAYBE_TERMINATE
        AddAndReturnOverflow(Z + (i + k), T);  // Can't overflow.
      }
    }
  }
}
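// --- Illustrative aside (not part of mul-karatsuba.cc) ---
// What the chunk loop accumulates, in scalar form: each length-k chunk Xi
// contributes Xi * Y0 at digit offset i (and Xi * Y1 at offset i + k when
// Y is longer than one chunk). A toy base-10 check with an empty Y1:
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const int64_t B = 10;
  const int k = 2;
  std::vector<int64_t> X = {1, 2, 3, 4, 5, 6};  // little-endian: X = 654321
  const int64_t Y0 = 21;                        // Y = 21 fits in one chunk
  int64_t acc = 0;
  int64_t scale = 1;  // b^i, advanced by k digits per iteration
  for (size_t i = 0; i < X.size(); i += k) {
    int64_t Xi = X[i] + X[i + 1] * B;  // the chunk starting at offset i
    acc += Xi * Y0 * scale;            // the AddAndReturnOverflow(Z + i, T) step
    scale *= B * B;
  }
  assert(acc == 654321 * 21);
  return 0;
}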
// Entry point for chunk-wise multiplications; picks an algorithm suited
// to the (normalized) input lengths.
void ProcessorImpl::KaratsubaChunk(RWDigits Z, Digits X, Digits Y,
                                   RWDigits scratch) {
  X.Normalize();
  Y.Normalize();
  if (X.len() == 0 || Y.len() == 0) return Z.Clear();
  if (X.len() < Y.len()) std::swap(X, Y);
  if (Y.len() == 1) return MultiplySingle(Z, X, Y[0]);
  if (Y.len() < kKaratsubaThreshold) return MultiplySchoolbook(Z, X, Y);
  int k = KaratsubaLength(Y.len());
  DCHECK(scratch.len() >= 4 * k);
  KaratsubaStart(Z, X, Y, scratch, k);
}
// The main recursive Karatsuba method.
void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y,
                                  RWDigits scratch, int n) {
  if (n < kKaratsubaThreshold) {
    X.Normalize();
    Y.Normalize();
    if (X.len() >= Y.len()) {
      return MultiplySchoolbook(RWDigits(Z, 0, 2 * n), X, Y);
    } else {
      return MultiplySchoolbook(RWDigits(Z, 0, 2 * n), Y, X);
    }
  }
  DCHECK(scratch.len() >= 4 * n);
  int n2 = n >> 1;
  Digits X0(X, 0, n2);
  Digits X1(X, n2, n2);
  Digits Y0(Y, 0, n2);
  Digits Y1(Y, n2, n2);
  RWDigits scratch_for_recursion(scratch, 2 * n, 2 * n);
  RWDigits P0(scratch, 0, n);
  KaratsubaMain(P0, X0, Y0, scratch_for_recursion, n2);
  MAYBE_TERMINATE
  for (int i = 0; i < n; i++) Z[i] = P0[i];
  RWDigits P2(scratch, n, n);
  KaratsubaMain(P2, X1, Y1, scratch_for_recursion, n2);
  MAYBE_TERMINATE
  RWDigits Z2 = Z + n;
  int end = std::min(Z2.len(), P2.len());
  for (int i = 0; i < end; i++) Z2[i] = P2[i];
  for (int i = end; i < n; i++) {
    DCHECK(P2[i] == 0);
  }
  // The intermediate result can be one digit too large; the subtraction
  // below will fix this.
  digit_t overflow = AddAndReturnOverflow(Z + n2, P0);
  overflow += AddAndReturnOverflow(Z + n2, P2);
  RWDigits X_diff(scratch, 0, n2);
  RWDigits Y_diff(scratch, n2, n2);
  int sign = 1;
  KaratsubaSubtractionHelper(X_diff, X1, X0, &sign);
  KaratsubaSubtractionHelper(Y_diff, Y0, Y1, &sign);
  RWDigits P1(scratch, n, n);
  KaratsubaMain(P1, X_diff, Y_diff, scratch_for_recursion, n2);
  if (sign > 0) {
    overflow += AddAndReturnOverflow(Z + n2, P1);
  } else {
    overflow -= SubAndReturnBorrow(Z + n2, P1);
  }
  // The intermediate result may have been bigger, but the final result fits.
  DCHECK(overflow == 0);
  USE(overflow);
}
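// --- Illustrative aside (not part of mul-karatsuba.cc) ---
// The identity behind the three recursive calls, checked with scalars:
// X1*Y0 + X0*Y1 == P0 + P2 + sign * P1, where P1 = |X1 - X0| * |Y0 - Y1|
// and sign tracks the two possible flips, exactly as in KaratsubaMain.
#include <cassert>
#include <cstdint>
#include <utility>

int64_t AbsDiff(int64_t a, int64_t b, int* sign) {
  if (a < b) {
    *sign = -(*sign);
    std::swap(a, b);
  }
  return a - b;
}

int main() {
  const int64_t B = 100;  // toy base standing in for b^n2
  const int64_t X1 = 12, X0 = 99, Y1 = 7, Y0 = 5;
  int sign = 1;
  int64_t p0 = X0 * Y0;  // first recursive call
  int64_t p2 = X1 * Y1;  // second recursive call
  int64_t p1 = AbsDiff(X1, X0, &sign) * AbsDiff(Y0, Y1, &sign);  // third
  int64_t middle = p0 + p2 + sign * p1;  // the add/sub on Z + n2
  assert(middle == X1 * Y0 + X0 * Y1);
  assert(p2 * B * B + middle * B + p0 == (X1 * B + X0) * (Y1 * B + Y0));
  return 0;
}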
#undef MAYBE_TERMINATE
void KaratsubaMain(RWDigits Z, Digits X, Digits Y, RWDigits scratch, int n)
void MultiplySingle(RWDigits Z, Digits X, digit_t y)
void MultiplySchoolbook(RWDigits Z, Digits X, Digits Y)
void MultiplyKaratsuba(RWDigits Z, Digits X, Digits Y)
void KaratsubaStart(RWDigits Z, Digits X, Digits Y, RWDigits scratch, int k)
void KaratsubaChunk(RWDigits Z, Digits X, Digits Y, RWDigits scratch)
constexpr int BitLength(int n)
bool GreaterThanOrEqual(Digits A, Digits B)
digit_t AddAndReturnOverflow(RWDigits Z, Digits X)
digit_t SubAndReturnBorrow(RWDigits Z, Digits X)
digit_t digit_sub2(digit_t a, digit_t b, digit_t borrow_in, digit_t *borrow_out)
digit_t digit_sub(digit_t a, digit_t b, digit_t *borrow)
constexpr int kKaratsubaThreshold
constexpr int RoundUp(int x, int y)
#define DCHECK(condition)