// Fixed-point table of the maximum number of bits that one output character
// can represent, per radix (index = radix; entries 0 and 1 are unused).
// Values appear to be scaled by kBitsPerCharTableMultiplier (32), e.g.
// radix 2 -> 32/32 = 1 bit per char, radix 16 -> 128/32 = 4 bits per char.
// NOTE(review): the array is truncated in this view — it presumably continues
// up to radix 36 plus a closing brace; confirm against the full file. The
// leading "23"/"24"/... tokens look like original line numbers fused into the
// text by extraction.
23constexpr uint8_t kMaxBitsPerChar[] = {
24 0, 0, 32, 51, 64, 75, 83, 90, 96,
25 102, 107, 111, 115, 119, 122, 126, 128,
26 131, 134, 136, 139, 141, 143, 145, 147,
27 149, 151, 153, 154, 156, 158, 159, 160,
// The entries in kMaxBitsPerChar above are fixed-point numbers scaled by
// kBitsPerCharTableMultiplier: shifting a table entry right by
// kBitsPerCharTableShift recovers the integral bits-per-char value.
// (Garbled "31"/"32"/"34" line-number prefixes removed; `static const`
// replaced by `constexpr`, which keeps internal linkage and const-ness.)
constexpr int kBitsPerCharTableShift = 5;
constexpr size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;

// Digit characters for radixes up to 36: '0'-'9' followed by 'a'-'z'.
constexpr char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
// NOTE(review): two distinct exponentiation helpers are interleaved in this
// view; their signatures and most bodies are missing.
// (orig. line 39) start of an iterative power loop; its body is not visible.
39 while (exponent > 0) {
// digit_pow_rec (orig. line 51): recursive base^exponent over digit_t.
// Assumes exponent >= 1 — exponent == 0 would recurse past the base case;
// TODO confirm callers (e.g. DivideByMagic) guarantee exponent > 0.
51 return exponent == 1 ?
base :
base * digit_pow_rec(base, exponent - 1);
// Emits the characters of `chunk` for a compile-time radix, writing
// right-to-left: `out` is decremented before each store and the new cursor is
// (presumably) returned. The template header and the control flow selecting
// between the two stores below (likely an `if (radix <= 10) ... else ...`)
// are missing from this view.
57char* BasecaseFixedLast(
digit_t chunk,
char* out) {
// Fast path: for small radixes the character is derived from '0' directly.
61 *(--out) =
'0' + (chunk % radix);
// General path: look the character up in the conversion table.
63 *(--out) = kConversionChars[chunk % radix];
// Divides `input` by a compile-time "magic" chunk divisor — the largest power
// of `radix` that fits in one digit — storing the quotient in `rest` and
// emitting `chunk_chars` characters per chunk of remainder, right-to-left
// into `output`. Several interior lines (the upper/lower double-digit
// division steps, remainder bookkeeping, loop bodies) are elided from this
// view, so the exact data flow cannot be confirmed here.
78template <digit_t radix>
79char* DivideByMagic(RWDigits rest, Digits input,
char* output) {
80 constexpr uint8_t max_bits_per_char = kMaxBitsPerChar[radix];
// chunk_chars: how many radix-characters one digit's worth of value covers;
// its initializer (using max_bits_per_char and the table multiplier) is not
// visible in this view.
81 constexpr int chunk_chars =
83 constexpr digit_t chunk_divisor = digit_pow_rec(radix, chunk_chars);
// Long division over the input, most significant digit first.
85 for (
int i = input.len() - 1;
i >= 0;
i--) {
// `upper`/`lower` are defined on elided lines — presumably the two halves
// of the running remainder:digit pair being divided by chunk_divisor.
88 digit_t u_result = upper / chunk_divisor;
91 digit_t l_result = lower / chunk_divisor;
// Emit chunk_chars characters of the remainder, least significant first
// (the output cursor moves leftwards).
96 for (
int i = 0;
i < chunk_chars;
i++) {
101 *(--output) = kConversionChars[
remainder % radix];
// Interrupt-check hook for the basecase division loops. When the advanced
// (subquadratic) algorithms are compiled in, the check is compiled out to a
// no-op; otherwise `code` runs as written. NOTE(review): the #else/#endif
// lines separating the two definitions are missing from this view — confirm
// against the full file.
113#if V8_ADVANCED_BIGINT_ALGORITHMS
114#define MAYBE_INTERRUPT(code) ((void)0)
116#define MAYBE_INTERRUPT(code) code
// Stateful helper that converts a BigInt's digits to characters. It holds the
// input digits, the radix, the sign, an output cursor (out_, written
// right-to-left), and the processor used for heavy arithmetic. Large parts of
// the class (member list, several method bodies) are elided from this view.
119class ToStringFormatter {
// Constructor: captures input digits X, output radix, sign flag, the output
// buffer plus its capacity, and the processor. The initializer list is not
// visible here.
121 ToStringFormatter(Digits
X,
int radix,
bool sign,
char* out,
122 uint32_t chars_available, ProcessorImpl* processor)
// Fragment of (presumably) the basecase conversion loop: repeatedly divides
// a working copy of the digits by a chunk divisor until only one digit is
// left. The loop header and most of the body are elided.
148 ScratchDigits rest(
digits_.len());
// Radix-10 fast path using the compile-time magic divisor.
155 out_ = DivideByMagic<10>(rest, dividend,
out_);
168 }
while (rest.len() > 1);
// Conversion for power-of-two radixes (pure shift/mask, no division).
172 void BasePowerOfTwo();
// Pads a chunk's output span with '0' up to its fixed width; see the
// definition below.
175 char* FillWithZeros(RecursionLevel* level,
char* prev_cursor,
char* out,
176 bool is_last_on_level);
// Recursive divide-and-conquer conversion step; see the definition below.
177 char* ProcessLevel(RecursionLevel* level, Digits chunk,
char* out,
178 bool is_last_on_level);
// Emits the most significant chunk (no leading-zero padding).
183 char* BasecaseLast(
digit_t digit,
char* out) {
184 if (
radix_ == 10)
return BasecaseFixedLast<10>(digit, out);
187 *(--out) = kConversionChars[digit %
radix_];
// Emits a non-final chunk; surrounding loop/padding logic is elided.
195 char* BasecaseMiddle(
digit_t digit,
char* out) {
198 *(--out) = kConversionChars[digit %
radix_];
217#undef MAYBE_INTERRUPT
// Start(): prepares the formatter before conversion; its body is entirely
// elided from this view.
220void ToStringFormatter::Start() {
// Finish(): returns the number of excess (unused) characters so callers can
// shrink the reported output length — presumably; the body is not visible
// here, so confirm against the full file.
228int ToStringFormatter::Finish() {
// Conversion for radixes that are powers of two: each character encodes
// exactly bits_per_char bits, so output is produced by shifting and masking
// only — no division. Several lines (bits_per_char setup, the outer loop
// header over digits, the msd handling branch) are elided from this view.
242void ToStringFormatter::BasePowerOfTwo() {
// radix_ - 1 is a mask of bits_per_char one-bits because radix_ is a power
// of two here.
244 const int char_mask =
radix_ - 1;
247 int available_bits = 0;
// Combine the leftover bits of the previous digit with the low bits of the
// incoming digit to form the next character.
251 int current = (digit | (new_digit << available_bits)) & char_mask;
252 *(--
out_) = kConversionChars[current];
253 int consumed_bits = bits_per_char - available_bits;
254 digit = new_digit >> consumed_bits;
255 available_bits = kDigitBits - consumed_bits;
// Emit full characters while the current digit still holds enough bits.
256 while (available_bits >= bits_per_char) {
257 *(--
out_) = kConversionChars[digit & char_mask];
258 digit >>= bits_per_char;
259 available_bits -= bits_per_char;
// Most significant digit (msd): stitch its low bits onto the leftovers,
// then drain its remaining bits.
264 int current = (digit | (msd << available_bits)) & char_mask;
265 *(--
out_) = kConversionChars[current];
266 digit = msd >> (bits_per_char - available_bits);
268 *(--
out_) = kConversionChars[digit & char_mask];
269 digit >>= bits_per_char;
// The divide-and-conquer ("fast") conversion below is only compiled when the
// advanced bigint algorithms are enabled.
273#if V8_ADVANCED_BIGINT_ALGORITHMS
// One level of the recursive division tree used by the fast conversion.
// Each level's divisor is the square of the previous level's divisor, and
// char_count_ doubles accordingly. Levels form a singly linked list owned
// through next_. Parts of the class are elided from this view.
320class RecursionLevel {
322 static RecursionLevel* CreateLevels(digit_t base_divisor,
int base_char_count,
323 int target_bit_length,
324 ProcessorImpl* processor);
// Deleting a level deletes all deeper levels (owning raw pointer chain).
325 ~RecursionLevel() {
delete next_; }
327 void ComputeInverse(ProcessorImpl* proc,
int dividend_length = 0);
328 Digits GetInverse(
int dividend_length);
331 friend class ToStringFormatter;
// Leaf-level constructor: a single-digit divisor covering base_char_count
// characters.
332 RecursionLevel(digit_t base_divisor,
int base_char_count)
333 : char_count_(base_char_count), divisor_(1) {
334 divisor_[0] = base_divisor;
// Chained constructor: this level covers twice as many characters; its
// divisor (next->divisor_ squared) is filled in by CreateLevels.
336 explicit RecursionLevel(RecursionLevel* next)
337 : char_count_(next->char_count_ * 2),
339 divisor_(next->divisor_.len() * 2) {
// Stacking a new level on top demotes the previous top level.
340 next->is_toplevel_ =
false;
// Normalizes divisor_ (for Barrett-style division) by shifting out its
// leading zero bits; the line setting leading_zero_shift_ is presumably on
// an elided line — confirm against the full file.
343 void LeftShiftDivisor() {
345 LeftShift(divisor_, divisor_, leading_zero_shift_);
348 int leading_zero_shift_{0};
// True until a deeper level is created on top of this one.
351 bool is_toplevel_{
true};
352 RecursionLevel* next_{
nullptr};
353 ScratchDigits divisor_;
// Backing storage for the precomputed inverse of divisor_ (handed out as
// views by GetInverse).
354 std::unique_ptr<Storage> inverse_storage_;
// Builds the chain of recursion levels by repeatedly squaring the divisor
// until one more doubling would exceed target_bit_length. Some lines (the
// base_char_count parameter declaration, early exits, the final return) are
// elided from this view.
359RecursionLevel* RecursionLevel::CreateLevels(
digit_t base_divisor,
361 int target_bit_length,
362 ProcessorImpl* processor) {
363 RecursionLevel* level =
new RecursionLevel(base_divisor, base_char_count);
// Keep squaring while the doubled divisor still fits under the target
// bit length.
374 while (
BitLength(level->divisor_) * 2 - 1 <= target_bit_length) {
375 RecursionLevel* prev = level;
376 level =
new RecursionLevel(prev);
// New level's divisor = prev->divisor_ squared.
377 processor->Multiply(level->divisor_, prev->divisor_, prev->divisor_);
// Termination handling (cleanup/return) is on elided lines.
378 if (processor->should_terminate()) {
382 level->divisor_.Normalize();
// Inner levels get their normalized divisor and inverse precomputed now.
385 prev->LeftShiftDivisor();
386 prev->ComputeInverse(processor);
// The top level is only shifted here; its inverse is computed later with
// the actual dividend length (see ProcessLevel).
388 level->LeftShiftDivisor();
// Precomputes the inverse of (the top part of) divisor_ for fast division.
// When dividend_length is nonzero, only the divisor digits that matter for
// dividends of that length are inverted.
396void RecursionLevel::ComputeInverse(ProcessorImpl* processor,
397 int dividend_length) {
398 int inverse_len = divisor_.len();
399 if (dividend_length != 0) {
400 inverse_len = dividend_length - divisor_.len();
401 DCHECK(inverse_len <= divisor_.len());
// scratch_len is defined on an elided line — presumably via
// InvertScratchSpace(inverse_len); confirm against the full file.
404 ScratchDigits scratch(scratch_len);
// Storage is kept alive in inverse_storage_ so that GetInverse can hand
// out non-owning views into it later.
405 Storage* inv_storage =
new Storage(inverse_len + 1);
406 inverse_storage_.reset(inv_storage);
407 RWDigits inverse_initializer(inv_storage->get(), inverse_len + 1);
// Invert only the top inverse_len digits of divisor_.
408 Digits input(divisor_, divisor_.len() - inverse_len, inverse_len);
409 processor->Invert(inverse_initializer, input, scratch);
// Drop the extra digit allocated for Invert's intermediate result.
410 inverse_initializer.TrimOne();
411 inverse_ = inverse_initializer;
// Returns a view of the precomputed inverse, sized for a dividend of
// dividend_length digits. ComputeInverse must have run already (checked by
// the DCHECK below).
414Digits RecursionLevel::GetInverse(
int dividend_length) {
415 DCHECK(inverse_.len() != 0);
416 int inverse_len = dividend_length - divisor_.len();
421 CHECK(inverse_len <= inverse_.len());
// Skip the low digits of the stored inverse that are irrelevant for this
// dividend length.
422 return inverse_ + (inverse_.len() - inverse_len);
// Entry point of the divide-and-conquer conversion: builds recursion levels
// sized for the full bit length of the input, then (on elided lines) runs
// ProcessLevel over the digits.
425void ToStringFormatter::Fast() {
430 int target_bit_length =
digits_.len() * kDigitBits;
// unique_ptr owns the whole level chain (the RecursionLevel destructor
// deletes next_ recursively). The argument list continues on elided lines.
431 std::unique_ptr<RecursionLevel> recursion_levels(RecursionLevel::CreateLevels(
// Pads the output between `out` and `right_boundary` with '0' characters so
// each chunk occupies its full fixed width. The last chunk on a level is
// exempt (the most significant part carries no leading zeros). The loop that
// actually writes the zeros is on elided lines.
440char* ToStringFormatter::FillWithZeros(RecursionLevel* level,
441 char* right_boundary,
char* out,
442 bool is_last_on_level) {
445 if (is_last_on_level)
return out;
// level == nullptr means we are below the lowest level, where a chunk spans
// chunk_chars_ characters; otherwise a chunk spans twice the level's
// char_count_ (left + right halves).
446 int chunk_chars = level ==
nullptr ?
chunk_chars_ : level->char_count_ * 2;
447 char*
end = right_boundary - chunk_chars;
// Recursive divide-and-conquer conversion step: splits `chunk` by the
// level's divisor into a quotient ("left", more significant) and remainder
// ("right", less significant) half and recurses on each with the next
// (smaller) level. Writes characters right-to-left ending at `out`.
// Numerous interior lines are elided from this view, so the control flow
// between the visible statements cannot be fully confirmed here.
455char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk,
456 char* out,
bool is_last_on_level) {
// Base case: at most one significant digit left — emit it directly.
458 Digits normalized = chunk;
459 normalized.Normalize();
460 if (normalized.len() <= 1) {
461 char* right_boundary = out;
462 if (normalized.len() == 1) {
463 out = BasecaseLast(normalized[0], out);
465 return FillWithZeros(level, right_boundary, out, is_last_on_level);
// Chunk is smaller than this level's divisor: skip straight to the next
// level (quotient would be zero).
470 if (normalized.len() < level->divisor_.len()) {
471 char* right_boundary = out;
472 out = ProcessLevel(level->next_, chunk, out, is_last_on_level);
473 return FillWithZeros(level, right_boundary, out, is_last_on_level);
// The divisor was left-shifted for normalization, so shift the chunk the
// same way. In-place modification is only allowed when the chunk does not
// alias the original input digits.
476 bool allow_inplace_modification = chunk.digits() !=
digits_.digits();
477 Digits original_chunk = chunk;
478 ShiftedDigits chunk_shifted(chunk, level->leading_zero_shift_,
479 allow_inplace_modification);
480 chunk = chunk_shifted;
// chunk <= divisor: the quotient is 0 (or 1 on equality, handled on elided
// lines), so only the remainder side needs processing.
483 int comparison =
Compare(chunk, level->divisor_);
484 if (comparison <= 0) {
485 char* right_boundary = out;
486 if (comparison < 0) {
// Undo the normalization shift and recurse with the original digits.
492 chunk_shifted.Reset();
494 chunk = original_chunk;
495 out = ProcessLevel(level->next_, chunk, out, is_last_on_level);
// Pad the (zero) left half of this level's output.
502 out = FillWithZeros(level->next_, right_boundary, out,
false);
506 return FillWithZeros(level, right_boundary, out, is_last_on_level);
// General case: divide chunk by the level's divisor. `right` receives the
// remainder, `left` the quotient.
510 ScratchDigits right(level->divisor_.len() + 1);
512 ScratchDigits left(chunk.len() - level->divisor_.len() + 1);
// Choose the division routine based on operand sizes.
515 int inverse_len = chunk.len() - level->divisor_.len();
516 if (inverse_len == 0) {
517 processor_->DivideSchoolbook(left, right, chunk, level->divisor_);
518 }
else if (level->divisor_.len() == 1) {
519 processor_->DivideSingle(left, right.digits(), chunk, level->divisor_[0]);
// DivideSingle only writes right[0]; clear the rest of the remainder.
520 for (
int i = 1;
i < right.len();
i++) right[
i] = 0;
// Barrett division path (the else-branch header is on an elided line):
// the top level's inverse is computed lazily with the actual dividend
// length, deeper levels had theirs precomputed in CreateLevels.
525 if (level->is_toplevel_) {
526 level->ComputeInverse(
processor_, chunk.len());
527 if (
processor_->should_terminate())
return out;
529 Digits inverse = level->GetInverse(chunk.len());
530 processor_->DivideBarrett(left, right, chunk, level->divisor_, inverse,
532 if (
processor_->should_terminate())
return out;
// Undo the normalization shift on the remainder.
534 RightShift(right, right, level->leading_zero_shift_);
// Sanity check: the quotient must fit within the divisor's digit count.
536 Digits left_test = left;
537 left_test.Normalize();
538 DCHECK(left_test.len() <= level->divisor_.len());
// Right (less significant) half first; it is never the last chunk on its
// level, so it gets full zero-padding.
542 char* end_of_right_part = ProcessLevel(level->next_, right, out,
false);
543 if (
processor_->should_terminate())
return out;
// The right half must have consumed exactly char_count_ characters.
546 DCHECK(end_of_right_part == out - level->char_count_);
547 USE(end_of_right_part);
// Then the left (more significant) half, ending where the right began.
550 return ProcessLevel(level->next_, left, out - level->char_count_,
// Tail of (presumably) ProcessorImpl::ToString: forwards to ToStringImpl.
// The leading parameter lines (out, out_length, X) and the computation of
// use_fast_algorithm are elided from this view.
559 int radix,
bool sign) {
561 ToStringImpl(out, out_length,
X, radix, sign, use_fast_algorithm);
// ToStringImpl fragment: constructs a formatter over the caller-provided
// buffer, runs the selected conversion, then trims the reported length and
// clears the unused buffer prefix. Leading signature lines and the branch
// selecting between BasePowerOfTwo / basecase / fast paths are elided.
566 int radix,
bool sign,
bool fast) {
570 ToStringFormatter formatter(
X, radix, sign, out, *out_length,
this);
// Power-of-two radix path (other branches are on elided lines).
572 formatter.BasePowerOfTwo();
573#if V8_ADVANCED_BIGINT_ALGORITHMS
577 if (should_terminate())
return;
// Finish() reports how many characters at the start of the buffer went
// unused; shrink the length and zero that prefix.
585 int excess = formatter.Finish();
586 *out_length -= excess;
587 memset(out + *out_length, 0, excess);
// Status-returning public wrapper (presumably Processor::ToString):
// delegates to the implementation, then surfaces and clears the processor's
// status. The signature line is elided from this view.
593 impl->ToString(out, out_length,
X, radix, sign);
594 return impl->get_and_clear_status();
// ToStringResultLength fragment: computes a conservative upper bound on the
// number of characters needed — bit_length divided by a floor of the
// bits-per-char value, rounded up. The leading lines (signature, bit_length
// computation, power-of-two fast path) are elided from this view.
605 const uint8_t max_bits_per_char = kMaxBitsPerChar[radix];
// Subtracting 1 from the scaled (fixed-point) maximum rounds bits-per-char
// down, which over-estimates the char count — safe for a length bound.
608 const uint8_t min_bits_per_char = max_bits_per_char - 1;
// Scale in 64 bits to avoid overflow before the division.
610 uint64_t chars_required = bit_length;
611 chars_required *= kBitsPerCharTableMultiplier;
612 chars_required =
DIV_CEIL(chars_required, min_bits_per_char);
// The result must fit in 32 bits; this appears to be the tail of a
// DCHECK/CHECK whose opening line is elided.
614 static_cast<uint64_t
>(std::numeric_limits<uint32_t>::max()));
615 result =
static_cast<uint32_t
>(chars_required);
void ToString(char *out, uint32_t *out_length, Digits X, int radix, bool sign)
void ToStringImpl(char *out, uint32_t *out_length, Digits X, int radix, bool sign, bool use_fast_algorithm)
Status ToString(char *out, uint32_t *out_length, Digits X, int radix, bool sign)
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enable experimental feedback collection in generic lowering enable Turboshaft s WasmLoadElimination enable Turboshaft s low level load elimination for JS enable Turboshaft s escape analysis for string concatenation use enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory 
concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often force marking at random points between and X(inclusive) percent " "of the regular marking start limit") DEFINE_INT(stress_scavenge
ZoneVector< RpoNumber > & result
ProcessorImpl * processor_
constexpr unsigned CountLeadingZeros(T value)
constexpr unsigned CountTrailingZeros(T value)
uint32_t ToStringResultLength(Digits X, int radix, bool sign)
constexpr int BitLength(int n)
void LeftShift(RWDigits Z, Digits X, digit_t shift)
static constexpr int kHalfDigitBits
constexpr char kStringZapValue
static constexpr digit_t kHalfDigitMask
constexpr int CountTrailingZeros(uint32_t value)
constexpr bool IsPowerOfTwo(int value)
void RightShift(RWDigits Z, Digits X, digit_t shift, const RightShiftState &state)
constexpr int InvertScratchSpace(int n)
constexpr int DivideBarrettScratchSpace(int n)
constexpr int kToStringFastThreshold
int Compare(const T &a, const T &b)
#define DCHECK(condition)
#define MAYBE_INTERRUPT(code)