v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
string-table.cc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/string-table.h"

#include <atomic>

#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/safepoint.h"
#include "src/objects/slots.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

class OffHeapStringHashSet
    : public OffHeapHashTableBase<OffHeapStringHashSet> {
 public:
  static constexpr int kEntrySize = 1;
  static constexpr int kMaxEmptyFactor = 4;
  static constexpr int kMinCapacity = 2048;

  explicit OffHeapStringHashSet(int capacity)
      : OffHeapHashTableBase<OffHeapStringHashSet>(capacity) {}

  static uint32_t Hash(PtrComprCageBase, Tagged<Object> key) {
    return Cast<String>(key)->hash();
  }

  template <typename IsolateT, typename StringTableKey>
  static bool KeyIsMatch(IsolateT* isolate, StringTableKey* key,
                         Tagged<Object> obj) {
    auto string = Cast<String>(obj);
    if (string->hash() != key->hash()) return false;
    if (string->length() != key->length()) return false;
    return key->IsMatch(isolate, string);
  }

  Tagged<Object> GetKey(PtrComprCageBase cage_base, InternalIndex index) const {
    return slot(index).Acquire_Load(cage_base);
  }

  void SetKey(InternalIndex index, Tagged<Object> key) {
    DCHECK(IsString(key));
    slot(index).Release_Store(key);
  }
  void Set(InternalIndex index, Tagged<String> key) { SetKey(index, key); }

  static void CopyEntryExcludingKeyInto(PtrComprCageBase, InternalIndex,
                                        OffHeapStringHashSet*, InternalIndex) {
    // Do nothing, since the entry size is 1 (just the key).
  }

 private:
  friend class StringTable::Data;
};
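
// Illustrative sketch (not part of the V8 source): roughly how a probe loop
// in the OffHeapHashTableBase style consumes the Hash/KeyIsMatch hooks above.
// The real table uses quadratic probing with Smi 0 / Smi 1 sentinels (see the
// comment on StringTable::Data below); this toy uses ints with -1/-2
// sentinels instead, and all names in it are hypothetical.
namespace probe_sketch {
constexpr int kEmpty = -1;    // stands in for the "empty" sentinel (Smi 0)
constexpr int kDeleted = -2;  // stands in for the "deleted" sentinel (Smi 1)

// Probe with increasing triangular steps from the home bucket until the key
// or an empty slot is found. With a power-of-two capacity this step sequence
// visits every slot exactly once.
inline int FindEntry(const int* table, int capacity /* power of two */,
                     int key, unsigned hash) {
  unsigned mask = static_cast<unsigned>(capacity) - 1;
  unsigned index = hash & mask;
  for (unsigned step = 1;; index = (index + step++) & mask) {
    int element = table[index];
    if (element == kEmpty) return -1;   // definite miss
    if (element == kDeleted) continue;  // tombstone: keep probing
    if (element == key) return static_cast<int>(index);  // "KeyIsMatch" hit
  }
}
}  // namespace probe_sketch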

// Data holds the actual data of the string table, including capacity and
// number of elements.
//
// It is a variable sized structure, with a "header" followed directly in
// memory by the elements themselves. These are accessed as offsets from the
// elements_ field, which itself provides storage for the first element.
//
// The elements themselves are stored as an open-addressed hash table, with
// quadratic probing and Smi 0 and Smi 1 as the empty and deleted sentinels,
// respectively.
class StringTable::Data {
 public:
  static std::unique_ptr<Data> New(int capacity);
  static std::unique_ptr<Data> Resize(PtrComprCageBase cage_base,
                                      std::unique_ptr<Data> data, int capacity);

  void* operator new(size_t size, int capacity);
  void* operator new(size_t size) = delete;
  void operator delete(void* description);

  OffHeapStringHashSet& table() { return table_; }
  const OffHeapStringHashSet& table() const { return table_; }

  // Helper method for StringTable::TryStringToIndexOrLookupExisting.
  template <typename Char>
  static Address TryStringToIndexOrLookupExisting(Isolate* isolate,
                                                  Tagged<String> string,
                                                  Tagged<String> source,
                                                  size_t start);

  void IterateElements(RootVisitor* visitor) {
    table_.IterateElements(Root::kStringTable, visitor);
  }

  Data* PreviousData() { return previous_data_.get(); }
  void DropPreviousData() { previous_data_.reset(); }

  void Print(PtrComprCageBase cage_base) const;
  size_t GetCurrentMemoryUsage() const;

 private:
  explicit Data(int capacity) : table_(capacity) {}

  std::unique_ptr<Data> previous_data_;

  OffHeapStringHashSet table_;
};

void* StringTable::Data::operator new(size_t size, int capacity) {
  // Make sure the size given is the size of the Data structure.
  DCHECK_EQ(size, sizeof(StringTable::Data));
  return OffHeapStringHashSet::Allocate<Data, offsetof(Data, table_.elements_)>(
      capacity);
}

void StringTable::Data::operator delete(void* table) {
  OffHeapStringHashSet::Free(table);
}
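
// Illustrative sketch (not part of the V8 source): the variable-sized
// allocation pattern used by the sized operator new above, reduced to a
// standalone struct. The assumption mirrors Data: one element's storage is
// already part of the header, so only capacity - 1 extra slots are needed.
namespace varsized_sketch {
struct VarSized {
  explicit VarSized(int capacity) : capacity(capacity) {}

  void* operator new(size_t size, int capacity) {
    // `size` covers the header plus the first element's storage.
    return ::operator new(size + (capacity - 1) * sizeof(void*));
  }
  void operator delete(void* p) { ::operator delete(p); }

  int capacity;
  void* elements[1];  // storage for the first element; the rest follow it
};
// Usage: `new (16) VarSized(16)` allocates room for 16 trailing elements.
}  // namespace varsized_sketch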

size_t StringTable::Data::GetCurrentMemoryUsage() const {
  size_t usage = sizeof(*this) + table_.GetSizeExcludingHeader();
  if (previous_data_) {
    usage += previous_data_->GetCurrentMemoryUsage();
  }
  return usage;
}

std::unique_ptr<StringTable::Data> StringTable::Data::New(int capacity) {
  return std::unique_ptr<Data>(new (capacity) Data(capacity));
}

std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
    PtrComprCageBase cage_base, std::unique_ptr<Data> data, int capacity) {
  std::unique_ptr<Data> new_data(new (capacity) Data(capacity));
  data->table_.RehashInto(cage_base, &new_data->table_);
  new_data->previous_data_ = std::move(data);
  return new_data;
}
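
// Illustrative sketch (not part of the V8 source): why Resize chains the old
// Data into previous_data_. A concurrent reader may have acquire-loaded the
// old table pointer and still be probing it, so the superseded allocation
// must stay alive until a safepoint drops the chain. Names are hypothetical.
namespace resize_sketch {
struct Table {
  explicit Table(int capacity) : capacity(capacity) {}
  int capacity;
  std::unique_ptr<Table> previous;  // keeps superseded tables alive
};

// Writer side: build the new table, take ownership of the old one, then
// release-store the new pointer. Readers that already hold the old pointer
// keep a valid object until `previous` is reset at a safepoint.
inline void Grow(std::atomic<Table*>& current, int new_capacity) {
  Table* old_table = current.load(std::memory_order_relaxed);
  auto new_table = std::make_unique<Table>(new_capacity);
  new_table->previous.reset(old_table);
  current.store(new_table.release(), std::memory_order_release);
}
}  // namespace resize_sketch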

void StringTable::Data::Print(PtrComprCageBase cage_base) const {
  OFStream os(stdout);
  os << "StringTable {" << std::endl;
  for (InternalIndex i : InternalIndex::Range(table_.capacity())) {
    os << " " << i.as_uint32() << ": " << Brief(table_.GetKey(cage_base, i))
       << std::endl;
  }
  os << "}" << std::endl;
}

StringTable::StringTable(Isolate* isolate)
    : data_(Data::New(kStringTableMinCapacity).release()), isolate_(isolate) {}

StringTable::~StringTable() { delete data_; }

int StringTable::Capacity() const {
  return data_.load(std::memory_order_acquire)->table().capacity();
}
int StringTable::NumberOfElements() const {
  {
    base::MutexGuard table_write_guard(&write_mutex_);
    return data_.load(std::memory_order_relaxed)->table().number_of_elements();
  }
}

// InternalizedStringKey carries a string/internalized-string object as key.
class InternalizedStringKey final : public StringTableKey {
 public:
  explicit InternalizedStringKey(DirectHandle<String> string, uint32_t hash)
      : StringTableKey(hash, string->length()), string_(string) {
    // When sharing the string table, it's possible that another thread already
    // internalized the key, in which case StringTable::LookupKey will perform
    // a redundant lookup and return the already internalized copy.
    DCHECK_IMPLIES(!v8_flags.shared_string_table,
                   !IsInternalizedString(*string));
    DCHECK(string->IsFlat());
  }

  bool IsMatch(Isolate* isolate, Tagged<String> string) {
    return string_->SlowEquals(string);
  }

  void PrepareForInsertion(Isolate* isolate) {
    StringTransitionStrategy strategy =
        isolate->factory()->ComputeInternalizationStrategyForString(
            string_, &maybe_internalized_map_);
    switch (strategy) {
      case StringTransitionStrategy::kCopy:
        break;
      case StringTransitionStrategy::kInPlace:
        // In-place transition will be done in GetHandleForInsertion, when we
        // are sure that we are going to insert the string into the table.
        return;
      case StringTransitionStrategy::kAlreadyTransitioned:
        // We can see already internalized strings here only when sharing the
        // string table and allowing concurrent internalization.
        DCHECK(v8_flags.shared_string_table);
        internalized_string_ = string_;
        return;
    }

    // Copying the string here is always threadsafe, as no instance type
    // requiring a copy can transition any further.
    StringShape shape(*string_);
    // External strings get special treatment, to avoid copying their
    // contents as long as they are not uncached or the string table is shared.
    // If the string table is shared, another thread could look up a string
    // with the same content before this thread completes MakeThin (which sets
    // the resource), resulting in a string table hit returning the string we
    // just created that is not correctly initialized.
    const bool can_avoid_copy =
        !v8_flags.shared_string_table && !shape.IsUncachedExternal();
    if (can_avoid_copy && shape.IsExternalOneByte()) {
      // Shared external strings are always in-place internalizable.
      // If this assumption is invalidated in the future, make sure that we
      // fully initialize (copy contents) for shared external strings, as the
      // original string is not transitioned to a ThinString (setting the
      // resource) immediately.
      DCHECK(!shape.IsShared());
      internalized_string_ =
          isolate->factory()->InternalizeExternalString<ExternalOneByteString>(
              string_);
    } else if (can_avoid_copy && shape.IsExternalTwoByte()) {
      // Shared external strings are always in-place internalizable.
      // If this assumption is invalidated in the future, make sure that we
      // fully initialize (copy contents) for shared external strings, as the
      // original string is not transitioned to a ThinString (setting the
      // resource) immediately.
      DCHECK(!shape.IsShared());
      internalized_string_ =
          isolate->factory()->InternalizeExternalString<ExternalTwoByteString>(
              string_);
    } else {
      // Otherwise allocate a new internalized string.
      internalized_string_ = isolate->factory()->NewInternalizedStringImpl(
          string_, length(), raw_hash_field());
    }
  }

  DirectHandle<String> GetHandleForInsertion(Isolate* isolate) {
    DirectHandle<Map> internalized_map;
    // When preparing the string, the strategy was to in-place migrate it.
    if (maybe_internalized_map_.ToHandle(&internalized_map)) {
      // It is always safe to overwrite the map. The only transition possible
      // is another thread migrated the string to internalized already.
      // Migrations to thin are impossible, as we only call this method on
      // table misses inside the critical section.
      string_->set_map_safe_transition_no_write_barrier(isolate,
                                                        *internalized_map);
      DCHECK(IsInternalizedString(*string_));
      return string_;
    }
    // We prepared an internalized copy for the string or the string was
    // already internalized.
    // In theory we could have created a copy of a SeqString in young
    // generation that has been promoted to old space by now. In that case we
    // could in-place migrate the original string instead of internalizing the
    // copy and migrating the original string to a ThinString. This scenario
    // doesn't seem to be common enough to justify re-computing the strategy
    // here.
    return internalized_string_.ToHandleChecked();
  }

 private:
  DirectHandle<String> string_;
  // Copy of the string to be internalized (only set if the string is not
  // in-place internalizable). We can't override the original string, as
  // internalized external strings don't set the resource directly (deferred
  // to MakeThin to ensure unique ownership of the resource), and thus would
  // break equality checks in case of hash collisions.
  MaybeDirectHandle<String> internalized_string_;
  MaybeDirectHandle<Map> maybe_internalized_map_;
};

namespace {

void SetInternalizedReference(Isolate* isolate, Tagged<String> string,
                              Tagged<String> internalized) {
  DCHECK(!IsThinString(string));
  DCHECK(!IsInternalizedString(string));
  DCHECK(IsInternalizedString(internalized));
  DCHECK(!internalized->HasInternalizedForwardingIndex(kAcquireLoad));
  if (string->IsShared() || v8_flags.always_use_string_forwarding_table) {
    uint32_t field = string->raw_hash_field(kAcquireLoad);
    // Don't use the forwarding table for strings that have an integer index.
    // Using the hash field for the integer index is more beneficial than
    // using it to store the forwarding index to the internalized string.
    if (Name::IsIntegerIndex(field)) return;
    // Check one last time if we already have an internalized forwarding index
    // to prevent too many copies of the string in the forwarding table.
    if (Name::IsInternalizedForwardingIndex(field)) return;

    // If we already have an entry for an external resource in the table,
    // update the entry instead of creating a new one. There is no guarantee
    // that we will always update existing records instead of creating new
    // ones, but races should be rare.
    if (Name::IsForwardingIndex(field)) {
      const int forwarding_index =
          Name::ForwardingIndexValueBits::decode(field);
      isolate->string_forwarding_table()->UpdateForwardString(forwarding_index,
                                                              internalized);
      // Update the forwarding index type to include internalized.
      field = Name::IsInternalizedForwardingIndexBit::update(field, true);
      string->set_raw_hash_field(field, kReleaseStore);
    } else {
      const int forwarding_index =
          isolate->string_forwarding_table()->AddForwardString(string,
                                                               internalized);
      string->set_raw_hash_field(
          String::CreateInternalizedForwardingIndex(forwarding_index),
          kReleaseStore);
    }
  } else {
    DCHECK(!string->HasForwardingIndex(kAcquireLoad));
    string->MakeThin(isolate, internalized);
  }
}
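
// Illustrative sketch (not part of the V8 source): how a forwarding index can
// be packed into the raw hash field with bit fields, as the code above does.
// The layout below is hypothetical; V8's real encoding lives in
// src/objects/name.h.
namespace forwarding_sketch {
constexpr uint32_t kIsForwardingIndexBit = 1u << 0;
constexpr uint32_t kIsInternalizedForwardingIndexBit = 1u << 1;
constexpr int kIndexShift = 2;

constexpr uint32_t MakeInternalizedForwardingIndex(uint32_t index) {
  return (index << kIndexShift) | kIsForwardingIndexBit |
         kIsInternalizedForwardingIndexBit;
}
constexpr uint32_t DecodeForwardingIndex(uint32_t field) {
  return field >> kIndexShift;
}

static_assert(DecodeForwardingIndex(MakeInternalizedForwardingIndex(42)) == 42,
              "encoding round-trips the index");
}  // namespace forwarding_sketch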

}  // namespace

DirectHandle<String> StringTable::LookupString(Isolate* isolate,
                                               DirectHandle<String> string) {
  // When sharing the string table, internalization is allowed to be concurrent
  // from multiple Isolates, assuming that:
  //
  // - All in-place internalizable strings (i.e. old-generation flat strings)
  //   and internalized strings are in the shared heap.
  // - LookupKey supports concurrent access (see comment below).
  //
  // These assumptions guarantee the following properties:
  //
  // - String::Flatten is not threadsafe but is only called on non-shared
  //   strings, since non-flat strings are not shared.
  //
  // - String::ComputeAndSetRawHash is threadsafe on flat strings. This is safe
  //   because the characters are immutable and the same hash will be
  //   computed. The hash field is set with relaxed memory order. A thread that
  //   doesn't see the hash may do redundant work but will not be incorrect.
  //
  // - In-place internalizable strings do not incur a copy regardless of string
  //   table sharing. The map mutation is threadsafe even with relaxed memory
  //   order, because for concurrent table lookups, the "losing" thread will be
  //   correctly ordered by LookupKey's write mutex and see the updated map
  //   during the re-lookup.
  //
  // For lookup misses, the internalized string map is the same map in RO space
  // regardless of which thread is doing the lookup.
  //
  // For lookup hits, we use the StringForwardingTable for shared strings to
  // delay the transition into a ThinString to the next stop-the-world GC.
  DirectHandle<String> result =
      String::Flatten(isolate, indirect_handle(string, isolate));
  if (!IsInternalizedString(*result)) {
    uint32_t raw_hash_field = result->raw_hash_field(kAcquireLoad);

    if (String::IsInternalizedForwardingIndex(raw_hash_field)) {
      const int index =
          String::ForwardingIndexValueBits::decode(raw_hash_field);
      result = direct_handle(
          isolate->string_forwarding_table()->GetForwardString(isolate, index),
          isolate);
    } else {
      if (!Name::IsHashFieldComputed(raw_hash_field)) {
        raw_hash_field = result->EnsureRawHash();
      }
      InternalizedStringKey key(result, raw_hash_field);
      result = LookupKey(isolate, &key);
    }
  }
  if (*string != *result && !IsThinString(*string)) {
    SetInternalizedReference(isolate, *string, *result);
  }
  return result;
}

template <typename StringTableKey, typename IsolateT>
DirectHandle<String> StringTable::LookupKey(IsolateT* isolate,
                                            StringTableKey* key) {
  // String table lookups are allowed to be concurrent, assuming that:
  //
  // - The Heap access is allowed to be concurrent (using LocalHeap or
  //   similar),
  // - All writes to the string table are guarded by the Isolate string table
  //   mutex,
  // - Resizes of the string table first copy the old contents to the new
  //   table, and only then set the new string table pointer to the new
  //   table,
  // - Only GCs can remove elements from the string table.
  //
  // These assumptions allow us to make the following statement:
  //
  //   "Reads are allowed when not holding the lock, as long as false negatives
  //    (misses) are ok. We will never get a false positive (hit of an entry no
  //    longer in the table)."
  //
  // This is because we _know_ that if we find an entry in the string table,
  // any entry will also be in all reallocations of that table. This is
  // required for strong consistency of internalized string equality implying
  // reference equality.
  //
  // We therefore try to optimistically read from the string table without
  // taking the lock (both here and in the NoAllocate version of the lookup),
  // and on a miss we take the lock and try to write the entry, with a second
  // read lookup in case the non-locked read missed a write.
  //
  // One complication is allocation -- we don't want to allocate while holding
  // the string table lock. This applies to both allocation of new strings, and
  // re-allocation of the string table on resize. So, we optimistically
  // allocate (without copying values) outside the lock, and potentially
  // discard the allocation if another write also did an allocation. This
  // assumes that writes are rarer than reads.

  // Load the current string table data, in case another thread updates the
  // data while we're reading.
  Data* const current_data = data_.load(std::memory_order_acquire);
  OffHeapStringHashSet& current_table = current_data->table();

  // First try to find the string in the table. This is safe to do even if the
  // table is now reallocated; we won't find a stale entry in the old table
  // because the new table won't delete its corresponding entry until the
  // string is dead, in which case it will die in this table too and worst
  // case we'll have a false miss.
  InternalIndex entry = current_table.FindEntry(isolate, key, key->hash());
  if (entry.is_found()) {
    DirectHandle<String> result = direct_handle(
        Cast<String>(current_table.GetKey(isolate, entry)), isolate);
    DCHECK_IMPLIES(v8_flags.shared_string_table,
                   HeapLayout::InAnySharedSpace(*result));
    return result;
  }

  // No entry found, so add a new string.
  key->PrepareForInsertion(isolate);
  {
    base::MutexGuard table_write_guard(&write_mutex_);

    Data* data = EnsureCapacity(isolate, 1);
    OffHeapStringHashSet& table = data->table();

    // Check one last time if the key is present in the table, in case it was
    // added after the check.
    entry = table.FindEntryOrInsertionEntry(isolate, key, key->hash());

    Tagged<Object> element = table.GetKey(isolate, entry);
    if (element == OffHeapStringHashSet::empty_element()) {
      // This entry is empty, so write it and register that we added an
      // element.
      DirectHandle<String> new_string = key->GetHandleForInsertion(isolate_);
      DCHECK_IMPLIES(v8_flags.shared_string_table, new_string->IsShared());
      table.AddAt(isolate, entry, *new_string);
      return new_string;
    } else if (element == OffHeapStringHashSet::deleted_element()) {
      // This entry was deleted, so overwrite it and register that we
      // overwrote a deleted element.
      DirectHandle<String> new_string = key->GetHandleForInsertion(isolate_);
      DCHECK_IMPLIES(v8_flags.shared_string_table, new_string->IsShared());
      table.OverwriteDeletedAt(isolate, entry, *new_string);
      return new_string;
    } else {
      // Return the existing string as a handle.
      return direct_handle(Cast<String>(element), isolate);
    }
  }
}
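
// Illustrative sketch (not part of the V8 source): the lock-free read /
// locked write pattern above, reduced to a toy set of interned C strings.
// Fixed capacity, linear instead of quadratic probing, no deletion and no
// resize; assumes <mutex> and <cstring>; all names are hypothetical.
namespace lookup_sketch {
constexpr size_t kCapacity = 1024;  // power of two; a full table is unhandled
inline std::atomic<const char*> slots[kCapacity];
inline std::mutex write_mutex;

inline size_t HashOf(const char* s) {
  size_t h = 5381;
  while (*s) h = h * 33 + static_cast<unsigned char>(*s++);
  return h;
}

inline const char* Intern(const char* key) {
  const size_t mask = kCapacity - 1;
  const size_t home = HashOf(key) & mask;
  // Fast path: probe without the lock. Published entries are immutable, so a
  // hit is always valid; a miss may be stale (a false negative), which is ok.
  for (size_t i = home;; i = (i + 1) & mask) {
    const char* entry = slots[i].load(std::memory_order_acquire);
    if (entry == nullptr) break;  // possible miss; fall through to slow path
    if (std::strcmp(entry, key) == 0) return entry;  // definite hit
  }
  // Slow path: take the lock and look again, in case another thread inserted
  // the key between our unlocked probe and the lock acquisition.
  std::lock_guard<std::mutex> guard(write_mutex);
  for (size_t i = home;; i = (i + 1) & mask) {
    const char* entry = slots[i].load(std::memory_order_relaxed);
    if (entry == nullptr) {
      slots[i].store(key, std::memory_order_release);  // publish the entry
      return key;
    }
    if (std::strcmp(entry, key) == 0) return entry;
  }
}
}  // namespace lookup_sketch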

// (Explicit template instantiations of StringTable::LookupKey for the
// supported key types appear here in the original source; they were elided
// from this listing.)

StringTable::Data* StringTable::EnsureCapacity(PtrComprCageBase cage_base,
                                               int additional_elements) {
  // This call is only allowed while the write mutex is held.
  write_mutex_.AssertHeld();

  // This load can be relaxed as the table pointer can only be modified while
  // the lock is held.
  Data* data = data_.load(std::memory_order_relaxed);

  int new_capacity;
  if (data->table().ShouldResizeToAdd(additional_elements, &new_capacity)) {
    std::unique_ptr<Data> new_data =
        Data::Resize(cage_base, std::unique_ptr<Data>(data), new_capacity);
    // `new_data` is the new owner of `data`.
    DCHECK_EQ(new_data->PreviousData(), data);
    // Release-store the new data pointer as `data_`, so that it can be
    // acquire-loaded by other threads. This string table becomes the owner of
    // the pointer.
    data = new_data.release();
    data_.store(data, std::memory_order_release);
  }

  return data;
}

namespace {
template <typename Char>
class CharBuffer {
 public:
  void Reset(size_t length) {
    if (length >= kInlinedBufferSize)
      outofline_ = std::make_unique<Char[]>(length);
  }

  Char* Data() {
    if (outofline_)
      return outofline_.get();
    else
      return inlined_;
  }

 private:
  static constexpr size_t kInlinedBufferSize = 256;
  Char inlined_[kInlinedBufferSize];
  std::unique_ptr<Char[]> outofline_;
};
}  // namespace
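
// Illustrative usage (not part of the original file): CharBuffer is a
// small-buffer optimization, keeping short strings in the inline array and
// falling back to a heap allocation at or above kInlinedBufferSize (256).
namespace {
[[maybe_unused]] void CharBufferUsageSketch() {
  CharBuffer<char> buf;
  buf.Reset(64);            // small: Data() points at the inline array
  char* small = buf.Data();
  buf.Reset(1024);          // large: switches to a heap allocation
  char* large = buf.Data();
  USE(small, large);
}
}  // namespace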

// static
template <typename Char>
Address StringTable::Data::TryStringToIndexOrLookupExisting(
    Isolate* isolate, Tagged<String> string, Tagged<String> source,
    size_t start) {
  // TODO(leszeks): This method doesn't really belong on StringTable::Data.
  // Ideally it would be a free function in an anonymous namespace, but that
  // causes issues around method and class visibility.

  DisallowGarbageCollection no_gc;

  uint32_t length = string->length();
  // The source hash is usable if it is not from a sliced string.
  // For sliced strings we need to recalculate the hash from the given offset
  // with the correct length.
  const bool is_source_hash_usable = start == 0 && length == source->length();

  // First check if the string contains a forwarding index.
  uint32_t raw_hash_field = source->raw_hash_field(kAcquireLoad);
  if (Name::IsInternalizedForwardingIndex(raw_hash_field) &&
      is_source_hash_usable) {
    const int index = Name::ForwardingIndexValueBits::decode(raw_hash_field);
    Tagged<String> internalized =
        isolate->string_forwarding_table()->GetForwardString(isolate, index);
    return internalized.ptr();
  }

  uint64_t seed = HashSeed(isolate);

  CharBuffer<Char> buffer;
  const Char* chars;

  SharedStringAccessGuardIfNeeded access_guard(isolate);
  if (IsConsString(source, isolate)) {
    DCHECK(!source->IsFlat());
    buffer.Reset(length);
    String::WriteToFlat(source, buffer.Data(), 0, length, access_guard);
    chars = buffer.Data();
  } else {
    chars = source->GetDirectStringChars<Char>(no_gc, access_guard) + start;
  }

  if (!Name::IsHashFieldComputed(raw_hash_field) || !is_source_hash_usable) {
    raw_hash_field =
        StringHasher::HashSequentialString<Char>(chars, length, seed);
  }
  // TODO(verwaest): Internalize to one-byte when possible.
  SequentialStringKey<Char> key(raw_hash_field,
                                base::Vector<const Char>(chars, length), seed);

  // String could be an array index.
  if (Name::ContainsCachedArrayIndex(raw_hash_field)) {
    return Smi::FromInt(String::ArrayIndexValueBits::decode(raw_hash_field))
        .ptr();
  }

  if (Name::IsIntegerIndex(raw_hash_field)) {
    // It is an index, but it's not cached.
    return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
  }

  Data* string_table_data =
      isolate->string_table()->data_.load(std::memory_order_acquire);

  InternalIndex entry =
      string_table_data->table().FindEntry(isolate, &key, key.hash());
  if (entry.is_not_found()) {
    // A string that's not an array index, and not in the string table,
    // cannot have been used as a property name before.
    return Smi::FromInt(ResultSentinel::kNotFound).ptr();
  }

  Tagged<String> internalized =
      Cast<String>(string_table_data->table().GetKey(isolate, entry));
  // `string` may already be internalized here, if another thread internalized
  // it concurrently. If we found an entry in the string table and `string` is
  // not internalized, there is no way that it can transition to internalized
  // later on. So a last check here is sufficient.
  if (!IsInternalizedString(string)) {
    SetInternalizedReference(isolate, string, internalized);
  } else {
    DCHECK(v8_flags.shared_string_table);
  }
  return internalized.ptr();
}
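
// Illustrative sketch (not part of the V8 source): why a sliced string cannot
// reuse its parent's cached hash. The hash covers exactly the characters in
// [start, start + length), so any nonzero offset or shorter length requires
// rehashing that window. The FNV-1a hash below stands in for StringHasher;
// all names are hypothetical.
namespace slice_hash_sketch {
inline uint32_t HashWindow(const char* chars, size_t length) {
  uint32_t h = 2166136261u;
  for (size_t i = 0; i < length; i++) {
    h = (h ^ static_cast<unsigned char>(chars[i])) * 16777619u;
  }
  return h;
}

inline uint32_t HashOfSlice(const char* parent, size_t parent_length,
                            uint32_t parent_hash, size_t start,
                            size_t length) {
  // Reuse the parent's hash only when the slice covers the whole parent,
  // mirroring is_source_hash_usable above.
  if (start == 0 && length == parent_length) return parent_hash;
  return HashWindow(parent + start, length);
}
}  // namespace slice_hash_sketch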

// static
Address StringTable::TryStringToIndexOrLookupExisting(Isolate* isolate,
                                                      Address raw_string) {
  Tagged<String> string = Cast<String>(Tagged<Object>(raw_string));
  if (IsInternalizedString(string)) {
    // `string` can only be internalized here if the string table is shared
    // and another thread internalized it concurrently.
    DCHECK(v8_flags.shared_string_table);
    return raw_string;
  }

  // Valid array indices are >= 0, so they cannot be mixed up with any of
  // the result sentinels, which are negative.
  static_assert(
      !String::ArrayIndexValueBits::is_valid(ResultSentinel::kUnsupported));
  static_assert(
      !String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));

  size_t start = 0;
  Tagged<String> source = string;
  if (IsSlicedString(source)) {
    Tagged<SlicedString> sliced = Cast<SlicedString>(source);
    start = sliced->offset();
    source = sliced->parent();
  } else if (IsConsString(source) && source->IsFlat()) {
    source = Cast<ConsString>(source)->first();
  }
  if (IsThinString(source)) {
    source = Cast<ThinString>(source)->actual();
    if (string->length() == source->length()) {
      return source.ptr();
    }
  }

  if (source->IsOneByteRepresentation()) {
    return StringTable::Data::TryStringToIndexOrLookupExisting<uint8_t>(
        isolate, string, source, start);
  }
  return StringTable::Data::TryStringToIndexOrLookupExisting<uint16_t>(
      isolate, string, source, start);
}

void StringTable::InsertForIsolateDeserialization(
    Isolate* isolate, const base::Vector<DirectHandle<String>>& strings) {
  DCHECK_EQ(NumberOfElements(), 0);

  const int length = static_cast<int>(strings.size());
  {
    base::MutexGuard table_write_guard(&write_mutex_);

    Data* const data = EnsureCapacity(isolate, length);

    for (const DirectHandle<String>& s : strings) {
      StringTableInsertionKey key(
          isolate, s, DeserializingUserCodeOption::kNotDeserializingUserCode);
      InternalIndex entry =
          data->table().FindEntryOrInsertionEntry(isolate, &key, key.hash());

      DirectHandle<String> inserted_string = key.GetHandleForInsertion(isolate);
      DCHECK_IMPLIES(v8_flags.shared_string_table, inserted_string->IsShared());
      data->table().AddAt(isolate, entry, *inserted_string);
    }
  }

  DCHECK_EQ(NumberOfElements(), length);
}

void StringTable::InsertEmptyStringForBootstrapping(Isolate* isolate) {
  {
    base::MutexGuard table_write_guard(&write_mutex_);

    Data* const data = EnsureCapacity(isolate, 1);

    DirectHandle<String> empty_string = isolate->factory()->empty_string();
    uint32_t hash = empty_string->EnsureHash();

    InternalIndex entry = data->table().FindInsertionEntry(isolate, hash);

    DCHECK_IMPLIES(v8_flags.shared_string_table, empty_string->IsShared());
    data->table().AddAt(isolate, entry, *empty_string);
  }
  DCHECK_EQ(NumberOfElements(), 1);
}

void StringTable::Print(PtrComprCageBase cage_base) const {
  data_.load(std::memory_order_acquire)->Print(cage_base);
}

size_t StringTable::GetCurrentMemoryUsage() const {
  return sizeof(*this) +
         data_.load(std::memory_order_acquire)->GetCurrentMemoryUsage();
}

void StringTable::IterateElements(RootVisitor* visitor) {
  // This should only happen during garbage collection when background threads
  // are paused, so the load can be relaxed.
  isolate_->heap()->safepoint()->AssertActive();
  data_.load(std::memory_order_relaxed)->IterateElements(visitor);
}

void StringTable::DropOldData() {
  // This should only happen during garbage collection when background threads
  // are paused, so the load can be relaxed.
  isolate_->heap()->safepoint()->AssertActive();
  DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
  data_.load(std::memory_order_relaxed)->DropPreviousData();
}

void StringTable::NotifyElementsRemoved(int count) {
  // This should only happen during garbage collection when background threads
  // are paused, so the load can be relaxed.
  isolate_->heap()->safepoint()->AssertActive();
  DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
  data_.load(std::memory_order_relaxed)->table().ElementsRemoved(count);
}

}  // namespace internal
}  // namespace v8