v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
swiss-name-dictionary.cc
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Only including the -inl.h file directly makes the linter complain.
#include "src/objects/swiss-name-dictionary.h"

#include "src/heap/heap-inl.h"
#include "src/objects/swiss-name-dictionary-inl.h"

namespace v8 {
namespace internal {

// static
template <template <typename> typename HandleType>
  requires(std::is_convertible_v<HandleType<SwissNameDictionary>,
                                 DirectHandle<SwissNameDictionary>>)
HandleType<SwissNameDictionary> SwissNameDictionary::DeleteEntry(
    Isolate* isolate, HandleType<SwissNameDictionary> table,
    InternalIndex entry) {
  // GetCtrl() does the bounds check.
  DCHECK(IsFull(table->GetCtrl(entry.as_int())));

  int i = entry.as_int();

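  // Marking the bucket kDeleted (a tombstone) rather than kEmpty keeps
  // existing probe sequences intact: lookups must continue probing past
  // deleted buckets, whereas hitting kEmpty terminates a probe early.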
  table->SetCtrl(i, Ctrl::kDeleted);
  table->ClearDataTableEntry(isolate, i);
  // We leave the PropertyDetails unchanged because they are not relevant for
  // GC.

  int nof = table->NumberOfElements();
  table->SetNumberOfElements(nof - 1);
  int nod = table->NumberOfDeletedElements();
  table->SetNumberOfDeletedElements(nod + 1);

  // TODO(v8:11388) Abseil's flat_hash_map doesn't shrink on deletion, but may
  // decide on addition to do an in-place rehash to remove deleted elements. We
  // shrink on deletion here to follow what NameDictionary and
  // OrderedNameDictionary do. We should investigate which approach works
  // better.
  return Shrink(isolate, table);
}

// static
template <typename IsolateT, template <typename> typename HandleType>
  requires(std::is_convertible_v<HandleType<SwissNameDictionary>,
                                 DirectHandle<SwissNameDictionary>>)
HandleType<SwissNameDictionary> SwissNameDictionary::Rehash(
    IsolateT* isolate, HandleType<SwissNameDictionary> table,
    int new_capacity) {
  DCHECK(IsValidCapacity(new_capacity));
  DCHECK_LE(table->NumberOfElements(), MaxUsableCapacity(new_capacity));
  ReadOnlyRoots roots(isolate);

  HandleType<SwissNameDictionary> new_table =
      isolate->factory()->NewSwissNameDictionaryWithCapacity(
          new_capacity, HeapLayout::InYoungGeneration(*table)
                            ? AllocationType::kYoung
                            : AllocationType::kOld);

  DisallowHeapAllocation no_gc;

  int new_enum_index = 0;
  new_table->SetNumberOfElements(table->NumberOfElements());
  for (int enum_index = 0; enum_index < table->UsedCapacity(); ++enum_index) {
    int entry = table->EntryForEnumerationIndex(enum_index);

    Tagged<Object> key;

    if (table->ToKey(roots, entry, &key)) {
      Tagged<Object> value = table->ValueAtRaw(entry);
      PropertyDetails details = table->DetailsAt(entry);

      int new_entry = new_table->AddInternal(Cast<Name>(key), value, details);

      // TODO(v8:11388) Investigate ways of hoisting the branching needed to
      // select the correct meta table entry size (based on the capacity of the
      // table) out of the loop.
      new_table->SetEntryForEnumerationIndex(new_enum_index, new_entry);
      ++new_enum_index;
    }
  }

  new_table->SetHash(table->Hash());
  return new_table;
}


bool SwissNameDictionary::EqualsForTesting(Tagged<SwissNameDictionary> other) {
  if (Capacity() != other->Capacity() ||
      NumberOfElements() != other->NumberOfElements() ||
      NumberOfDeletedElements() != other->NumberOfDeletedElements() ||
      Hash() != other->Hash()) {
    return false;
  }

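  // The ctrl table holds Capacity() + kGroupWidth bytes: the first kGroupWidth
  // control bytes are mirrored at the end so that probing can always load a
  // full group without wrapping. Compare the mirrored bytes, too.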
  for (int i = 0; i < Capacity() + kGroupWidth; i++) {
    if (CtrlTable()[i] != other->CtrlTable()[i]) {
      return false;
    }
  }
  for (int i = 0; i < Capacity(); i++) {
    if (KeyAt(i) != other->KeyAt(i) || ValueAtRaw(i) != other->ValueAtRaw(i)) {
      return false;
    }
    if (IsFull(GetCtrl(i))) {
      if (DetailsAt(i) != other->DetailsAt(i)) return false;
    }
  }
  for (int i = 0; i < UsedCapacity(); i++) {
    if (EntryForEnumerationIndex(i) != other->EntryForEnumerationIndex(i)) {
      return false;
    }
  }
  return true;
}

// static
DirectHandle<SwissNameDictionary> SwissNameDictionary::ShallowCopy(
    Isolate* isolate, DirectHandle<SwissNameDictionary> table) {
  // TODO(v8:11388) Consider doing some cleanup during copying: For example, we
  // could turn kDeleted into kEmpty in certain situations. But this would
  // require tidying up the enumeration table in a similar fashion as would be
  // required when trying to reuse deleted entries.

  if (table->Capacity() == 0) {
    return table;
  }

  int capacity = table->Capacity();
  int used_capacity = table->UsedCapacity();

  DirectHandle<SwissNameDictionary> new_table =
      isolate->factory()->NewSwissNameDictionaryWithCapacity(
          capacity, HeapLayout::InYoungGeneration(*table)
                        ? AllocationType::kYoung
                        : AllocationType::kOld);

  new_table->SetHash(table->Hash());

  DisallowGarbageCollection no_gc;
  WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
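  // GetWriteBarrierMode() reports SKIP_WRITE_BARRIER when the newly allocated
  // table is in the young generation, in which case both tables can be copied
  // below with a plain memcpy.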

  if (mode == WriteBarrierMode::SKIP_WRITE_BARRIER) {
    // Copy data table and ctrl table, which are stored next to each other.
    void* original_start =
        reinterpret_cast<void*>(table->field_address(DataTableStartOffset()));
    void* new_table_start = reinterpret_cast<void*>(
        new_table->field_address(DataTableStartOffset()));
    size_t bytes_to_copy = DataTableSize(capacity) + CtrlTableSize(capacity);
    DCHECK(DataTableEndOffset(capacity) == CtrlTableStartOffset(capacity));
    MemCopy(new_table_start, original_start, bytes_to_copy);
  } else {
    DCHECK_EQ(UPDATE_WRITE_BARRIER, mode);

    // We may have to trigger write barriers when copying the data table.
    for (int i = 0; i < capacity; ++i) {
      Tagged<Object> key = table->KeyAt(i);
      Tagged<Object> value = table->ValueAtRaw(i);

      // Cannot use SetKey/ValueAtPut because they don't accept the hole as data
      // to store.
      new_table->StoreToDataTable(i, kDataTableKeyEntryIndex, key);
      new_table->StoreToDataTable(i, kDataTableValueEntryIndex, value);
    }

    void* original_ctrl_table = table->CtrlTable();
    void* new_ctrl_table = new_table->CtrlTable();
    MemCopy(new_ctrl_table, original_ctrl_table, CtrlTableSize(capacity));
  }

  // PropertyDetails table may contain uninitialized data for unused slots.
  for (int i = 0; i < capacity; ++i) {
    if (IsFull(table->GetCtrl(i))) {
      new_table->DetailsAtPut(i, table->DetailsAt(i));
    }
  }

  // Meta table is only initialized for the first 2 + UsedCapacity() entries,
  // where the size of each entry depends on the table capacity.
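  // (The first two meta table entries hold the element count and the deleted
  // element count; the remaining entries form the enumeration table.)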
  int size_per_meta_table_entry = MetaTableSizePerEntryFor(capacity);
  int meta_table_used_bytes = (2 + used_capacity) * size_per_meta_table_entry;
  MemCopy(new_table->meta_table()->begin(), table->meta_table()->begin(),
          meta_table_used_bytes);

  return new_table;
}

// static
template <template <typename> typename HandleType>
  requires(std::is_convertible_v<HandleType<SwissNameDictionary>,
                                 DirectHandle<SwissNameDictionary>>)
HandleType<SwissNameDictionary> SwissNameDictionary::Shrink(
    Isolate* isolate, HandleType<SwissNameDictionary> table) {
  // TODO(v8:11388) We're using the same logic to decide whether or not to
  // shrink as OrderedNameDictionary and NameDictionary here. We should compare
  // this with the logic used by Abseil's flat_hash_map, which has a heuristic
  // for triggering an (in-place) rehash on addition, but never shrinks the
  // table. Abseil's heuristic doesn't take the number of deleted elements into
  // account, because it doesn't track that.

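  // Shrink to half the current capacity once fewer than a quarter of the
  // buckets hold present elements, but never below kInitialCapacity.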
  int nof = table->NumberOfElements();
  int capacity = table->Capacity();
  if (nof >= (capacity >> 2)) return table;
  int new_capacity = std::max(capacity / 2, kInitialCapacity);
  return Rehash(isolate, table, new_capacity);
}

// TODO(v8:11388) Copying all data into a std::vector and then re-adding into
// the table doesn't seem like a good algorithm. Abseil's Swiss Tables come with
// a clever algorithm for re-hashing in place: It first changes the control
// table, effectively changing the roles of full, empty and deleted buckets. It
// then moves each entry to its new bucket by swapping entries (see
// drop_deletes_without_resize in Abseil's raw_hash_set.h). This algorithm could
// generally be adapted to work on our insertion order preserving
// implementation, too. However, it would require a mapping from hash table
// buckets back to enumeration indices. This could either be created in this
// function (requiring a vector with Capacity() entries and a separate pass over
// the enumeration table) or by creating this backwards mapping ahead of time
// and storing it somewhere in the main table or the meta table, for those
// SwissNameDictionaries that we know will be in-place rehashed, most notably
// those stored in the snapshot.
template <typename IsolateT>
void SwissNameDictionary::Rehash(IsolateT* isolate) {
  DisallowHeapAllocation no_gc;

  struct Entry {
    Tagged<Name> key;
    Tagged<Object> value;
    PropertyDetails details;
  };

  if (Capacity() == 0) return;

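  // Entry is not default-constructible (PropertyDetails has no default
  // constructor), so the vector is seeded with an explicit dummy value.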
  Entry dummy{Tagged<Name>(), Tagged<Object>(), PropertyDetails::Empty()};
  std::vector<Entry> data(NumberOfElements(), dummy);

  ReadOnlyRoots roots(isolate);
  int data_index = 0;
  for (int enum_index = 0; enum_index < UsedCapacity(); ++enum_index) {
    int entry = EntryForEnumerationIndex(enum_index);
    Tagged<Object> key;
    if (!ToKey(roots, entry, &key)) continue;

    data[data_index++] =
        Entry{Cast<Name>(key), ValueAtRaw(entry), DetailsAt(entry)};
  }

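  // Initialize() resets this table in place to an empty table of the same
  // capacity, reusing the existing meta table; the entries saved above are
  // then re-added in their original enumeration order.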
  Initialize(isolate, meta_table(), Capacity());

  int new_enum_index = 0;
  SetNumberOfElements(static_cast<int>(data.size()));
  for (Entry& e : data) {
    int new_entry = AddInternal(e.key, e.value, e.details);

    // TODO(v8:11388) Investigate ways of hoisting the branching needed to
    // select the correct meta table entry size (based on the capacity of the
    // table) out of the loop.
    SetEntryForEnumerationIndex(new_enum_index, new_entry);
    ++new_enum_index;
  }
}

// TODO(emrich,v8:11388): This is almost an identical copy of
// HashTable<..>::NumberOfEnumerableProperties. Consolidate both versions
// elsewhere (e.g., hash-table-utils)?
int SwissNameDictionary::NumberOfEnumerableProperties() {
  ReadOnlyRoots roots = this->GetReadOnlyRoots();
  int result = 0;
  for (InternalIndex i : this->IterateEntries()) {
    Tagged<Object> k;
    if (!this->ToKey(roots, i, &k)) continue;
    if (Object::FilterKey(k, ENUMERABLE_STRINGS)) continue;
    PropertyDetails details = this->DetailsAt(i);
    PropertyAttributes attr = details.attributes();
    if ((int{attr} & ONLY_ENUMERABLE) == 0) result++;
  }
  return result;
}

// TODO(emrich, v8:11388): This is almost an identical copy of
// Dictionary<..>::SlowReverseLookup. Consolidate both versions elsewhere (e.g.,
// hash-table-utils)?
Tagged<Object> SwissNameDictionary::SlowReverseLookup(Isolate* isolate,
                                                      Tagged<Object> value) {
  ReadOnlyRoots roots(isolate);
  for (InternalIndex i : IterateEntries()) {
    Tagged<Object> k;
    if (!ToKey(roots, i, &k)) continue;
    Tagged<Object> e = this->ValueAt(i);
    if (e == value) return k;
  }
  return roots.undefined_value();
}

// The largest value we ever have to store in the enumeration table is
// Capacity() - 1. The largest value we ever have to store for the present or
// deleted element count is MaxUsableCapacity(Capacity()). All data in the
// meta table is unsigned. Using this, we verify the values of the constants
// |kMax1ByteMetaTableCapacity| and |kMax2ByteMetaTableCapacity|.
static_assert(SwissNameDictionary::kMax1ByteMetaTableCapacity - 1 <=
              std::numeric_limits<uint8_t>::max());
static_assert(SwissNameDictionary::MaxUsableCapacity(
                  SwissNameDictionary::kMax1ByteMetaTableCapacity) <=
              std::numeric_limits<uint8_t>::max());
static_assert(SwissNameDictionary::kMax2ByteMetaTableCapacity - 1 <=
              std::numeric_limits<uint16_t>::max());
static_assert(SwissNameDictionary::MaxUsableCapacity(
                  SwissNameDictionary::kMax2ByteMetaTableCapacity) <=
              std::numeric_limits<uint16_t>::max());

template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
    Isolate* isolate, Tagged<ByteArray> meta_table, int capacity);
template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
    LocalIsolate* isolate, Tagged<ByteArray> meta_table, int capacity);

template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::DeleteEntry(Isolate* isolate,
                                 Handle<SwissNameDictionary> table,
                                 InternalIndex entry);
template V8_EXPORT_PRIVATE DirectHandle<SwissNameDictionary>
SwissNameDictionary::DeleteEntry(Isolate* isolate,
                                 DirectHandle<SwissNameDictionary> table,
                                 InternalIndex entry);

template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::Rehash(LocalIsolate* isolate,
                            Handle<SwissNameDictionary> table,
                            int new_capacity);
template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::Rehash(Isolate* isolate,
                            Handle<SwissNameDictionary> table,
                            int new_capacity);
template V8_EXPORT_PRIVATE DirectHandle<SwissNameDictionary>
SwissNameDictionary::Rehash(LocalIsolate* isolate,
                            DirectHandle<SwissNameDictionary> table,
                            int new_capacity);
template V8_EXPORT_PRIVATE DirectHandle<SwissNameDictionary>
SwissNameDictionary::Rehash(Isolate* isolate,
                            DirectHandle<SwissNameDictionary> table,
                            int new_capacity);

template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(
    LocalIsolate* isolate);
template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(Isolate* isolate);

template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::Shrink(Isolate* isolate,
                            Handle<SwissNameDictionary> table);
template V8_EXPORT_PRIVATE DirectHandle<SwissNameDictionary>
SwissNameDictionary::Shrink(Isolate* isolate,
                            DirectHandle<SwissNameDictionary> table);

}  // namespace internal
}  // namespace v8