v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.

swiss-name-dictionary-inl.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
#define V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_

#include "src/objects/swiss-name-dictionary.h"
// Include the non-inl header before the rest of the headers.

#include <algorithm>
#include <optional>

#include "src/base/macros.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/heap/heap.h"
#include "src/objects/smi.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8::internal {

#include "torque-generated/src/objects/swiss-name-dictionary-tq-inl.inc"

OBJECT_CONSTRUCTORS_IMPL(SwissNameDictionary, HeapObject)

swiss_table::ctrl_t* SwissNameDictionary::CtrlTable() {
  return reinterpret_cast<ctrl_t*>(
      field_address(CtrlTableStartOffset(Capacity())));
}

uint8_t* SwissNameDictionary::PropertyDetailsTable() {
  return reinterpret_cast<uint8_t*>(
      field_address(PropertyDetailsTableStartOffset(Capacity())));
}

int SwissNameDictionary::Capacity() {
  return ReadField<int32_t>(CapacityOffset());
}

void SwissNameDictionary::SetCapacity(int capacity) {
  DCHECK(IsValidCapacity(capacity));

  WriteField<int32_t>(CapacityOffset(), capacity);
}

int SwissNameDictionary::NumberOfElements() {
  return GetMetaTableField(kMetaTableElementCountFieldIndex);
}

int SwissNameDictionary::NumberOfDeletedElements() {
  return GetMetaTableField(kMetaTableDeletedElementCountFieldIndex);
}

void SwissNameDictionary::SetNumberOfElements(int elements) {
  SetMetaTableField(kMetaTableElementCountFieldIndex, elements);
}

void SwissNameDictionary::SetNumberOfDeletedElements(int deleted_elements) {
  SetMetaTableField(kMetaTableDeletedElementCountFieldIndex, deleted_elements);
}

int SwissNameDictionary::UsedCapacity() {
  return NumberOfElements() + NumberOfDeletedElements();
}

// static
constexpr bool SwissNameDictionary::IsValidCapacity(int capacity) {
  return capacity == 0 || (capacity >= kInitialCapacity &&
                           // Must be power of 2.
                           ((capacity & (capacity - 1)) == 0));
}
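
// Example: with kInitialCapacity == 4 (the smallest non-zero capacity that
// CapacityFor below hands out), the valid capacities are 0, 4, 8, 16, 32, ...;
// a value such as 12 is rejected because it is not a power of 2.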

// static
constexpr int SwissNameDictionary::DataTableSize(int capacity) {
  return capacity * kTaggedSize * kDataTableEntryCount;
}

// static
constexpr int SwissNameDictionary::CtrlTableSize(int capacity) {
  // Doing + |kGroupWidth| due to the copy of first group at the end of control
  // table.
  return (capacity + kGroupWidth) * kOneByteSize;
}

// static
constexpr int SwissNameDictionary::SizeFor(int capacity) {
  DCHECK(IsValidCapacity(capacity));
  return PropertyDetailsTableStartOffset(capacity) + capacity;
}

// We use 7/8th as maximum load factor for non-special cases.
// For 16-wide groups, that gives an average of two empty slots per group.
// Similar to Abseil's CapacityToGrowth.
// static
constexpr int SwissNameDictionary::MaxUsableCapacity(int capacity) {
  DCHECK(IsValidCapacity(capacity));

  if (Group::kWidth == 8 && capacity == 4) {
    // If the group size were 16, we could fully utilize capacity 4: there
    // would be enough kEmpty entries in the ctrl table. With a group size of
    // 8 there are not, so one bucket must stay empty.
    return 3;
  }
  return capacity - capacity / 8;
}
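
// Examples: MaxUsableCapacity(8) == 7 and MaxUsableCapacity(16) == 14. At
// least one eighth of all buckets therefore stays empty, which is what
// guarantees that the probe loop in FindEntry always reaches a group that
// contains a kEmpty bucket.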

// Returns |at_least_space_for| * 8/7 for non-special cases. Similar to Abseil's
// GrowthToLowerboundCapacity.
// static
int SwissNameDictionary::CapacityFor(int at_least_space_for) {
  if (at_least_space_for <= 4) {
    if (at_least_space_for == 0) {
      return 0;
    } else if (at_least_space_for < 4) {
      return 4;
    } else if (kGroupWidth == 16) {
      DCHECK_EQ(4, at_least_space_for);
      return 4;
    } else if (kGroupWidth == 8) {
      DCHECK_EQ(4, at_least_space_for);
      return 8;
    }
  }

  int non_normalized = at_least_space_for + at_least_space_for / 7;
  return base::bits::RoundUpToPowerOfTwo32(non_normalized);
}
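
// Example: CapacityFor(28) computes 28 + 28 / 7 == 32, already a power of 2,
// so a table meant to hold 28 entries gets capacity 32 (and indeed
// MaxUsableCapacity(32) == 28).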

int SwissNameDictionary::EntryForEnumerationIndex(int enumeration_index) {
  DCHECK_LT(enumeration_index, UsedCapacity());
  return GetMetaTableField(kMetaTableEnumerationDataStartIndex +
                           enumeration_index);
}

void SwissNameDictionary::SetEntryForEnumerationIndex(int enumeration_index,
                                                      int entry) {
  DCHECK_LT(enumeration_index, UsedCapacity());
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
  DCHECK(IsFull(GetCtrl(entry)));

  SetMetaTableField(kMetaTableEnumerationDataStartIndex + enumeration_index,
                    entry);
}

template <typename IsolateT>
InternalIndex SwissNameDictionary::FindEntry(IsolateT* isolate,
                                             Tagged<Object> key) {
  Tagged<Name> name = Cast<Name>(key);
  DCHECK(IsUniqueName(name));
  uint32_t hash = name->hash();

  // We probe the hash table in groups of |kGroupWidth| buckets. One bucket
  // corresponds to a 1-byte entry in the control table.
  // Each group can be uniquely identified by the index of its first bucket,
  // which must be a value between 0 (inclusive) and Capacity() (exclusive).
  // Note that logically, groups wrap around after index Capacity() - 1. This
  // means that probing the group starting at, for example, index Capacity() - 1
  // means probing CtrlTable()[Capacity() - 1] followed by CtrlTable()[0] to
  // CtrlTable()[6], assuming a group width of 8. However, in memory, this is
  // achieved by maintaining an additional |kGroupWidth| bytes after the first
  // Capacity() entries of the control table. These contain a copy of the first
  // min(Capacity(), kGroupWidth) entries of the control table. If Capacity() <
  // |kGroupWidth|, then the remaining |kGroupWidth| - Capacity() control bytes
  // are left as |kEmpty|.
  // This means that actually, probing the group starting
  // at index Capacity() - 1 is achieved by probing CtrlTable()[Capacity() - 1],
  // followed by CtrlTable()[Capacity()] to CtrlTable()[Capacity() + 7].

  ctrl_t* ctrl = CtrlTable();
  auto seq = probe(hash, Capacity());
  // At this point, seq.offset() denotes the index of the first bucket in the
  // first group to probe. Note that this doesn't have to be divisible by
  // |kGroupWidth|, but can have any value between 0 (inclusive) and Capacity()
  // (exclusive).
  while (true) {
    Group g{ctrl + seq.offset()};
    for (int i : g.Match(swiss_table::H2(hash))) {
      int candidate_entry = seq.offset(i);
      Tagged<Object> candidate_key = KeyAt(candidate_entry);
      // This key matching is SwissNameDictionary specific!
      if (candidate_key == key) return InternalIndex(candidate_entry);
    }
    if (g.MatchEmpty()) return InternalIndex::NotFound();

    // The following selects the next group to probe. Note that seq.offset()
    // always advances by a multiple of |kGroupWidth|, modulo Capacity(). This
    // is done in a way such that we visit Capacity() / |kGroupWidth|
    // non-overlapping (!) groups before we would visit the same group (or
    // bucket) again.
    seq.next();

    // If the following DCHECK weren't true, we would have probed all Capacity()
    // different buckets without finding one containing |kEmpty| (which would
    // have triggered the g.MatchEmpty() check above). This must not be the
    // case because the maximum load factor of 7/8 guarantees that there must
    // always remain empty buckets.
    //
    // The only exception to this rule is small tables, where 2 * Capacity()
    // < |kGroupWidth|, in which case all Capacity() entries can be filled
    // without leaving empty buckets. The layout of the control
    // table guarantees that after the first Capacity() entries of the control
    // table, the control table contains a copy of those first Capacity()
    // entries, followed by kGroupWidth - 2 * Capacity() entries containing
    // |kEmpty|. This guarantees that the g.MatchEmpty() check above will
    // always trigger if the element wasn't found, correctly preventing us from
    // probing more than one group in this special case.
    DCHECK_LT(seq.index(), Capacity());
  }
}
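
// Worked example of the wrap-around described above: with Capacity() == 16 and
// kGroupWidth == 8, a probe sequence starting at bucket 13 reads ctrl[13..20].
// ctrl[13..15] cover buckets 13-15 directly, while ctrl[16..20] hold the
// copies of ctrl[0..4], so buckets 0-4 are effectively probed as well.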

template <typename IsolateT>
InternalIndex SwissNameDictionary::FindEntry(IsolateT* isolate,
                                             DirectHandle<Object> key) {
  return FindEntry(isolate, *key);
}

Tagged<Object> SwissNameDictionary::LoadFromDataTable(int entry,
                                                      int data_offset) {
  return LoadFromDataTable(GetPtrComprCageBase(*this), entry, data_offset);
}

Tagged<Object> SwissNameDictionary::LoadFromDataTable(
    PtrComprCageBase cage_base, int entry, int data_offset) {
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
  int offset = DataTableStartOffset() +
               (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
  return TaggedField<Object>::Relaxed_Load(cage_base, *this, offset);
}

void SwissNameDictionary::StoreToDataTable(int entry, int data_offset,
                                           Tagged<Object> data) {
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));

  int offset = DataTableStartOffset() +
               (entry * kDataTableEntryCount + data_offset) * kTaggedSize;

  RELAXED_WRITE_FIELD(*this, offset, data);
  WRITE_BARRIER(*this, offset, data);
}

void SwissNameDictionary::StoreToDataTableNoBarrier(int entry, int data_offset,
                                                    Tagged<Object> data) {
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));

  int offset = DataTableStartOffset() +
               (entry * kDataTableEntryCount + data_offset) * kTaggedSize;

  RELAXED_WRITE_FIELD(*this, offset, data);
}

void SwissNameDictionary::ClearDataTableEntry(Isolate* isolate, int entry) {
  ReadOnlyRoots roots(isolate);

  StoreToDataTable(entry, kDataTableKeyEntryIndex, roots.the_hole_value());
  StoreToDataTable(entry, kDataTableValueEntryIndex, roots.the_hole_value());
}

void SwissNameDictionary::ValueAtPut(int entry, Tagged<Object> value) {
  DCHECK(!IsTheHole(value));
  StoreToDataTable(entry, kDataTableValueEntryIndex, value);
}

void SwissNameDictionary::ValueAtPut(InternalIndex entry,
                                     Tagged<Object> value) {
  ValueAtPut(entry.as_int(), value);
}

void SwissNameDictionary::SetKey(int entry, Tagged<Object> key) {
  StoreToDataTable(entry, kDataTableKeyEntryIndex, key);
}

void SwissNameDictionary::DetailsAtPut(int entry, PropertyDetails details) {
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
  uint8_t encoded_details = details.ToByte();
  PropertyDetailsTable()[entry] = encoded_details;
}

void SwissNameDictionary::DetailsAtPut(InternalIndex entry,
                                       PropertyDetails details) {
  DetailsAtPut(entry.as_int(), details);
}

Tagged<Object> SwissNameDictionary::KeyAt(int entry) {
  return LoadFromDataTable(entry, kDataTableKeyEntryIndex);
}

Tagged<Object> SwissNameDictionary::KeyAt(InternalIndex entry) {
  return KeyAt(entry.as_int());
}

Tagged<Name> SwissNameDictionary::NameAt(InternalIndex entry) {
  return Cast<Name>(KeyAt(entry));
}

// This version can be called on empty buckets.
Tagged<Object> SwissNameDictionary::ValueAtRaw(int entry) {
  return LoadFromDataTable(entry, kDataTableValueEntryIndex);
}

Tagged<Object> SwissNameDictionary::ValueAt(InternalIndex entry) {
  DCHECK(IsFull(GetCtrl(entry.as_int())));
  return ValueAtRaw(entry.as_int());
}

std::optional<Tagged<Object>> SwissNameDictionary::TryValueAt(
    InternalIndex entry) {
#if DEBUG
  Isolate* isolate;
  GetIsolateFromHeapObject(*this, &isolate);
  DCHECK_NE(isolate, nullptr);
  SLOW_DCHECK(!isolate->heap()->IsPendingAllocation(Tagged(*this)));
#endif  // DEBUG
  // We can read Capacity() in a non-atomic way since we are reading an
  // initialized object which is not pending allocation.
  if (static_cast<unsigned>(entry.as_int()) >=
      static_cast<unsigned>(Capacity())) {
    return {};
  }
  return ValueAtRaw(entry.as_int());
}
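
// In contrast to ValueAt above, TryValueAt bounds-checks |entry| instead of
// DCHECKing it and returns an empty optional for out-of-range values, so it is
// safe to call with an index whose validity the caller cannot guarantee.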

PropertyDetails SwissNameDictionary::DetailsAt(int entry) {
  // GetCtrl(entry) does a bounds check for |entry| value.
  DCHECK(IsFull(GetCtrl(entry)));

  uint8_t encoded_details = PropertyDetailsTable()[entry];
  return PropertyDetails::FromByte(encoded_details);
}

PropertyDetails SwissNameDictionary::DetailsAt(InternalIndex entry) {
  return DetailsAt(entry.as_int());
}

// static
template <typename IsolateT, template <typename> typename HandleType>
  requires(std::is_convertible_v<HandleType<SwissNameDictionary>,
                                 DirectHandle<SwissNameDictionary>>)
HandleType<SwissNameDictionary> SwissNameDictionary::EnsureGrowable(
    IsolateT* isolate, HandleType<SwissNameDictionary> table) {
  int capacity = table->Capacity();

  if (table->UsedCapacity() < MaxUsableCapacity(capacity)) {
    // We have room for at least one more entry, nothing to do.
    return table;
  }

  int new_capacity = capacity == 0 ? kInitialCapacity : capacity * 2;
  return Rehash(isolate, table, new_capacity);
}

swiss_table::ctrl_t SwissNameDictionary::GetCtrl(int entry) {
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));

  return CtrlTable()[entry];
}

void SwissNameDictionary::SetCtrl(int entry, ctrl_t h) {
  int capacity = Capacity();
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(capacity));

  ctrl_t* ctrl = CtrlTable();
  ctrl[entry] = h;

  // The ctrl table contains a copy of the first group (i.e., the group starting
  // at entry 0) after the first |capacity| entries of the ctrl table. This
  // means that the ctrl table always has size |capacity| + |kGroupWidth|.
  // However, note that we may have |capacity| < |kGroupWidth|. For example, if
  // Capacity() == 8 and |kGroupWidth| == 16, then ctrl[0] is copied to ctrl[8],
  // ctrl[1] to ctrl[9], etc. In this case, ctrl[16] to ctrl[23] remain unused,
  // which means that their values are always Ctrl::kEmpty.
  // We achieve the necessary copying without branching here using some bit
  // magic: We set {copy_entry = entry} in those cases where we don't actually
  // have to perform a copy (meaning that we just repeat the {ctrl[entry] = h}
  // from above). If we do need to do some actual copying, we set {copy_entry =
  // Capacity() + entry}.

  int mask = capacity - 1;
  int copy_entry =
      ((entry - Group::kWidth) & mask) + 1 + ((Group::kWidth - 1) & mask);
  DCHECK_IMPLIES(entry < static_cast<int>(Group::kWidth),
                 copy_entry == capacity + entry);
  DCHECK_IMPLIES(entry >= static_cast<int>(Group::kWidth), copy_entry == entry);
  ctrl[copy_entry] = h;
}
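
// Worked example of the bit magic, for capacity 8 (mask 7) and group width 16:
// entry 1 gives copy_entry == ((1 - 16) & 7) + 1 + (15 & 7) == 1 + 1 + 7 == 9,
// i.e. capacity + entry, so the copied group is updated. For capacity 32
// (mask 31) and entry 20 >= kGroupWidth: ((20 - 16) & 31) + 1 + (15 & 31) ==
// 4 + 1 + 15 == 20 == entry, so the second store just repeats the first one.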

inline int SwissNameDictionary::FindFirstEmpty(uint32_t hash) {
  // See SwissNameDictionary::FindEntry for description of probing algorithm.

  auto seq = probe(hash, Capacity());
  while (true) {
    Group g{CtrlTable() + seq.offset()};
    auto mask = g.MatchEmpty();
    if (mask) {
      // Note that picking the lowest bit set here means using the leftmost
      // empty bucket in the group. Here, "left" means smaller entry/bucket
      // index.
      return seq.offset(mask.LowestBitSet());
    }
    seq.next();
    DCHECK_LT(seq.index(), Capacity());
  }
}

void SwissNameDictionary::SetMetaTableField(int field_index, int value) {
  // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
  // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
  // constants.
  int capacity = Capacity();
  Tagged<ByteArray> meta_table = this->meta_table();
  if (capacity <= kMax1ByteMetaTableCapacity) {
    SetMetaTableField<uint8_t>(meta_table, field_index, value);
  } else if (capacity <= kMax2ByteMetaTableCapacity) {
    SetMetaTableField<uint16_t>(meta_table, field_index, value);
  } else {
    SetMetaTableField<uint32_t>(meta_table, field_index, value);
  }
}
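
// The meta table thus stores each of its fields (element counts and
// enumeration entries) in 1, 2, or 4 bytes, using the smallest width that can
// still represent every value possible at the current capacity.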

int SwissNameDictionary::GetMetaTableField(int field_index) {
  // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
  // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
  // constants.
  int capacity = Capacity();
  Tagged<ByteArray> meta_table = this->meta_table();
  if (capacity <= kMax1ByteMetaTableCapacity) {
    return GetMetaTableField<uint8_t>(meta_table, field_index);
  } else if (capacity <= kMax2ByteMetaTableCapacity) {
    return GetMetaTableField<uint16_t>(meta_table, field_index);
  } else {
    return GetMetaTableField<uint32_t>(meta_table, field_index);
  }
}

// static
template <typename T>
void SwissNameDictionary::SetMetaTableField(Tagged<ByteArray> meta_table,
                                            int field_index, int value) {
  static_assert((std::is_same<T, uint8_t>::value) ||
                (std::is_same<T, uint16_t>::value) ||
                (std::is_same<T, uint32_t>::value));
  DCHECK_LE(value, std::numeric_limits<T>::max());
  DCHECK_LT(meta_table->begin() + field_index * sizeof(T), meta_table->end());
  T* raw_data = reinterpret_cast<T*>(meta_table->begin());
  raw_data[field_index] = value;
}

// static
template <typename T>
int SwissNameDictionary::GetMetaTableField(Tagged<ByteArray> meta_table,
                                           int field_index) {
  static_assert((std::is_same<T, uint8_t>::value) ||
                (std::is_same<T, uint16_t>::value) ||
                (std::is_same<T, uint32_t>::value));
  DCHECK_LT(meta_table->begin() + field_index * sizeof(T), meta_table->end());
  T* raw_data = reinterpret_cast<T*>(meta_table->begin());
  return raw_data[field_index];
}

constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
  DCHECK(IsValidCapacity(capacity));

  // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
  // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
  // constants.
  if (capacity <= kMax1ByteMetaTableCapacity) {
    return sizeof(uint8_t);
  } else if (capacity <= kMax2ByteMetaTableCapacity) {
    return sizeof(uint16_t);
  } else {
    return sizeof(uint32_t);
  }
}

constexpr int SwissNameDictionary::MetaTableSizeFor(int capacity) {
  DCHECK(IsValidCapacity(capacity));

  int per_entry_size = MetaTableSizePerEntryFor(capacity);

  // The enumeration table only needs to have as many slots as there can be
  // present + deleted entries in the hash table (= maximum load factor *
  // capacity). Two more slots store the number of present and deleted
  // entries.
  return per_entry_size * (MaxUsableCapacity(capacity) + 2);
}
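
// Example: assuming capacity 8 falls into the uint8_t regime of
// MetaTableSizePerEntryFor, MetaTableSizeFor(8) == 1 * (7 + 2) == 9 bytes:
// seven enumeration slots (MaxUsableCapacity(8)) plus the two element-count
// fields.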

bool SwissNameDictionary::IsKey(ReadOnlyRoots roots,
                                Tagged<Object> key_candidate) {
  return key_candidate != roots.the_hole_value();
}

bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, int entry,
                                Tagged<Object>* out_key) {
  Tagged<Object> k = KeyAt(entry);
  if (!IsKey(roots, k)) return false;
  *out_key = k;
  return true;
}

bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, InternalIndex entry,
                                Tagged<Object>* out_key) {
  return ToKey(roots, entry.as_int(), out_key);
}

// static
template <typename IsolateT, template <typename> typename HandleType>
  requires(std::is_convertible_v<HandleType<SwissNameDictionary>,
                                 DirectHandle<SwissNameDictionary>>)
HandleType<SwissNameDictionary> SwissNameDictionary::Add(
    IsolateT* isolate, HandleType<SwissNameDictionary> original_table,
    DirectHandle<Name> key, DirectHandle<Object> value, PropertyDetails details,
    InternalIndex* entry_out) {
  DCHECK(original_table->FindEntry(isolate, *key).is_not_found());

  HandleType<SwissNameDictionary> table =
      EnsureGrowable(isolate, original_table);
  DisallowGarbageCollection no_gc;
  Tagged<SwissNameDictionary> raw_table = *table;
  int nof = raw_table->NumberOfElements();
  int nod = raw_table->NumberOfDeletedElements();
  int new_enum_index = nof + nod;

  int new_entry = raw_table->AddInternal(*key, *value, details);

  raw_table->SetNumberOfElements(nof + 1);
  raw_table->SetEntryForEnumerationIndex(new_enum_index, new_entry);

  if (entry_out) {
    *entry_out = InternalIndex(new_entry);
  }

  return table;
}
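
// The new entry is recorded at enumeration index nof + nod, i.e. after all
// present and deleted entries, which keeps enumeration order identical to
// insertion order.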

int SwissNameDictionary::AddInternal(Tagged<Name> key, Tagged<Object> value,
                                     PropertyDetails details) {
  DCHECK(IsUniqueName(key));

  uint32_t hash = key->hash();

  // For now we don't reuse deleted buckets (due to enumeration table
  // complications), which is why we only look for empty buckets here, not
  // deleted ones.
  int target = FindFirstEmpty(hash);

  SetCtrl(target, swiss_table::H2(hash));
  SetKey(target, key);
  ValueAtPut(target, value);
  DetailsAtPut(target, details);

  // Note that we do not update the number of elements or the enumeration table
  // in this function.

  return target;
}

template <typename IsolateT>
void SwissNameDictionary::Initialize(IsolateT* isolate,
                                     Tagged<ByteArray> meta_table,
                                     int capacity) {
  DCHECK(IsValidCapacity(capacity));
  DisallowGarbageCollection no_gc;
  ReadOnlyRoots roots(isolate);

  SetCapacity(capacity);
  SetHash(kNoHashSentinel);

  memset(CtrlTable(), Ctrl::kEmpty, CtrlTableSize(capacity));

  MemsetTagged(RawField(DataTableStartOffset()), roots.the_hole_value(),
               capacity * kDataTableEntryCount);

  set_meta_table(meta_table);

  SetNumberOfElements(0);
  SetNumberOfDeletedElements(0);

  // We leave the enumeration table and the PropertyDetails table
  // uninitialized.
}

SwissNameDictionary::IndexIterator::IndexIterator(
    DirectHandle<SwissNameDictionary> dict, int start)
    : enum_index_{start}, dict_{dict} {
  if (dict.is_null()) {
    used_capacity_ = 0;
  } else {
    used_capacity_ = dict->UsedCapacity();
  }
}

SwissNameDictionary::IndexIterator&
SwissNameDictionary::IndexIterator::operator++() {
  DCHECK_LT(enum_index_, used_capacity_);
  ++enum_index_;
  return *this;
}

bool SwissNameDictionary::IndexIterator::operator==(
    const SwissNameDictionary::IndexIterator& b) const {
  DCHECK_LE(enum_index_, used_capacity_);
  DCHECK_LE(b.enum_index_, used_capacity_);
  DCHECK(dict_.equals(b.dict_));

  return this->enum_index_ == b.enum_index_;
}

bool SwissNameDictionary::IndexIterator::operator!=(
    const IndexIterator& b) const {
  return !(*this == b);
}

InternalIndex SwissNameDictionary::IndexIterator::operator*() {
  DCHECK_LE(enum_index_, used_capacity_);

  if (enum_index_ == used_capacity_) return InternalIndex::NotFound();

  return InternalIndex(dict_->EntryForEnumerationIndex(enum_index_));
}

SwissNameDictionary::IndexIterable::IndexIterable(
    DirectHandle<SwissNameDictionary> dict)
    : dict_{dict} {}

SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::begin() {
  return IndexIterator(dict_, 0);
}

SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
  if (dict_.is_null()) {
    return IndexIterator(dict_, 0);
  } else {
    DCHECK(!dict_.is_null());
    return IndexIterator(dict_, dict_->UsedCapacity());
  }
}

SwissNameDictionary::IndexIterable
SwissNameDictionary::IterateEntriesOrdered() {
  // If we are supposed to iterate the empty dictionary (which is non-writable),
  // we have no simple way to get the isolate, which we would need to create a
  // handle.
  // TODO(emrich): Consider always using roots.empty_swiss_dictionary()
  // in the condition once this function gets Isolate as a parameter in order to
  // avoid empty dict checks.
  if (Capacity() == 0) {
    return IndexIterable(DirectHandle<SwissNameDictionary>());
  }

  Isolate* isolate;
  GetIsolateFromHeapObject(*this, &isolate);
  DCHECK_NE(isolate, nullptr);
  return IndexIterable(direct_handle(*this, isolate));
}

SwissNameDictionary::IndexIterable SwissNameDictionary::IterateEntries() {
  return IterateEntriesOrdered();
}

void SwissNameDictionary::SetHash(int32_t hash) {
  WriteField(PrefixOffset(), hash);
}

int SwissNameDictionary::Hash() { return ReadField<int32_t>(PrefixOffset()); }

// static
constexpr int SwissNameDictionary::PrefixOffset() {
  return HeapObject::kHeaderSize;
}

// static
constexpr int SwissNameDictionary::CapacityOffset() {
  return PrefixOffset() + sizeof(uint32_t);
}

// static
constexpr int SwissNameDictionary::MetaTablePointerOffset() {
  return CapacityOffset() + sizeof(int32_t);
}

// static
constexpr int SwissNameDictionary::DataTableStartOffset() {
  return MetaTablePointerOffset() + kTaggedSize;
}

// static
constexpr int SwissNameDictionary::DataTableEndOffset(int capacity) {
  return CtrlTableStartOffset(capacity);
}

// static
constexpr int SwissNameDictionary::CtrlTableStartOffset(int capacity) {
  return DataTableStartOffset() + DataTableSize(capacity);
}

// static
constexpr int SwissNameDictionary::PropertyDetailsTableStartOffset(
    int capacity) {
  return CtrlTableStartOffset(capacity) + CtrlTableSize(capacity);
}

// static
constexpr int SwissNameDictionary::MaxCapacity() {
  constexpr int kConstSize =
      DataTableStartOffset() + sizeof(ByteArray::Header) +
      // Size for present and deleted element count at max capacity:
      2 * sizeof(uint32_t);
  constexpr int kPerEntrySize =
      // size of data table entries:
      kDataTableEntryCount * kTaggedSize +
      // ctrl table entry size:
      kOneByteSize +
      // PropertyDetails table entry size:
      kOneByteSize +
      // Enumeration table entry size at maximum capacity:
      sizeof(uint32_t);

  constexpr int result =
      (kMaxFixedArrayCapacity * kTaggedSize - kConstSize) / kPerEntrySize;
  static_assert(Smi::kMaxValue >= result);

  return result;
}
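
// In other words, MaxCapacity() is the largest capacity whose total payload,
// kConstSize + capacity * kPerEntrySize, still fits in the byte size of the
// largest allocatable FixedArray (kMaxFixedArrayCapacity tagged slots).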

// static
bool SwissNameDictionary::IsEmpty(ctrl_t c) { return c == Ctrl::kEmpty; }

// static
bool SwissNameDictionary::IsFull(ctrl_t c) {
  static_assert(Ctrl::kEmpty < 0);
  static_assert(Ctrl::kDeleted < 0);
  static_assert(Ctrl::kSentinel < 0);
  return c >= 0;
}

// static
bool SwissNameDictionary::IsDeleted(ctrl_t c) { return c == Ctrl::kDeleted; }

// static
bool SwissNameDictionary::IsEmptyOrDeleted(ctrl_t c) {
  static_assert(Ctrl::kDeleted < Ctrl::kSentinel);
  static_assert(Ctrl::kEmpty < Ctrl::kSentinel);
  static_assert(Ctrl::kSentinel < 0);
  return c < Ctrl::kSentinel;
}

// static
swiss_table::ProbeSequence<SwissNameDictionary::Group::kWidth>
SwissNameDictionary::probe(uint32_t hash, int capacity) {
  // If |capacity| is 0, we must produce 1 here, such that the - 1 below
  // yields 0, which is the correct modulo mask for a table of capacity 0.
  int non_zero_capacity = capacity | (capacity == 0);
  return swiss_table::ProbeSequence<Group::kWidth>(
      swiss_table::H1(hash), static_cast<uint32_t>(non_zero_capacity - 1));
}
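
// Because valid capacities are powers of 2, non_zero_capacity - 1 is a bit
// mask (e.g. capacity 16 yields mask 0xF), allowing the probe sequence to
// reduce H1(hash) modulo the capacity with a single AND.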

ACCESSORS_CHECKED2(SwissNameDictionary, meta_table, Tagged<ByteArray>,
                   MetaTablePointerOffset(), true,
                   value->length() >= kMetaTableEnumerationDataStartIndex)

}  // namespace v8::internal

#include "src/objects/object-macros-undef.h"

#endif  // V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_