v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
serializer.cc
1// Copyright 2016 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
6
11#include "src/heap/heap-inl.h" // For Space::identity().
16#include "src/objects/code.h"
21#include "src/objects/map.h"
24#include "src/objects/slots.h"
25#include "src/objects/smi.h"
30
31namespace v8 {
32namespace internal {
33
34Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
35 : isolate_(isolate),
36#if V8_COMPRESS_POINTERS
37 cage_base_(isolate),
38#endif // V8_COMPRESS_POINTERS
39 hot_objects_(isolate->heap()),
40 reference_map_(isolate),
41 external_reference_encoder_(isolate),
42 root_index_map_(isolate),
43 deferred_objects_(isolate->heap()),
44 forward_refs_per_pending_object_(isolate->heap()),
45 flags_(flags)
46#ifdef DEBUG
47 ,
48 back_refs_(isolate->heap()),
49 stack_(isolate->heap())
50#endif
51{
52#ifdef VERBOSE_SERIALIZATION_STATISTICS
53 if (v8_flags.serialization_statistics) {
54 for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) {
55 // Value-initialized to 0.
56 instance_type_count_[space] = std::make_unique<int[]>(kInstanceTypes);
57 instance_type_size_[space] = std::make_unique<size_t[]>(kInstanceTypes);
58 }
59 }
60#endif // VERBOSE_SERIALIZATION_STATISTICS
61}
62
63#ifdef DEBUG
64void Serializer::PopStack() { stack_.Pop(); }
65#endif
66
67void Serializer::CountAllocation(Tagged<Map> map, int size,
68 SnapshotSpace space) {
69 DCHECK(v8_flags.serialization_statistics);
70
71 const int space_number = static_cast<int>(space);
72 allocation_size_[space_number] += size;
73#ifdef VERBOSE_SERIALIZATION_STATISTICS
74 int instance_type = map->instance_type();
75 instance_type_count_[space_number][instance_type]++;
76 instance_type_size_[space_number][instance_type] += size;
77#endif // VERBOSE_SERIALIZATION_STATISTICS
78}
79
80int Serializer::TotalAllocationSize() const {
81 int sum = 0;
82 for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
83 sum += allocation_size_[space];
84 }
85 return sum;
86}
87
88namespace {
89
90const char* ToString(SnapshotSpace space) {
91 switch (space) {
92 case SnapshotSpace::kReadOnlyHeap:
93 return "ReadOnlyHeap";
94 case SnapshotSpace::kOld:
95 return "Old";
96 case SnapshotSpace::kCode:
97 return "Code";
98 case SnapshotSpace::kTrusted:
99 return "Trusted";
100 }
101}
102
103} // namespace
104
105void Serializer::OutputStatistics(const char* name) {
106 if (!v8_flags.serialization_statistics) return;
107
108 PrintF("%s:\n", name);
109 if (!serializer_tracks_serialization_statistics()) {
110 PrintF(" <serialization statistics are not tracked>\n");
111 return;
112 }
113
114 PrintF(" Spaces (bytes):\n");
115
116 static constexpr SnapshotSpace kAllSnapshotSpaces[] = {
117 SnapshotSpace::kReadOnlyHeap,
118 SnapshotSpace::kOld,
119 SnapshotSpace::kCode,
120 };
121
122 for (SnapshotSpace space : kAllSnapshotSpaces) {
123 PrintF("%16s", ToString(space));
124 }
125 PrintF("\n");
126
127 for (SnapshotSpace space : kAllSnapshotSpaces) {
128 PrintF("%16zu", allocation_size_[static_cast<int>(space)]);
129 }
130 PrintF("\n");
131
132#ifdef VERBOSE_SERIALIZATION_STATISTICS
133 PrintF(" Instance types (count and bytes):\n");
134#define PRINT_INSTANCE_TYPE(Name) \
135 for (SnapshotSpace space : kAllSnapshotSpaces) { \
136 const int space_i = static_cast<int>(space); \
137 if (instance_type_count_[space_i][Name]) { \
138 PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space_i][Name], \
139 instance_type_size_[space_i][Name], ToString(space), #Name); \
140 } \
141 }
142 INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
143#undef PRINT_INSTANCE_TYPE
144 PrintF("\n");
145#endif // VERBOSE_SERIALIZATION_STATISTICS
146}
147
148void Serializer::SerializeDeferredObjects() {
149 if (v8_flags.trace_serializer) {
150 PrintF("Serializing deferred objects\n");
151 }
152 WHILE_WITH_HANDLE_SCOPE(isolate(), !deferred_objects_.empty(), {
153 Handle<HeapObject> obj = handle(deferred_objects_.Pop(), isolate());
154
155 ObjectSerializer obj_serializer(this, obj, &sink_);
156 obj_serializer.SerializeDeferred();
157 });
158 sink_.Put(kSynchronize, "Finished with deferred objects");
159}
160
161void Serializer::SerializeObject(Handle<HeapObject> obj, SlotType slot_type) {
162 // ThinStrings are just an indirection to an internalized string, so elide the
163 // indirection and serialize the actual string directly.
164 if (IsThinString(*obj, isolate())) {
165 obj = handle(Cast<ThinString>(*obj)->actual(), isolate());
166 } else if (IsCode(*obj, isolate())) {
167 Tagged<Code> code = Cast<Code>(*obj);
168 // The only expected Code objects here are baseline code and builtins.
169 if (code->kind() == CodeKind::BASELINE) {
170 // For now just serialize the BytecodeArray instead of baseline code.
171 // TODO(v8:11429,pthier): Handle Baseline code in cases we want to
172 // serialize it.
173 obj = handle(code->bytecode_or_interpreter_data(), isolate());
174 } else {
175 CHECK(code->is_builtin());
176 }
177 }
178 SerializeObjectImpl(obj, slot_type);
179}
180
181bool Serializer::MustBeDeferred(Tagged<HeapObject> object) { return false; }
182
183void Serializer::VisitRootPointers(Root root, const char* description,
184 FullObjectSlot start, FullObjectSlot end) {
185 for (FullObjectSlot current = start; current < end; ++current) {
186 SerializeRootObject(current);
187 }
188}
189
190void Serializer::SerializeRootObject(FullObjectSlot slot) {
191 Tagged<Object> o = *slot;
192 if (IsSmi(o)) {
193 PutSmiRoot(slot);
194 } else {
195 SerializeObject(Handle<HeapObject>(slot.location()), SlotType::kAnySlot);
196 }
197}
198
199#ifdef DEBUG
200void Serializer::PrintStack() { PrintStack(std::cout); }
201
202void Serializer::PrintStack(std::ostream& out) {
203 for (const auto o : stack_) {
204 Print(*o, out);
205 out << "\n";
206 }
207}
208#endif // DEBUG
209
210bool Serializer::SerializeRoot(Tagged<HeapObject> obj) {
211 RootIndex root_index;
212 // Derived serializers are responsible for determining if the root has
213 // actually been serialized before calling this.
214 if (root_index_map()->Lookup(obj, &root_index)) {
215 PutRoot(root_index);
216 return true;
217 }
218 return false;
219}
220
221bool Serializer::SerializeHotObject(Tagged<HeapObject> obj) {
223 // Encode a reference to a hot object by its index in the working set.
224 int index = hot_objects_.Find(obj);
225 if (index == HotObjectsList::kNotFound) return false;
226 DCHECK(index >= 0 && index < kHotObjectCount);
227 if (v8_flags.trace_serializer) {
228 PrintF(" Encoding hot object %d:", index);
229 ShortPrint(obj);
230 PrintF("\n");
231 }
232 sink_.Put(HotObject::Encode(index), "HotObject");
233 return true;
234}
235
236bool Serializer::SerializeBackReference(Tagged<HeapObject> obj) {
238 const SerializerReference* reference = reference_map_.LookupReference(obj);
239 if (reference == nullptr) return false;
240 // Encode the location of an already deserialized object in order to write
241 // its location into a later object. We can encode the location as an
242 // offset from the start of the deserialized objects or as an offset
243 // backwards from the current allocation pointer.
244 if (reference->is_attached_reference()) {
245 if (v8_flags.trace_serializer) {
246 PrintF(" Encoding attached reference %d\n",
247 reference->attached_reference_index());
248 }
249 PutAttachedReference(*reference);
250 } else {
251 DCHECK(reference->is_back_reference());
252 if (v8_flags.trace_serializer) {
253 PrintF(" Encoding back reference to: ");
254 ShortPrint(obj);
255 PrintF("\n");
256 }
257
258 sink_.Put(kBackref, "Backref");
259 PutBackReference(obj, *reference);
260 }
261 return true;
262}
263
264bool Serializer::SerializePendingObject(Tagged<HeapObject> obj) {
265 PendingObjectReferences* refs_to_object =
266 forward_refs_per_pending_object_.Find(obj);
267 if (refs_to_object == nullptr) {
268 return false;
269 }
270 PutPendingForwardReference(*refs_to_object);
271 return true;
272}
273
274bool Serializer::ObjectIsBytecodeHandler(Tagged<HeapObject> obj) const {
275 if (!IsCode(obj)) return false;
276 return (Cast<Code>(obj)->kind() == CodeKind::BYTECODE_HANDLER);
277}
278
279void Serializer::PutRoot(RootIndex root) {
281 int root_index = static_cast<int>(root);
282 Tagged<HeapObject> object = Cast<HeapObject>(isolate()->root(root));
283 if (v8_flags.trace_serializer) {
284 PrintF(" Encoding root %d:", root_index);
285 ShortPrint(object);
286 PrintF("\n");
287 }
288
289 // Assert that the first 32 root array items are a conscious choice. They are
290 // chosen so that the most common ones can be encoded more efficiently.
291 static_assert(static_cast<int>(RootIndex::kArgumentsMarker) ==
292 kRootArrayConstantsCount - 1);
293
294 // TODO(ulan): Check that it works with young large objects.
295 if (root_index < kRootArrayConstantsCount &&
296 !HeapLayout::InYoungGeneration(object)) {
297 sink_.Put(RootArrayConstant::Encode(root), "RootConstant");
298 } else {
299 sink_.Put(kRootArray, "RootSerialization");
300 sink_.PutUint30(root_index, "root_index");
301 hot_objects_.Add(object);
302 }
303}
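// Editorial note (sketch, not part of the listed source): roots in the first
// kRootArrayConstantsCount entries (32, per the comment above) that are not in
// the young generation are emitted as a single RootArrayConstant byte; any
// other root costs a kRootArray byte plus a varint index and is also added to
// the hot-objects working set. For example, root_index 5 becomes one byte,
// while root_index 70 becomes kRootArray followed by PutUint30(70).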
304
305void Serializer::PutSmiRoot(FullObjectSlot slot) {
306 // Serializing a smi root in compressed pointer builds will serialize the
307 // full object slot (of kSystemPointerSize) to avoid complications during
308 // deserialization (endianness or smi sequences).
309 static_assert(decltype(slot)::kSlotDataSize == sizeof(Address));
310 static_assert(decltype(slot)::kSlotDataSize == kSystemPointerSize);
311 static constexpr int bytes_to_output = decltype(slot)::kSlotDataSize;
312 static constexpr int size_in_tagged = bytes_to_output >> kTaggedSizeLog2;
313 sink_.Put(FixedRawDataWithSize::Encode(size_in_tagged), "Smi");
314
315 Address raw_value = Cast<Smi>(*slot).ptr();
316 const uint8_t* raw_value_as_bytes =
317 reinterpret_cast<const uint8_t*>(&raw_value);
318 sink_.PutRaw(raw_value_as_bytes, bytes_to_output, "Bytes");
319}
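// Editorial note (sketch, not part of the listed source): a Smi root is always
// written as one full system-pointer-sized slot of raw bytes. On a 64-bit
// build with 4-byte tagged values, bytes_to_output is 8 and size_in_tagged is
// 8 >> 2 == 2, so the Smi occupies two tagged slots of the
// FixedRawDataWithSize record.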
320
321void Serializer::PutBackReference(Tagged<HeapObject> object,
322 SerializerReference reference) {
323 DCHECK_EQ(object, *back_refs_[reference.back_ref_index()]);
324 sink_.PutUint30(reference.back_ref_index(), "BackRefIndex");
325 hot_objects_.Add(object);
326}
327
328void Serializer::PutAttachedReference(SerializerReference reference) {
329 DCHECK(reference.is_attached_reference());
330 sink_.Put(kAttachedReference, "AttachedRef");
331 sink_.PutUint30(reference.attached_reference_index(), "AttachedRefIndex");
332}
333
334void Serializer::PutRepeatRoot(int repeat_count, RootIndex root_index) {
335 if (repeat_count <= kLastEncodableFixedRepeatRootCount) {
336 sink_.Put(FixedRepeatRootWithCount::Encode(repeat_count),
337 "FixedRepeatRoot");
338 } else {
339 sink_.Put(kVariableRepeatRoot, "VariableRepeatRoot");
340 sink_.PutUint30(VariableRepeatRootCount::Encode(repeat_count),
341 "repeat count");
342 }
343 DCHECK_LE(static_cast<uint32_t>(root_index), UINT8_MAX);
344 sink_.Put(static_cast<uint8_t>(root_index), "root index");
345}
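// Editorial note (sketch, not part of the listed source): repeat counts up to
// kLastEncodableFixedRepeatRootCount are folded into the opcode byte itself;
// longer runs use kVariableRepeatRoot plus a varint count. In both encodings
// the root index must fit into a single byte, which is what the DCHECK_LE
// above enforces.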
346
347void Serializer::PutPendingForwardReference(PendingObjectReferences& refs) {
348 sink_.Put(kRegisterPendingForwardRef, "RegisterPendingForwardRef");
349 unresolved_forward_refs_++;
350 // Register the current slot with the pending object.
351 int forward_ref_id = next_forward_ref_id_++;
352 if (refs == nullptr) {
353 // The IdentityMap holding the pending object reference vectors does not
354 // support non-trivial types; in particular it doesn't support destructors
355 // on values. So, we manually allocate a vector with new, and delete it when
356 // resolving the pending object.
357 refs = new std::vector<int>();
358 }
359 refs->push_back(forward_ref_id);
360}
361
362void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
363 sink_.Put(kResolvePendingForwardRef, "ResolvePendingForwardRef");
364 sink_.PutUint30(forward_reference_id, "with this index");
365 unresolved_forward_refs_--;
366
367 // If there are no more unresolved forward refs, reset the forward ref id to
368 // zero so that future forward refs compress better.
369 if (unresolved_forward_refs_ == 0) {
370 next_forward_ref_id_ = 0;
371 }
372}
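// Editorial note (sketch, not part of the listed source): each
// kRegisterPendingForwardRef emitted while the target is still pending is
// later paired with a kResolvePendingForwardRef carrying the same id once the
// target has been allocated, e.g.:
//   kRegisterPendingForwardRef              (slot A gets id 0)
//   ... the pending object is serialized ...
//   kResolvePendingForwardRef, 0            (patch slot A)
// Resetting next_forward_ref_id_ when nothing is unresolved keeps later ids
// small so they compress better as varints.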
373
374ExternalReferenceEncoder::Value Serializer::EncodeExternalReference(
375 Address addr) {
376 Maybe<ExternalReferenceEncoder::Value> result =
377 external_reference_encoder_.TryEncode(addr);
378 if (result.IsNothing()) {
379#ifdef DEBUG
380 PrintStack(std::cerr);
381#endif
382 void* addr_ptr = reinterpret_cast<void*>(addr);
383 v8::base::OS::PrintError("Unknown external reference %p.\n", addr_ptr);
384 v8::base::OS::PrintError("%s\n",
385 ExternalReferenceTable::ResolveSymbol(addr_ptr));
386 v8::base::OS::Abort();
387 }
388 return result.FromJust();
389}
390
391void Serializer::RegisterObjectIsPending(Tagged<HeapObject> obj) {
393 if (IsNotMappedSymbol(obj)) return;
394
395 // Add the given object to the pending objects -> forward refs map.
396 auto find_result = forward_refs_per_pending_object_.FindOrInsert(obj);
397 USE(find_result);
398
399 // If the above emplace didn't actually add the object, then the object must
400 // already have been registered pending by deferring. It might not be in the
401 // deferred objects queue though, since it may be the very object we just
402 // popped off that queue, so just check that it can be deferred.
403 DCHECK_IMPLIES(find_result.already_exists, *find_result.entry != nullptr);
404 DCHECK_IMPLIES(find_result.already_exists,
405 CanBeDeferred(obj, SlotType::kAnySlot));
406}
407
408void Serializer::ResolvePendingObject(Tagged<HeapObject> obj) {
410 if (IsNotMappedSymbol(obj)) return;
411
412 std::vector<int>* refs;
413 CHECK(forward_refs_per_pending_object_.Delete(obj, &refs));
414 if (refs) {
415 for (int index : *refs) {
416 ResolvePendingForwardReference(index);
417 }
418 // See PutPendingForwardReference -- we have to manually manage the memory
419 // of non-trivial IdentityMap values.
420 delete refs;
421 }
422}
423
424void Serializer::Pad(int padding_offset) {
425 // The non-branching GetInt will read up to 3 bytes too far, so we need
426 // to pad the snapshot to make sure we don't read over the end.
427 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
428 sink_.Put(kNop, "Padding");
429 }
430 // Pad up to pointer size for checksum.
431 while (!IsAligned(sink_.Position() + padding_offset, kPointerAlignment)) {
432 sink_.Put(kNop, "Padding");
433 }
434}
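// Editorial note (sketch, not part of the listed source): Pad() first emits
// sizeof(int32_t) - 1 == 3 kNop bytes so a speculative 4-byte read at the very
// end of the snapshot stays in bounds, then keeps emitting kNop until
// sink_.Position() + padding_offset is pointer-aligned for the checksum.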
435
436void Serializer::InitializeCodeAddressMap() {
437 isolate_->InitializeLoggingAndCounters();
438 code_address_map_ = std::make_unique<CodeAddressMap>(isolate_);
439}
440
441Tagged<InstructionStream> Serializer::CopyCode(
442    Tagged<InstructionStream> istream) {
443 code_buffer_.clear(); // Clear buffer without deleting backing store.
444 // Add InstructionStream padding which is usually added by the allocator.
445 // While this doesn't guarantee the exact same alignment, it's enough to
446 // fulfill the alignment requirements of writes during relocation.
447 code_buffer_.resize(InstructionStream::kCodeAlignmentMinusCodeHeader);
448 int size = istream->Size();
449 code_buffer_.insert(code_buffer_.end(),
450 reinterpret_cast<uint8_t*>(istream.address()),
451 reinterpret_cast<uint8_t*>(istream.address() + size));
452 // When pointer compression is enabled the checked cast will try to
453 // decompress map field of off-heap InstructionStream object.
454 return UncheckedCast<InstructionStream>(
455 HeapObject::FromAddress(reinterpret_cast<Address>(
456 &code_buffer_[InstructionStream::kCodeAlignmentMinusCodeHeader])));
457}
458
459void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
460 int size,
461 Tagged<Map> map) {
462 if (serializer_->code_address_map_) {
463 const char* code_name =
464 serializer_->code_address_map_->Lookup(object_->address());
465 LOG(serializer_->isolate_,
466 CodeNameEvent(object_->address(), sink_->Position(), code_name));
467 }
468
469 if (map.SafeEquals(*object_)) {
470 if (map == ReadOnlyRoots(isolate()).meta_map()) {
471 DCHECK_EQ(space, SnapshotSpace::kReadOnlyHeap);
472 sink_->Put(kNewContextlessMetaMap, "NewContextlessMetaMap");
473 } else {
474 DCHECK_EQ(space, SnapshotSpace::kOld);
475 DCHECK(IsContext(map->native_context_or_null()));
476 sink_->Put(kNewContextfulMetaMap, "NewContextfulMetaMap");
477
478 // Defer serialization of the native context in order to break
479 // a potential cycle through the map slot:
480 // MAP -> meta map -> NativeContext -> ... -> MAP
481 // Otherwise it'll be a "forward ref to a map" problem: deserializer
482 // will not be able to create {obj} because {MAP} is not deserialized yet.
483 Tagged<NativeContext> native_context = map->native_context();
484
485 // Sanity check - the native context must not be serialized yet since
486 // it has a contextful map and thus the respective meta map must be
487 // serialized first. So we don't have to search the native context
488 // among the back refs before adding it to the deferred queue.
489 DCHECK_NULL(
490 serializer_->reference_map()->LookupReference(native_context));
491
492 if (!serializer_->forward_refs_per_pending_object_.Find(native_context)) {
493 serializer_->RegisterObjectIsPending(native_context);
494 serializer_->QueueDeferredObject(native_context);
495 }
496 }
497 DCHECK_EQ(size, Map::kSize);
498 } else {
499 sink_->Put(NewObject::Encode(space), "NewObject");
500
501 // TODO(leszeks): Skip this when the map has a fixed size.
502 sink_->PutUint30(size >> kObjectAlignmentBits, "ObjectSizeInWords");
503
504 // Until the space for the object is allocated, it is considered "pending".
505 serializer_->RegisterObjectIsPending(*object_);
506
507 // Serialize map (first word of the object) before anything else, so that
508 // the deserializer can access it when allocating. Make sure that the map
509 // is known to be being serialized for the map slot, so that it is not
510 // deferred.
511 DCHECK(IsMap(map));
512 serializer_->SerializeObject(handle(map, isolate()), SlotType::kMapSlot);
513
514 // Make sure the map serialization didn't accidentally recursively serialize
515 // this object.
516 DCHECK_IMPLIES(
517 !serializer_->IsNotMappedSymbol(*object_),
518 serializer_->reference_map()->LookupReference(object_) == nullptr);
519
520 // To support deserializing pending objects referenced through indirect
521 // pointers, we need to make sure that the 'self' indirect pointer is
522 // initialized before the pending reference is resolved. Otherwise, the
523 // object cannot be referenced.
524 if (V8_ENABLE_SANDBOX_BOOL && IsExposedTrustedObject(*object_)) {
525 sink_->Put(kInitializeSelfIndirectPointer,
526 "InitializeSelfIndirectPointer");
527 }
528
529 // Now that the object is allocated, we can resolve pending references to
530 // it.
531 serializer_->ResolvePendingObject(*object_);
532 }
533
534 if (v8_flags.serialization_statistics) {
535 serializer_->CountAllocation(object_->map(), size, space);
536 }
537
538 // The snapshot should only contain internalized strings (since these end up
539 // in RO space). If this DCHECK fails, allocate the object_ String through
540 // Factory::InternalizeString instead.
541 // TODO(jgruber,v8:13789): Try to enable this DCHECK once custom snapshots
542 // can extend RO space. We may have to do a pass over the heap prior to
543 // serialization that in-place converts all strings to internalized strings.
544 // DCHECK_IMPLIES(object_->IsString(), object_->IsInternalizedString());
545
546 // Mark this object as already serialized, and add it to the reference map so
547 // that it can be accessed by backreference by future objects.
548 serializer_->num_back_refs_++;
549#ifdef DEBUG
550 serializer_->back_refs_.Push(*object_);
551 DCHECK_EQ(serializer_->back_refs_.size(), serializer_->num_back_refs_);
552#endif
553 if (!serializer_->IsNotMappedSymbol(*object_)) {
554 // Only add the object to the map if it's not not_mapped_symbol, else
555 // the reference IdentityMap has issues. We don't expect to have back
556 // references to the not_mapped_symbol anyway, so it's fine.
557 SerializerReference back_reference =
558 SerializerReference::BackReference(serializer_->num_back_refs_ - 1);
559 serializer_->reference_map()->Add(*object_, back_reference);
560 DCHECK_EQ(*object_,
561 *serializer_->back_refs_[back_reference.back_ref_index()]);
562 DCHECK_EQ(back_reference.back_ref_index(), serializer_->reference_map()
563 ->LookupReference(object_)
564 ->back_ref_index());
565 }
566}
567
568uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
569 void* backing_store, uint32_t byte_length,
570 Maybe<uint32_t> max_byte_length) {
572 const SerializerReference* reference_ptr =
573 serializer_->reference_map()->LookupBackingStore(backing_store);
574
575 // Serialize the off-heap backing store.
576 if (reference_ptr) {
577 return reference_ptr->off_heap_backing_store_index();
578 }
579 if (max_byte_length.IsJust()) {
580 sink_->Put(kOffHeapResizableBackingStore,
581 "Off-heap resizable backing store");
582 } else {
583 sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
584 }
585 sink_->PutUint32(byte_length, "length");
586 if (max_byte_length.IsJust()) {
587 sink_->PutUint32(max_byte_length.FromJust(), "max length");
588 }
589 sink_->PutRaw(static_cast<uint8_t*>(backing_store), byte_length,
590 "BackingStore");
591 DCHECK_NE(0, serializer_->seen_backing_stores_index_);
592 SerializerReference reference =
593 SerializerReference::OffHeapBackingStoreReference(
594 serializer_->seen_backing_stores_index_++);
595 // Mark this backing store as already serialized.
596 serializer_->reference_map()->AddBackingStore(backing_store, reference);
597 return reference.off_heap_backing_store_index();
598}
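// Editorial note (sketch, not part of the listed source): backing stores are
// deduplicated via reference_map()->LookupBackingStore(), so a store shared by
// several JSArrayBuffer/JSTypedArray objects is written to the sink once and
// later callers just reuse the returned off_heap_backing_store_index().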
599
600void Serializer::ObjectSerializer::SerializeJSTypedArray() {
601 {
603 Tagged<JSTypedArray> typed_array = Cast<JSTypedArray>(*object_);
604 if (typed_array->is_on_heap()) {
605 typed_array->RemoveExternalPointerCompensationForSerialization(isolate());
606 } else {
607 if (!typed_array->IsDetachedOrOutOfBounds()) {
608 // Explicitly serialize the backing store now.
609 Tagged<JSArrayBuffer> buffer =
610 Cast<JSArrayBuffer>(typed_array->buffer());
611 // We cannot store byte_length or max_byte_length larger than uint32
612 // range in the snapshot.
613 size_t byte_length_size = buffer->GetByteLength();
614 CHECK_LE(byte_length_size,
615 size_t{std::numeric_limits<uint32_t>::max()});
616 uint32_t byte_length = static_cast<uint32_t>(byte_length_size);
617 Maybe<uint32_t> max_byte_length = Nothing<uint32_t>();
618 if (buffer->is_resizable_by_js()) {
619 CHECK_LE(buffer->max_byte_length(),
620 std::numeric_limits<uint32_t>::max());
621 max_byte_length =
622 Just(static_cast<uint32_t>(buffer->max_byte_length()));
623 }
624 size_t byte_offset = typed_array->byte_offset();
625
626 // We need to calculate the backing store from the data pointer
627 // because the ArrayBuffer may already have been serialized.
628 void* backing_store = reinterpret_cast<void*>(
629 reinterpret_cast<Address>(typed_array->DataPtr()) - byte_offset);
630
631 uint32_t ref =
632 SerializeBackingStore(backing_store, byte_length, max_byte_length);
633 typed_array->SetExternalBackingStoreRefForSerialization(ref);
634 } else {
635 typed_array->SetExternalBackingStoreRefForSerialization(0);
636 }
637 }
638 }
639 SerializeObject();
640}
641
642void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
643 ArrayBufferExtension* extension;
644 void* backing_store;
645 {
647 Tagged<JSArrayBuffer> buffer = Cast<JSArrayBuffer>(*object_);
648 backing_store = buffer->backing_store();
649 // We cannot store byte_length or max_byte_length larger than uint32 range
650 // in the snapshot.
651 size_t byte_length_size = buffer->GetByteLength();
652 CHECK_LE(byte_length_size, std::numeric_limits<uint32_t>::max());
653 uint32_t byte_length = static_cast<uint32_t>(byte_length_size);
654 Maybe<uint32_t> max_byte_length = Nothing<uint32_t>();
655 if (buffer->is_resizable_by_js()) {
656 CHECK_LE(buffer->max_byte_length(), std::numeric_limits<uint32_t>::max());
657 max_byte_length = Just(static_cast<uint32_t>(buffer->max_byte_length()));
658 }
659 extension = buffer->extension();
660
661 // Only serialize non-empty backing stores.
662 if (buffer->IsEmpty()) {
663 buffer->SetBackingStoreRefForSerialization(kEmptyBackingStoreRefSentinel);
664 } else {
665 uint32_t ref =
666 SerializeBackingStore(backing_store, byte_length, max_byte_length);
667 buffer->SetBackingStoreRefForSerialization(ref);
668 }
669
670 // Ensure deterministic output by setting extension to null during
671 // serialization.
672 buffer->set_extension(nullptr);
673 }
674 SerializeObject();
675 {
676 Tagged<JSArrayBuffer> buffer = Cast<JSArrayBuffer>(*object_);
677 buffer->set_backing_store(isolate(), backing_store);
678 buffer->set_extension(extension);
679 }
680}
681
682void Serializer::ObjectSerializer::SerializeExternalString() {
683 // For external strings with known resources, we replace the resource field
684 // with the encoded external reference, which we restore upon deserialization.
685 // For the rest we serialize them to look like ordinary sequential strings.
686 auto string = Cast<ExternalString>(object_);
687 Address resource = string->resource_as_address();
688 ExternalReferenceEncoder::Value reference;
689 if (serializer_->external_reference_encoder_.TryEncode(resource).To(
690 &reference)) {
691 DCHECK(reference.is_from_api());
692#ifdef V8_ENABLE_SANDBOX
693 uint32_t external_pointer_entry =
694 string->GetResourceRefForDeserialization();
695#endif
696 string->SetResourceRefForSerialization(reference.index());
697 SerializeObject();
698#ifdef V8_ENABLE_SANDBOX
699 string->SetResourceRefForSerialization(external_pointer_entry);
700#else
701 string->set_address_as_resource(isolate(), resource);
702#endif
703 } else {
704 SerializeExternalStringAsSequentialString();
705 }
706}
707
708void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
709 // Instead of serializing this as an external string, we serialize
710 // an imaginary sequential string with the same content.
711 ReadOnlyRoots roots(isolate());
712 PtrComprCageBase cage_base(isolate());
713 DCHECK(IsExternalString(*object_, cage_base));
714 DirectHandle<ExternalString> string = Cast<ExternalString>(object_);
715 uint32_t length = string->length();
716 Tagged<Map> map;
717 int content_size;
718 int allocation_size;
719 const uint8_t* resource;
720 // Find the map and size for the imaginary sequential string.
721 bool internalized = IsInternalizedString(*object_, cage_base);
722 if (IsExternalOneByteString(*object_, cage_base)) {
723 map = internalized ? roots.internalized_one_byte_string_map()
724 : roots.seq_one_byte_string_map();
725 allocation_size = SeqOneByteString::SizeFor(length);
726 content_size = length * kCharSize;
727 resource = reinterpret_cast<const uint8_t*>(
728 Cast<ExternalOneByteString>(string)->resource()->data());
729 } else {
730 map = internalized ? roots.internalized_two_byte_string_map()
731 : roots.seq_two_byte_string_map();
732 allocation_size = SeqTwoByteString::SizeFor(length);
733 content_size = length * kShortSize;
734 resource = reinterpret_cast<const uint8_t*>(
735 Cast<ExternalTwoByteString>(string)->resource()->data());
736 }
737
738 SnapshotSpace space = SnapshotSpace::kOld;
739 SerializePrologue(space, allocation_size, map);
740
741 // Output the rest of the imaginary string.
742 int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
743 DCHECK(IsAligned(bytes_to_output, kTaggedSize));
744 int slots_to_output = bytes_to_output >> kTaggedSizeLog2;
745
746 // Output raw data header. Do not bother with common raw length cases here.
747 sink_->Put(kVariableRawData, "RawDataForString");
748 sink_->PutUint30(slots_to_output, "length");
749
750 // Serialize string header (except for map).
751 uint8_t* string_start = reinterpret_cast<uint8_t*>(string->address());
752 for (size_t i = sizeof(HeapObjectLayout); i < sizeof(SeqString); i++) {
753 sink_->Put(string_start[i], "StringHeader");
754 }
755
756 // Serialize string content.
757 sink_->PutRaw(resource, content_size, "StringContent");
758
759 // Since the allocation size is rounded up to object alignment, there
760 // may be left-over bytes that need to be padded.
761 size_t padding_size = allocation_size - sizeof(SeqString) - content_size;
762 DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
763 for (size_t i = 0; i < padding_size; i++) {
764 sink_->Put(static_cast<uint8_t>(0), "StringPadding");
765 }
766}
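// Editorial note (sketch, not part of the listed source): the function above
// re-encodes an external string as if it were a sequential string of the same
// content: the prologue uses the matching seq-string map, the header bytes
// after the map are copied verbatim, the characters are copied from the
// external resource, and the tail is zero-padded up to the aligned allocation
// size.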
767
768// Clear and later restore the next link in the weak cell or allocation site.
769// TODO(all): replace this with proper iteration of weak slots in serializer.
770class V8_NODISCARD UnlinkWeakNextScope {
771 public:
772 UnlinkWeakNextScope(Heap* heap, Tagged<HeapObject> object) {
773 Isolate* isolate = heap->isolate();
774 if (TryCast<AllocationSiteWithWeakNext>(object, &object_)) {
775 next_ = object_->weak_next();
776 object_->set_weak_next(ReadOnlyRoots(isolate).undefined_value());
777 }
778 }
779
780 ~UnlinkWeakNextScope() {
781 if (next_ == Smi::zero()) return;
782 object_->set_weak_next(
783 Cast<UnionOf<Undefined, AllocationSiteWithWeakNext>>(next_),
784 UPDATE_WRITE_BARRIER);
785 }
786
787 private:
788 Tagged<AllocationSiteWithWeakNext> object_;
789 Tagged<UnionOf<Smi, Undefined, AllocationSiteWithWeakNext>> next_ =
790 Smi::zero();
791 DISALLOW_GARBAGE_COLLECTION(no_gc_)
792};
793
794void Serializer::ObjectSerializer::Serialize(SlotType slot_type) {
795 RecursionScope recursion(serializer_);
796
797 {
798 DisallowGarbageCollection no_gc;
799 Tagged<HeapObject> raw = *object_;
800 // Defer objects as "pending" if they cannot be serialized now, or if we
801 // exceed a certain recursion depth. Some objects cannot be deferred.
802 bool should_defer =
803 recursion.ExceedsMaximum() || serializer_->MustBeDeferred(raw);
804 if (should_defer && CanBeDeferred(raw, slot_type)) {
805 if (v8_flags.trace_serializer) {
806 PrintF(" Deferring heap object: ");
807 ShortPrint(raw);
808 PrintF("\n");
809 }
810 // Deferred objects are considered "pending".
811 serializer_->RegisterObjectIsPending(raw);
812 serializer_->PutPendingForwardReference(
813 *serializer_->forward_refs_per_pending_object_.Find(raw));
814 serializer_->QueueDeferredObject(raw);
815 return;
816 } else {
817 if (v8_flags.trace_serializer && recursion.ExceedsMaximum()) {
818 PrintF(" Exceeding max recursion depth by %d for: ",
819 recursion.ExceedsMaximumBy());
820 ShortPrint(raw);
821 PrintF("\n");
822 }
823 }
824
825 if (v8_flags.trace_serializer) {
826 PrintF(" Encoding heap object: ");
827 ShortPrint(raw);
828 PrintF("\n");
829 }
830 }
831
832 PtrComprCageBase cage_base(isolate());
833 InstanceType instance_type = object_->map(cage_base)->instance_type();
834 if (InstanceTypeChecker::IsExternalString(instance_type)) {
835 SerializeExternalString();
836 return;
837 }
838 if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
839 SerializeJSTypedArray();
840 return;
841 }
842 if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
843 SerializeJSArrayBuffer();
844 return;
845 }
846 if (InstanceTypeChecker::IsScript(instance_type)) {
847 // Clear cached line ends & compiled lazy function positions.
848 Cast<Script>(object_)->set_line_ends(Smi::zero());
849 Cast<Script>(object_)->set_compiled_lazy_function_positions(
850 ReadOnlyRoots(isolate()).undefined_value());
851 }
852
853#if V8_ENABLE_WEBASSEMBLY
854 // The padding for wasm null is a free space filler. We put it into the roots
855 // table to be able to skip its payload when serializing the read only heap
856 // in the ReadOnlyHeapImageSerializer.
857 DCHECK_IMPLIES(
858 !object_->SafeEquals(ReadOnlyRoots(isolate()).wasm_null_padding()),
859 !IsFreeSpaceOrFiller(*object_, cage_base));
860#else
861 DCHECK(!IsFreeSpaceOrFiller(*object_, cage_base));
862#endif
863
864 SerializeObject();
865}
866
867namespace {
868SnapshotSpace GetSnapshotSpace(Tagged<HeapObject> object) {
869 if (ReadOnlyHeap::Contains(object)) {
870 return SnapshotSpace::kReadOnlyHeap;
871 } else {
872 AllocationSpace heap_space =
873 MutablePageMetadata::FromHeapObject(object)->owner_identity();
874 // Large code objects are not supported and cannot be expressed by
875 // SnapshotSpace.
876 DCHECK_NE(heap_space, CODE_LO_SPACE);
877 switch (heap_space) {
878 case OLD_SPACE:
879 // Young generation objects are tenured, as objects that have survived
880 // until snapshot building probably deserve to be considered 'old'.
881 case NEW_SPACE:
882 // Large objects (young and old) are encoded as simply 'old' snapshot
883 // objects, as "normal" objects vs large objects is a heap implementation
884 // detail and isn't relevant to the snapshot.
885 case NEW_LO_SPACE:
886 case LO_SPACE:
887 // Shared objects are currently encoded as 'old' snapshot objects. This
888 // basically duplicates shared heap objects for each isolate again.
889 case SHARED_SPACE:
890 case SHARED_LO_SPACE:
891 return SnapshotSpace::kOld;
892 case CODE_SPACE:
893 return SnapshotSpace::kCode;
894 case TRUSTED_SPACE:
895 case TRUSTED_LO_SPACE:
896 return SnapshotSpace::kTrusted;
897 // Shared objects are currently encoded as 'trusted' snapshot objects.
898 // This basically duplicates shared trusted heap objects for each isolate
899 // again.
900 case SHARED_TRUSTED_SPACE:
901 case SHARED_TRUSTED_LO_SPACE:
902 return SnapshotSpace::kTrusted;
903 case CODE_LO_SPACE:
904 case RO_SPACE:
905 UNREACHABLE();
906 }
907 }
908}
909} // namespace
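// Editorial note (sketch, not part of the listed source): GetSnapshotSpace()
// collapses the runtime heap spaces into the four snapshot spaces: the
// read-only heap stays kReadOnlyHeap; new, old, large-object and shared spaces
// all become kOld; code space becomes kCode; trusted (and shared trusted)
// spaces become kTrusted. RO_SPACE and CODE_LO_SPACE objects are never routed
// through this path.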
910
911void Serializer::ObjectSerializer::SerializeObject() {
912 Tagged<Map> map = object_->map(serializer_->cage_base());
913 int size = object_->SizeFromMap(map);
914
915 // Descriptor arrays have complex element weakness, that is dependent on the
916 // maps pointing to them. During deserialization, this can cause them to get
917 // prematurely trimmed if one of their owners isn't deserialized yet. We work
918 // around this by forcing all descriptor arrays to be serialized as "strong",
919 // i.e. no custom weakness, and "re-weaken" them in the deserializer once
920 // deserialization completes.
921 //
922 // See also `Deserializer::WeakenDescriptorArrays`.
923 if (map == ReadOnlyRoots(isolate()).descriptor_array_map()) {
924 map = ReadOnlyRoots(isolate()).strong_descriptor_array_map();
925 }
926 SnapshotSpace space = GetSnapshotSpace(*object_);
927 SerializePrologue(space, size, map);
928
929 // Serialize the rest of the object.
930 CHECK_EQ(0, bytes_processed_so_far_);
931 bytes_processed_so_far_ = kTaggedSize;
932
933 SerializeContent(map, size);
934}
935
936void Serializer::ObjectSerializer::SerializeDeferred() {
937 const SerializerReference* back_reference =
938 serializer_->reference_map()->LookupReference(object_);
939
940 if (back_reference != nullptr) {
941 if (v8_flags.trace_serializer) {
942 PrintF(" Deferred heap object ");
944 PrintF(" was already serialized\n");
945 }
946 return;
947 }
948
949 if (v8_flags.trace_serializer) {
950 PrintF(" Encoding deferred heap object\n");
951 }
952 Serialize(SlotType::kAnySlot);
953}
954
955void Serializer::ObjectSerializer::SerializeContent(Tagged<Map> map, int size) {
956 Tagged<HeapObject> raw = *object_;
957 UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), raw);
958 // Iterate references first.
959 VisitObjectBody(isolate(), map, raw, this);
960 // Then output data payload, if any.
961 OutputRawData(raw.address() + size);
962}
963
964void Serializer::ObjectSerializer::VisitPointers(Tagged<HeapObject> host,
965 ObjectSlot start,
966 ObjectSlot end) {
967 VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
968}
969
970void Serializer::ObjectSerializer::VisitPointers(Tagged<HeapObject> host,
971 MaybeObjectSlot start,
972 MaybeObjectSlot end) {
973 HandleScope scope(isolate());
974 PtrComprCageBase cage_base(isolate());
976
977 MaybeObjectSlot current = start;
978 while (current < end) {
979 while (current < end && current.load(cage_base).IsSmi()) {
980 ++current;
981 }
982 if (current < end) {
983 OutputRawData(current.address());
984 }
985 // TODO(ishell): Revisit this change once we stick to 32-bit compressed
986 // tagged values.
987 while (current < end && current.load(cage_base).IsCleared()) {
988 sink_->Put(kClearedWeakReference, "ClearedWeakReference");
989 bytes_processed_so_far_ += kTaggedSize;
990 ++current;
991 }
992 Tagged<HeapObject> current_contents;
993 HeapObjectReferenceType reference_type;
994 while (current < end && current.load(cage_base).GetHeapObject(
995 &current_contents, &reference_type)) {
996 // Write a weak prefix if we need it. This has to be done before the
997 // potential pending object serialization.
998 if (reference_type == HeapObjectReferenceType::WEAK) {
999 sink_->Put(kWeakPrefix, "WeakReference");
1000 }
1001
1002 Handle<HeapObject> obj = handle(current_contents, isolate());
1003 if (serializer_->SerializePendingObject(*obj)) {
1004 bytes_processed_so_far_ += kTaggedSize;
1005 ++current;
1006 continue;
1007 }
1008
1009 RootIndex root_index;
1010 // Compute repeat count and write repeat prefix if applicable.
1011 // Repeats are not subject to the write barrier so we can only use
1012 // immortal immovable root members. In practice we're most likely to only
1013 // repeat smaller root indices, so we limit the root index to 256 to keep
1014 // decoding simple.
1015 static_assert(UINT8_MAX <=
1016 static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
1017 MaybeObjectSlot repeat_end = current + 1;
1018 if (repeat_end < end &&
1019 serializer_->root_index_map()->Lookup(*obj, &root_index) &&
1020 static_cast<uint32_t>(root_index) <= UINT8_MAX &&
1021 current.load(cage_base) == repeat_end.load(cage_base) &&
1022 reference_type == HeapObjectReferenceType::STRONG) {
1023 DCHECK(!HeapLayout::InYoungGeneration(*obj));
1024 while (repeat_end < end &&
1025 repeat_end.load(cage_base) == current.load(cage_base)) {
1026 repeat_end++;
1027 }
1028 int repeat_count = static_cast<int>(repeat_end - current);
1029 current = repeat_end;
1030 bytes_processed_so_far_ += repeat_count * kTaggedSize;
1031 serializer_->PutRepeatRoot(repeat_count, root_index);
1032 } else {
1033 bytes_processed_so_far_ += kTaggedSize;
1034 ++current;
1035 serializer_->SerializeObject(obj, SlotType::kAnySlot);
1036 }
1037 }
1038 }
1039}
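// Editorial note (sketch, not part of the listed source): the loop above walks
// the tagged slots in order, skipping Smis (they are later emitted as raw
// bytes by OutputRawData), writing kClearedWeakReference for cleared weak
// slots, and collapsing runs of identical strong root pointers into one
// repeat-root record before falling back to SerializeObject for everything
// else.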
1040
1041void Serializer::ObjectSerializer::VisitInstructionStreamPointer(
1042 Tagged<Code> host, InstructionStreamSlot slot) {
1043 DCHECK(!host->has_instruction_stream());
1044}
1045
1046// All of these visitor functions are unreachable since we don't serialize
1047// InstructionStream objects anymore.
1048void Serializer::ObjectSerializer::VisitEmbeddedPointer(
1049 Tagged<InstructionStream> host, RelocInfo* rinfo) {
1050 UNREACHABLE();
1051}
1052
1053void Serializer::ObjectSerializer::VisitExternalReference(
1054 Tagged<InstructionStream> host, RelocInfo* rinfo) {
1055 UNREACHABLE();
1056}
1057
1058void Serializer::ObjectSerializer::VisitInternalReference(
1059 Tagged<InstructionStream> host, RelocInfo* rinfo) {
1060 UNREACHABLE();
1061}
1062
1063void Serializer::ObjectSerializer::VisitOffHeapTarget(
1064 Tagged<InstructionStream> host, RelocInfo* rinfo) {
1065 UNREACHABLE();
1066}
1067
1068void Serializer::ObjectSerializer::VisitCodeTarget(
1069 Tagged<InstructionStream> host, RelocInfo* rinfo) {
1070 UNREACHABLE();
1071}
1072
1073void Serializer::ObjectSerializer::OutputExternalReference(
1074 Address target, int target_size, bool sandboxify, ExternalPointerTag tag) {
1075 DCHECK_LE(target_size, sizeof(target)); // Must fit in Address.
1076 DCHECK_IMPLIES(sandboxify, V8_ENABLE_SANDBOX_BOOL);
1077 DCHECK_IMPLIES(sandboxify, tag != kExternalPointerNullTag);
1078 ExternalReferenceEncoder::Value encoded_reference;
1079 bool encoded_successfully;
1080
1081 if (serializer_->allow_unknown_external_references_for_testing()) {
1082 encoded_successfully =
1083 serializer_->TryEncodeExternalReference(target).To(&encoded_reference);
1084 } else {
1085 encoded_reference = serializer_->EncodeExternalReference(target);
1086 encoded_successfully = true;
1087 }
1088
1089 if (!encoded_successfully) {
1090 // In this case the serialized snapshot will not be used in a different
1091 // Isolate and thus the target address will not change between
1092 // serialization and deserialization. We can serialize seen external
1093 // references verbatim.
1094 CHECK(serializer_->allow_unknown_external_references_for_testing());
1095 CHECK(IsAligned(target_size, kTaggedSize));
1096 CHECK_LE(target_size, kFixedRawDataCount * kTaggedSize);
1097 if (sandboxify) {
1098 CHECK_EQ(target_size, kSystemPointerSize);
1099 sink_->Put(kSandboxedRawExternalReference, "SandboxedRawReference");
1100 sink_->PutRaw(reinterpret_cast<uint8_t*>(&target), target_size,
1101 "raw pointer");
1102 } else {
1103 // Encode as FixedRawData instead of RawExternalReference as the target
1104 // may be less than kSystemPointerSize large.
1105 int size_in_tagged = target_size >> kTaggedSizeLog2;
1106 sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
1107 sink_->PutRaw(reinterpret_cast<uint8_t*>(&target), target_size,
1108 "raw pointer");
1109 }
1110 } else if (encoded_reference.is_from_api()) {
1111 if (sandboxify) {
1112 sink_->Put(kSandboxedApiReference, "SandboxedApiRef");
1113 } else {
1114 sink_->Put(kApiReference, "ApiRef");
1115 }
1116 sink_->PutUint30(encoded_reference.index(), "reference index");
1117 } else {
1118 if (sandboxify) {
1119 sink_->Put(kSandboxedExternalReference, "SandboxedExternalRef");
1120 } else {
1121 sink_->Put(kExternalReference, "ExternalRef");
1122 }
1123 sink_->PutUint30(encoded_reference.index(), "reference index");
1124 }
1125 if (sandboxify) {
1126 sink_->PutUint30(tag, "external pointer tag");
1127 }
1128}
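// Editorial note (sketch, not part of the listed source): the branches above
// emit either (1) the raw pointer bytes when the reference could not be
// encoded and unknown references are allowed for testing, (2) an
// API-reference index for pointers registered by the embedder, or (3) an
// index into the external reference table. With sandboxify set, the
// "Sandboxed" variant of the opcode is used and the external pointer tag is
// appended.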
1129
1130void Serializer::ObjectSerializer::VisitCppHeapPointer(
1131 Tagged<HeapObject> host, CppHeapPointerSlot slot) {
1132 PtrComprCageBase cage_base(isolate());
1133 // Currently there's only very limited support for CppHeapPointerSlot
1134 // serialization as it's only used for API wrappers.
1135 //
1136 // We serialize the slot as initialized-but-unused slot. The actual API
1137 // wrapper serialization is implemented in
1138 // `ContextSerializer::SerializeApiWrapperFields()`.
1139 DCHECK(IsJSApiWrapperObject(object_->map(cage_base)));
1140 static_assert(kCppHeapPointerSlotSize % kTaggedSize == 0);
1141 sink_->Put(
1142 FixedRawDataWithSize::Encode(kCppHeapPointerSlotSize >> kTaggedSizeLog2),
1143 "FixedRawData");
1144 sink_->PutRaw(reinterpret_cast<const uint8_t*>(&kNullCppHeapPointer),
1145 kCppHeapPointerSlotSize, "empty cpp heap pointer handle");
1146 bytes_processed_so_far_ += kCppHeapPointerSlotSize;
1147}
1148
1149void Serializer::ObjectSerializer::VisitExternalPointer(
1150 Tagged<HeapObject> host, ExternalPointerSlot slot) {
1151 PtrComprCageBase cage_base(isolate());
1152 InstanceType instance_type = object_->map(cage_base)->instance_type();
1153 if (InstanceTypeChecker::IsForeign(instance_type) ||
1154 InstanceTypeChecker::IsJSExternalObject(instance_type) ||
1155 InstanceTypeChecker::IsAccessorInfo(instance_type) ||
1156 InstanceTypeChecker::IsFunctionTemplateInfo(instance_type)) {
1157 // Output raw data payload, if any.
1158 OutputRawData(slot.address());
1159 Address value = slot.load(isolate());
1160#ifdef V8_ENABLE_SANDBOX
1161 // We need to load the actual tag from the table here since the slot may
1162 // use a generic tag (e.g. kAnyExternalPointerTag) if the concrete tag is
1163 // unknown by the visitor (for example the case for Foreigns).
1164 ExternalPointerHandle handle = slot.Relaxed_LoadHandle();
1165 ExternalPointerTag tag = isolate()->external_pointer_table().GetTag(handle);
1166#else
1168#endif // V8_ENABLE_SANDBOX
1169 const bool sandboxify = V8_ENABLE_SANDBOX_BOOL;
1170 OutputExternalReference(value, kSystemPointerSize, sandboxify, tag);
1171 bytes_processed_so_far_ += kExternalPointerSlotSize;
1172 } else {
1173 // Serialization of external references in other objects is handled
1174 // elsewhere or not supported.
1175 DCHECK(
1176 // Serialization of external pointers stored in EmbedderDataArray
1177 // is not supported yet, mostly because it's not used.
1178 InstanceTypeChecker::IsEmbedderDataArray(instance_type) ||
1179 // See ObjectSerializer::SerializeJSTypedArray().
1180 InstanceTypeChecker::IsJSTypedArray(instance_type) ||
1181 // See ObjectSerializer::SerializeJSArrayBuffer().
1182 InstanceTypeChecker::IsJSArrayBuffer(instance_type) ||
1183 // See ObjectSerializer::SerializeExternalString().
1184 InstanceTypeChecker::IsExternalString(instance_type) ||
1185 // See ObjectSerializer::SanitizeNativeContextScope.
1186 InstanceTypeChecker::IsNativeContext(instance_type) ||
1187 // Serialization of external pointers stored in
1188 // JSSynchronizationPrimitive is not supported.
1189 // TODO(v8:12547): JSSynchronizationPrimitives should also be sanitized
1190 // to always be serialized in an unlocked state.
1191 InstanceTypeChecker::IsJSSynchronizationPrimitive(instance_type) ||
1192 // See ContextSerializer::SerializeObjectWithEmbedderFields().
1193 (InstanceTypeChecker::IsJSObject(instance_type) &&
1194 Cast<JSObject>(host)->GetEmbedderFieldCount() > 0));
1195 }
1196}
1197
1198void Serializer::ObjectSerializer::VisitIndirectPointer(
1199 Tagged<HeapObject> host, IndirectPointerSlot slot,
1200 IndirectPointerMode mode) {
1201#ifdef V8_ENABLE_SANDBOX
1202 // If the slot is empty (i.e. contains a null handle), then we can just skip
1203 // it since in that case the correct action is to encode the null handle as
1204 // raw data, which will automatically happen if the slot is skipped here.
1205 if (slot.IsEmpty()) return;
1206
1207 // If necessary, output any raw data preceding this slot.
1208 OutputRawData(slot.address());
1209
1210 // The slot must be properly initialized at this point, so it will always
1211 // contain a reference to a HeapObject.
1212 Handle<HeapObject> slot_value(Cast<HeapObject>(slot.load(isolate())),
1213 isolate());
1214 CHECK(IsHeapObject(*slot_value));
1215 bytes_processed_so_far_ += kIndirectPointerSize;
1216
1217 // Currently we cannot see pending objects here, but we may need to support
1218 // them in the future. They should already be supported by the deserializer.
1219 CHECK(!serializer_->SerializePendingObject(*slot_value));
1220 sink_->Put(kIndirectPointerPrefix, "IndirectPointer");
1221 serializer_->SerializeObject(slot_value, SlotType::kAnySlot);
1222#else
1223 UNREACHABLE();
1224#endif
1225}
1226
1227void Serializer::ObjectSerializer::VisitTrustedPointerTableEntry(
1228 Tagged<HeapObject> host, IndirectPointerSlot slot) {
1229#ifdef V8_ENABLE_SANDBOX
1230 // These fields only exist on the ExposedTrustedObject class, and they are
1231 // located directly after the Map word.
1232 DCHECK_EQ(bytes_processed_so_far_,
1233 ExposedTrustedObject::kSelfIndirectPointerOffset);
1234
1235 // Nothing to do here. We already emitted the kInitializeSelfIndirectPointer
1236 // after processing the Map word in SerializePrologue.
1237 bytes_processed_so_far_ += kIndirectPointerSize;
1238#else
1239 UNREACHABLE();
1240#endif
1241}
1242
1243void Serializer::ObjectSerializer::VisitProtectedPointer(
1244 Tagged<TrustedObject> host, ProtectedPointerSlot slot) {
1245 Tagged<Object> content = slot.load(isolate());
1246
1247 // Similar to the indirect pointer case, if the slot is empty (i.e. contains
1248 // Smi::zero()), then we skip it here.
1249 if (content == Smi::zero()) return;
1250 DCHECK(!IsSmi(content));
1251
1252 // If necessary, output any raw data preceding this slot.
1253 OutputRawData(slot.address());
1254
1255 Handle<HeapObject> object(Cast<HeapObject>(content), isolate());
1256 bytes_processed_so_far_ += kTaggedSize;
1257
1258 // Currently we cannot see pending objects here, but we may need to support
1259 // them in the future. They should already be supported by the deserializer.
1260 CHECK(!serializer_->SerializePendingObject(*object));
1261 sink_->Put(kProtectedPointerPrefix, "ProtectedPointer");
1262 serializer_->SerializeObject(object, SlotType::kAnySlot);
1263}
1264
1265void Serializer::ObjectSerializer::VisitProtectedPointer(
1266 Tagged<TrustedObject> host, ProtectedMaybeObjectSlot slot) {
1267 Tagged<MaybeObject> maybe_content = slot.load();
1268 // Empty slots (Smi::zero()) and Smi slots can be skipped here.
1269 if (maybe_content.IsSmi()) return;
1270
1271 // If necessary, output any raw data preceding this slot.
1272 OutputRawData(slot.address());
1273
1274 if (maybe_content.IsCleared()) {
1275 sink_->Put(kClearedWeakReference, "ClearedWeakReference");
1276 bytes_processed_so_far_ += kTaggedSize;
1277 return;
1278 }
1279
1280 Tagged<HeapObject> content;
1281 HeapObjectReferenceType reference_type;
1282 if (maybe_content.GetHeapObject(&content, &reference_type)) {
1283 if (reference_type == HeapObjectReferenceType::WEAK) {
1284 sink_->Put(kWeakPrefix, "WeakReference");
1285 }
1286 Handle<HeapObject> object = handle(content, isolate());
1287 // Currently we cannot see pending objects here, but we may need to support
1288 // them in the future. They should already be supported by the deserializer.
1289 CHECK(!serializer_->SerializePendingObject(*object));
1290 sink_->Put(kProtectedPointerPrefix, "ProtectedPointer");
1291 serializer_->SerializeObject(object, SlotType::kAnySlot);
1292 }
1293}
1294
1295void Serializer::ObjectSerializer::VisitJSDispatchTableEntry(
1296 Tagged<HeapObject> host, JSDispatchHandle handle) {
1297#ifdef V8_ENABLE_LEAPTIERING
1298 JSDispatchTable* jdt = IsolateGroup::current()->js_dispatch_table();
1299 // If the slot is empty, we will skip it here and then just serialize the
1300 // null handle as raw data.
1301 if (handle == kNullJSDispatchHandle) return;
1302
1303 // TODO(saelo): we might want to call OutputRawData here, but for that we
1304 // first need to pass the slot address to this method (e.g. as part of a
1305 // JSDispatchHandleSlot struct).
1306#if !defined(V8_COMPRESS_POINTERS) && defined(V8_TARGET_ARCH_64_BIT)
1307 static_assert(kJSDispatchHandleSize + JSFunction::kPaddingOffsetEnd + 1 -
1308 JSFunction::kPaddingOffset ==
1310#endif // COMPRESS_POINTERS
1311 bytes_processed_so_far_ += RoundUp(kJSDispatchHandleSize, kTaggedSize);
1312
1313 auto it = serializer_->dispatch_handle_map_.find(handle);
1314 if (it == serializer_->dispatch_handle_map_.end()) {
1315 auto id = static_cast<uint32_t>(serializer_->dispatch_handle_map_.size());
1316 serializer_->dispatch_handle_map_[handle] = id;
1317 sink_->Put(kAllocateJSDispatchEntry, "AllocateJSDispatchEntry");
1318 sink_->PutUint30(jdt->GetParameterCount(handle), "ParameterCount");
1319
1320 // Currently we cannot see pending objects here, but we may need to support
1321 // them in the future. They should already be supported by the deserializer.
1322 Handle<Code> code(jdt->GetCode(handle), isolate());
1323 CHECK(!serializer_->SerializePendingObject(*code));
1324 serializer_->SerializeObject(code, SlotType::kAnySlot);
1325 } else {
1326 sink_->Put(kJSDispatchEntry, "JSDispatchEntry");
1327 sink_->PutUint30(it->second, "EntryID");
1328 }
1329
1330#else
1331 UNREACHABLE();
1332#endif // V8_ENABLE_LEAPTIERING
1333}
1334namespace {
1335
1336// Similar to OutputRawData, but substitutes the given field with the given
1337// value instead of reading it from the object.
1338void OutputRawWithCustomField(SnapshotByteSink* sink, Address object_start,
1339 int written_so_far, int bytes_to_write,
1340 int field_offset, int field_size,
1341 const uint8_t* field_value) {
1342 int offset = field_offset - written_so_far;
1343 if (0 <= offset && offset < bytes_to_write) {
1344 DCHECK_GE(bytes_to_write, offset + field_size);
1345 sink->PutRaw(reinterpret_cast<uint8_t*>(object_start + written_so_far),
1346 offset, "Bytes");
1347 sink->PutRaw(field_value, field_size, "Bytes");
1348 written_so_far += offset + field_size;
1349 bytes_to_write -= offset + field_size;
1350 sink->PutRaw(reinterpret_cast<uint8_t*>(object_start + written_so_far),
1351 bytes_to_write, "Bytes");
1352 } else {
1353 sink->PutRaw(reinterpret_cast<uint8_t*>(object_start + written_so_far),
1354 bytes_to_write, "Bytes");
1355 }
1356}
1357} // anonymous namespace
1358
1359void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
1360 Address object_start = object_->address();
1361 int base = bytes_processed_so_far_;
1362 int up_to_offset = static_cast<int>(up_to - object_start);
1363 int to_skip = up_to_offset - bytes_processed_so_far_;
1364 int bytes_to_output = to_skip;
1365 DCHECK(IsAligned(bytes_to_output, kTaggedSize));
1366 int tagged_to_output = bytes_to_output / kTaggedSize;
1367 bytes_processed_so_far_ += to_skip;
1368 DCHECK_GE(to_skip, 0);
1369 if (bytes_to_output != 0) {
1370 DCHECK(to_skip == bytes_to_output);
1371 if (tagged_to_output <= kFixedRawDataCount) {
1372 sink_->Put(FixedRawDataWithSize::Encode(tagged_to_output),
1373 "FixedRawData");
1374 } else {
1375 sink_->Put(kVariableRawData, "VariableRawData");
1376 sink_->PutUint30(tagged_to_output, "length");
1377 }
1378#ifdef MEMORY_SANITIZER
1379 // Check that we do not serialize uninitialized memory.
1380 __msan_check_mem_is_initialized(
1381 reinterpret_cast<void*>(object_start + base), bytes_to_output);
1382#endif // MEMORY_SANITIZER
1383 PtrComprCageBase cage_base(isolate_);
1384 if (IsSharedFunctionInfo(*object_, cage_base)) {
1385 // The bytecode age field can be changed by GC concurrently.
1386 static_assert(SharedFunctionInfo::kAgeSize == kUInt16Size);
1387 uint16_t field_value = 0;
1388 OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
1389 SharedFunctionInfo::kAgeOffset,
1390 sizeof(field_value),
1391 reinterpret_cast<uint8_t*>(&field_value));
1392 } else if (IsDescriptorArray(*object_, cage_base)) {
1393 // The number of marked descriptors field can be changed by GC
1394 // concurrently.
1395 const auto field_value = DescriptorArrayMarkingState::kInitialGCState;
1396 static_assert(sizeof(field_value) == DescriptorArray::kSizeOfRawGcState);
1397 OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
1398 DescriptorArray::kRawGcStateOffset,
1399 sizeof(field_value),
1400 reinterpret_cast<const uint8_t*>(&field_value));
1401 } else if (IsCode(*object_, cage_base)) {
1402#ifdef V8_ENABLE_SANDBOX
1403 // When the sandbox is enabled, this field contains the handle to this
1404 // Code object's code pointer table entry. This will be recomputed after
1405 // deserialization.
1406 static uint8_t field_value[kIndirectPointerSize] = {0};
1407 OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
1408 Code::kSelfIndirectPointerOffset,
1409 sizeof(field_value), field_value);
1410#else
1411 // In this case, instruction_start field contains a raw value that will
1412 // similarly be recomputed after deserialization, so write zeros to keep
1413 // the snapshot deterministic.
1414 static uint8_t field_value[kSystemPointerSize] = {0};
1415 OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
1416 Code::kInstructionStartOffset,
1417 sizeof(field_value), field_value);
1418#endif // V8_ENABLE_SANDBOX
1419 } else if (IsSeqString(*object_)) {
1420 // SeqStrings may contain padding. Serialize the padding bytes as 0s to
1421 // make the snapshot content deterministic.
1422 SeqString::DataAndPaddingSizes sizes =
1423 Cast<SeqString>(*object_)->GetDataAndPaddingSizes();
1424 DCHECK_EQ(bytes_to_output, sizes.data_size - base + sizes.padding_size);
1425 int data_bytes_to_output = sizes.data_size - base;
1426 sink_->PutRaw(reinterpret_cast<uint8_t*>(object_start + base),
1427 data_bytes_to_output, "SeqStringData");
1428 sink_->PutN(sizes.padding_size, 0, "SeqStringPadding");
1429 } else {
1430 sink_->PutRaw(reinterpret_cast<uint8_t*>(object_start + base),
1431 bytes_to_output, "Bytes");
1432 }
1433 }
1434}
1435
1436Serializer::HotObjectsList::HotObjectsList(Heap* heap) : heap_(heap) {
1437 strong_roots_entry_ = heap->RegisterStrongRoots(
1438 "Serializer::HotObjectsList", FullObjectSlot(&circular_queue_[0]),
1439 FullObjectSlot(&circular_queue_[kSize]));
1440}
1441Serializer::HotObjectsList::~HotObjectsList() {
1442 heap_->UnregisterStrongRoots(strong_roots_entry_);
1443}
1444
1445DirectHandle<FixedArray> ObjectCacheIndexMap::Values(Isolate* isolate) {
1446 if (size() == 0) {
1447 return isolate->factory()->empty_fixed_array();
1448 }
1449 DirectHandle<FixedArray> externals =
1450 isolate->factory()->NewFixedArray(size());
1452 Tagged<FixedArray> raw = *externals;
1453 IdentityMap<int, base::DefaultAllocationPolicy>::IteratableScope it_scope(
1454 &map_);
1455 for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
1456 raw->set(*it.entry(), it.key());
1457 }
1458
1459 return externals;
1460}
1461
1462bool Serializer::SerializeReadOnlyObjectReference(Tagged<HeapObject> obj,
1463 SnapshotByteSink* sink) {
1464 if (!ReadOnlyHeap::Contains(obj)) return false;
1465
1466 // For objects on the read-only heap, never serialize the object, but instead
1467 // create a back reference that encodes the page number as the chunk_index and
1468 // the offset within the page as the chunk_offset.
1469 Address address = obj.address();
1470 MemoryChunkMetadata* chunk = MemoryChunkMetadata::FromAddress(address);
1471 uint32_t chunk_index = 0;
1472 ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
1473 DCHECK(!read_only_space->writable());
1474 for (ReadOnlyPageMetadata* page : read_only_space->pages()) {
1475 if (chunk == page) break;
1476 ++chunk_index;
1477 }
1478 uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
1479 sink->Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
1480 sink->PutUint30(chunk_index, "ReadOnlyHeapRefChunkIndex");
1481 sink->PutUint30(chunk_offset, "ReadOnlyHeapRefChunkOffset");
1482 return true;
1483}
1484
1485} // namespace internal
1486} // namespace v8