v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
code-memory-access.h
Go to the documentation of this file.
1// Copyright 2022 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_COMMON_CODE_MEMORY_ACCESS_H_
6#define V8_COMMON_CODE_MEMORY_ACCESS_H_
7
8#include <map>
9#include <optional>
10
11#include "include/v8-internal.h"
12#include "include/v8-platform.h"
14#include "src/base/macros.h"
15#include "src/base/memory.h"
17#include "src/common/globals.h"
18
19namespace v8 {
20namespace internal {
21
22// We protect writes to executable memory in some configurations and whenever
23// we write to it, we need to explicitly allow it first.
24//
25// For this purpose, there are a few scope objects with different semantics:
26//
27// - CodePageMemoryModificationScopeForDebugging:
28// A scope only used in non-release builds, e.g. for code zapping.
29// - wasm::CodeSpaceWriteScope:
30// Allows access to Wasm code
31//
32// - RwxMemoryWriteScope:
33// A scope that uses per-thread permissions to allow access. Should not be
34// used directly, but rather is the implementation of one of the above.
35// - RwxMemoryWriteScopeForTesting:
36// Same, but for use in testing.
37
38class RwxMemoryWriteScopeForTesting;
39namespace wasm {
40class CodeSpaceWriteScope;
41}
42
43#if V8_HAS_PKU_JIT_WRITE_PROTECT
44
45// Alignment macros.
46// Adapted from partition_allocator/thread_isolation/alignment.h.
47
48// Page size is not a compile time constant, but we need it for alignment and
49// padding of our global memory.
50// We use the maximum expected value here (currently x64 only) and test in
51// ThreadIsolation::Initialize() that it's a multiple of the real pagesize.
52#define THREAD_ISOLATION_ALIGN_SZ 0x1000
53#define THREAD_ISOLATION_ALIGN alignas(THREAD_ISOLATION_ALIGN_SZ)
54#define THREAD_ISOLATION_ALIGN_OFFSET_MASK (THREAD_ISOLATION_ALIGN_SZ - 1)
55#define THREAD_ISOLATION_FILL_PAGE_SZ(size) \
56 ((THREAD_ISOLATION_ALIGN_SZ - \
57 ((size) & THREAD_ISOLATION_ALIGN_OFFSET_MASK)) % \
58 THREAD_ISOLATION_ALIGN_SZ)
59
60#else // V8_HAS_PKU_JIT_WRITE_PROTECT
61
62#define THREAD_ISOLATION_ALIGN_SZ 0
63#define THREAD_ISOLATION_ALIGN
64#define THREAD_ISOLATION_FILL_PAGE_SZ(size) 0
65
66#endif // V8_HAS_PKU_JIT_WRITE_PROTECT
67
68// This scope is a wrapper for APRR/MAP_JIT machinery on MacOS on ARM64
69// ("Apple M1"/Apple Silicon) or Intel PKU (aka. memory protection keys)
70// with respective low-level semantics.
71//
72// The semantics on MacOS on ARM64 is the following:
73// The scope switches permissions between writable and executable for all the
74// pages allocated with RWX permissions. Only current thread is affected.
75// This achieves "real" W^X and it's fast (see pthread_jit_write_protect_np()
76// for details).
77// By default it is assumed that the state is executable.
78// It's also assumed that the process has the "com.apple.security.cs.allow-jit"
79// entitlement.
80//
81// The semantics on Intel with PKU support is the following:
82// When Intel PKU is available, the scope switches the protection key's
83// permission between writable and not writable. The executable permission
84// cannot be retracted with PKU. That is, this "only" achieves write
85// protection, but is similarly thread-local and fast.
86//
87// On other platforms the scope is a no-op and thus it's allowed to be used.
88//
89// The scope is reentrant and thread safe.
91 public:
92 // The comment argument is used only for ensuring that explanation about why
93 // the scope is needed is given at particular use case.
94 V8_INLINE explicit RwxMemoryWriteScope(const char* comment);
96
97 // Disable copy constructor and copy-assignment operator, since this manages
98 // a resource and implicit copying of the scope can yield surprising errors.
101
102 // Returns true if current configuration supports fast write-protection of
103 // executable pages.
104 V8_INLINE static bool IsSupported();
105
106#if V8_HAS_PKU_JIT_WRITE_PROTECT
107 static int memory_protection_key();
108
109 static bool IsPKUWritable();
110
111 // Linux resets key's permissions to kDisableAccess before executing signal
112 // handlers. If the handler requires access to code page bodies it should take
113 // care of changing permissions to the default state (kDisableWrite).
114 static V8_EXPORT void SetDefaultPermissionsForSignalHandler();
115#endif // V8_HAS_PKU_JIT_WRITE_PROTECT
116
117 private:
120 friend class WritableJumpTablePair;
121
122 // {SetWritable} and {SetExecutable} implicitly enters/exits the scope.
123 // These methods are exposed only for the purpose of implementing other
124 // scope classes that affect executable pages permissions.
125 V8_INLINE static void SetWritable();
126 V8_INLINE static void SetExecutable();
127};
128
129class WritableJitPage;
131class WritableJumpTablePair;
132
133// The ThreadIsolation API is used to protect executable memory using per-thread
134// memory permissions and perform validation for any writes into it.
135//
136// It keeps metadata about all JIT regions in write-protected memory and will
137// use it to validate that the writes are safe from a CFI perspective.
138// Its tasks are:
139// * track JIT pages and allocations and check for validity
140// * check for dangling pointers on the shadow stack (not implemented)
141// * validate code writes like code creation, relocation, etc. (not implemented)
143 public:
144 static bool Enabled();
145 static void Initialize(ThreadIsolatedAllocator* allocator);
146
147 enum class JitAllocationType {
148 kInstructionStream,
149 kWasmCode,
150 kWasmJumpTable,
151 kWasmFarJumpTable,
152 kWasmLazyCompileTable,
153 };
154
155 // Register a new JIT region.
156 static void RegisterJitPage(Address address, size_t size);
157 // Unregister a JIT region that is about to be unmapped.
158 static void UnregisterJitPage(Address address, size_t size);
159 // Make a page executable. Needs to be registered first. Should only be called
160 // if Enabled() is true.
161 V8_NODISCARD static bool MakeExecutable(Address address, size_t size);
162
163 // Register a new JIT allocation for tracking and return a writable reference
164 // to it. All writes should go through the returned WritableJitAllocation
165 // object since it will perform additional validation required for CFI.
166 static WritableJitAllocation RegisterJitAllocation(
167 Address addr, size_t size, JitAllocationType type,
168 bool enforce_write_api = false);
169 // TODO(sroettger): remove this overload and use RegisterJitAllocation
170 // instead.
171 static WritableJitAllocation RegisterInstructionStreamAllocation(
172 Address addr, size_t size, bool enforce_write_api = false);
173 // Register multiple consecutive allocations together.
174 static void RegisterJitAllocations(Address start,
175 const std::vector<size_t>& sizes,
176 JitAllocationType type);
177
178 // Get writable reference to a previously registered allocation. All writes to
179 // executable memory need to go through one of these Writable* objects since
180 // this is where we perform CFI validation.
181 // If enforce_write_api is set, all writes to JIT memory need to go through
182 // this object.
183 static WritableJitAllocation LookupJitAllocation(
184 Address addr, size_t size, JitAllocationType type,
185 bool enforce_write_api = false);
186
187#ifdef V8_ENABLE_WEBASSEMBLY
188 // A special case of LookupJitAllocation since in Wasm, we sometimes have to
189 // unlock two allocations (jump tables) together.
190 static WritableJumpTablePair LookupJumpTableAllocations(
191 Address jump_table_address, size_t jump_table_size,
192 Address far_jump_table_address, size_t far_jump_table_size);
193#endif
194
195 // Unlock a larger region. This allows us to lookup allocations in this
196 // region more quickly without switching the write permissions all the time.
197 static WritableJitPage LookupWritableJitPage(Address addr, size_t size);
198
199 static void UnregisterWasmAllocation(Address addr, size_t size);
200
201 // Check for a potential deadlock in case we want to lookup the jit
202 // allocation from inside a signal handler.
203 static bool CanLookupStartOfJitAllocationAt(Address inner_pointer);
204 static std::optional<Address> StartOfJitAllocationAt(Address inner_pointer);
205
206 // Write-protect a given range of memory. Address and size need to be page
207 // aligned.
208 V8_NODISCARD static bool WriteProtectMemory(
209 Address addr, size_t size, PageAllocator::Permission page_permissions);
210
211 static void RegisterJitAllocationForTesting(Address obj, size_t size);
212 static void UnregisterJitAllocationForTesting(Address addr, size_t size);
213
214#if V8_HAS_PKU_JIT_WRITE_PROTECT
215 static int pkey() { return trusted_data_.pkey; }
216 static bool PkeyIsAvailable() { return trusted_data_.pkey != -1; }
217#endif
218
219#if DEBUG
220 static bool initialized() { return trusted_data_.initialized; }
221 static void CheckTrackedMemoryEmpty();
222#endif
223
224 // A std::allocator implementation that wraps the ThreadIsolated allocator.
225 // This is needed to create STL containers backed by ThreadIsolated memory.
226 template <class T>
228 typedef T value_type;
229
230 StlAllocator() = default;
231 template <class U>
232 explicit StlAllocator(const StlAllocator<U>&) noexcept {}
233
234 value_type* allocate(size_t n) {
235 if (Enabled()) {
236 return static_cast<value_type*>(
237 ThreadIsolation::allocator()->Allocate(n * sizeof(value_type)));
238 } else {
239 return static_cast<value_type*>(::operator new(n * sizeof(T)));
240 }
241 }
242
243 void deallocate(value_type* ptr, size_t n) {
244 if (Enabled()) {
245 ThreadIsolation::allocator()->Free(ptr);
246 } else {
247 ::operator delete(ptr);
248 }
249 }
250 };
251
253 public:
254 explicit JitAllocation(size_t size, JitAllocationType type)
255 : size_(size), type_(type) {}
256 size_t Size() const { return size_; }
257 JitAllocationType Type() const { return type_; }
258
259 private:
260 size_t size_;
262 };
263
264 class JitPage;
265
266 // All accesses to the JitPage go through the JitPageReference class, which
267 // will guard it against concurrent access.
269 public:
270 JitPageReference(class JitPage* page, Address address);
273 JitPageReference& operator=(const JitPageReference&) = delete;
274
275 base::Address Address() const { return address_; }
276 size_t Size() const;
277 base::Address End() const { return Address() + Size(); }
278 JitAllocation& RegisterAllocation(base::Address addr, size_t size,
279 JitAllocationType type);
280 JitAllocation& LookupAllocation(base::Address addr, size_t size,
281 JitAllocationType type);
282 bool Contains(base::Address addr, size_t size,
283 JitAllocationType type) const;
284 void UnregisterAllocation(base::Address addr);
285 void UnregisterAllocationsExcept(base::Address start, size_t size,
286 const std::vector<base::Address>& addr);
287 void UnregisterRange(base::Address addr, size_t size);
288
289 base::Address StartOfAllocationAt(base::Address inner_pointer);
290 std::pair<base::Address, JitAllocation&> AllocationContaining(
291 base::Address addr);
292
293 bool Empty() const { return jit_page_->allocations_.empty(); }
294 void Shrink(class JitPage* tail);
295 void Expand(size_t offset);
296 void Merge(JitPageReference& next);
297 class JitPage* JitPage() { return jit_page_; }
298
299 private:
302 // We get the address from the key of the map when we do a JitPage lookup.
303 // We can save some memory by storing it as part of the reference instead.
305 };
306
307 class JitPage {
308 public:
309 explicit JitPage(size_t size) : size_(size) {}
310 ~JitPage();
311
312 private:
314 typedef std::map<Address, JitAllocation, std::less<Address>,
318 size_t size_;
319
320 friend class JitPageReference;
321 // Allow CanLookupStartOfJitAllocationAt to check if the mutex is locked.
322 friend bool ThreadIsolation::CanLookupStartOfJitAllocationAt(Address);
323 };
324
325 private:
327 return trusted_data_.allocator;
328 }
329
330 // We store pointers in the map since we want to use the entries without
331 // keeping the map locked.
332 typedef std::map<Address, JitPage*, std::less<Address>,
333 StlAllocator<std::pair<const Address, JitPage*>>>
335
336 // The TrustedData needs to be page aligned so that we can protect it using
337 // per-thread memory permissions (e.g. pkeys on x64).
339 ThreadIsolatedAllocator* allocator = nullptr;
340
341#if V8_HAS_PKU_JIT_WRITE_PROTECT
342 int pkey = -1;
343#endif
344
347
348#if DEBUG
349 bool initialized = false;
350#endif
351 };
352
353 static struct TrustedData trusted_data_;
354
355 static_assert(THREAD_ISOLATION_ALIGN_SZ == 0 ||
356 sizeof(trusted_data_) == THREAD_ISOLATION_ALIGN_SZ);
357
358 // Allocate and construct C++ objects using memory backed by the
359 // ThreadIsolated allocator.
360 template <typename T, typename... Args>
361 static void ConstructNew(T** ptr, Args&&... args);
362 template <typename T>
363 static void Delete(T* ptr);
364
365 // Lookup a JitPage that spans a given range. Note that JitPages are not
366 // required to align with OS pages. There are no minimum size requirements and
367 // we can split and merge them under the hood for performance optimizations.
368 // IOW, the returned JitPage is guaranteed to span the given range, but
369 // doesn't need to be the exact previously registered JitPage.
370 static JitPageReference LookupJitPage(Address addr, size_t size);
371 static JitPageReference LookupJitPageLocked(Address addr, size_t size);
372 static std::optional<JitPageReference> TryLookupJitPage(Address addr,
373 size_t size);
374 // The caller needs to hold a lock of the jit_pages_mutex_
375 static std::optional<JitPageReference> TryLookupJitPageLocked(Address addr,
376 size_t size);
377 static JitPageReference SplitJitPageLocked(Address addr, size_t size);
378 static JitPageReference SplitJitPage(Address addr, size_t size);
379 static std::pair<JitPageReference, JitPageReference> SplitJitPages(
380 Address addr1, size_t size1, Address addr2, size_t size2);
381
382 template <class T>
383 friend struct StlAllocator;
384 friend class WritableJitPage;
386 friend class WritableJumpTablePair;
387};
388
389// A scope class that temporarily makes the JitAllocation writable. All writes
390// to executable memory should go through this object since it adds validation
391// that the writes are safe for CFI.
393 public:
397
400
401 // WritableJitAllocations are used during reloc iteration. But in some
402 // cases, we relocate code off-heap, e.g. when growing AssemblerBuffers.
403 // This function creates a WritableJitAllocation that doesn't unlock the
404 // executable memory.
406 Address addr, size_t size, ThreadIsolation::JitAllocationType type);
407
408 // Writes a header slot either as a primitive or as a Tagged value.
409 // Important: this function will not trigger a write barrier by itself,
410 // since we want to keep the code running with write access to executable
411 // memory to a minimum. You should trigger the write barriers after this
412 // function goes out of scope.
413 template <typename T, size_t offset>
415 template <typename T, size_t offset>
417 template <typename T, size_t offset>
419 template <typename T, size_t offset>
422 template <typename T, size_t offset>
425 template <typename T>
427
428 // CopyCode and CopyData have the same implementation at the moment, but
429 // they will diverge once we implement validation.
430 V8_INLINE void CopyCode(size_t dst_offset, const uint8_t* src,
431 size_t num_bytes);
432 V8_INLINE void CopyData(size_t dst_offset, const uint8_t* src,
433 size_t num_bytes);
434
435 template <typename T>
437 template <typename T>
438 V8_INLINE void WriteValue(Address address, T value);
439 template <typename T>
441
442 V8_INLINE void ClearBytes(size_t offset, size_t len);
443
444 Address address() const { return address_; }
445 size_t size() const { return allocation_.Size(); }
446
447 private:
449 kRegister,
450 kLookup,
451 };
452 V8_INLINE WritableJitAllocation(Address addr, size_t size,
454 JitAllocationSource source,
455 bool enforce_write_api = false);
456 // Used for non-executable memory.
457 V8_INLINE WritableJitAllocation(Address addr, size_t size,
459 bool enforce_write_api);
460
462
463 // In DEBUG mode, we only make RWX memory writable during the write operations
464 // themselves to ensure that all writes go through this object.
465 // This function returns a write scope that can be used for these writes.
466 V8_INLINE std::optional<RwxMemoryWriteScope> WriteScopeForApiEnforcement()
467 const;
468
470 // TODO(sroettger): we can move the memory write scopes into the Write*
471 // functions in debug builds. This would allow us to ensure that all writes
472 // go through this object.
473 // The scope and page reference are optional in case we're creating a
474 // WritableJitAllocation for off-heap memory. See ForNonExecutableMemory
475 // above.
476 std::optional<RwxMemoryWriteScope> write_scope_;
477 std::optional<ThreadIsolation::JitPageReference> page_ref_;
479 bool enforce_write_api_ = false;
480
481 friend class ThreadIsolation;
482 friend class WritableJitPage;
484};
485
486// Similar to the WritableJitAllocation, all writes to free space should go
487// through this object since it adds validation that the writes are safe for
488// CFI.
489// For convenience, it can also be used for writes to non-executable memory for
490// which it will skip the CFI checks.
492 public:
493 // This function can be used to create a WritableFreeSpace object for
494 // non-executable memory only, i.e. it won't perform CFI validation and
495 // doesn't unlock the code space.
496 // For executable memory, use the WritableJitPage::FreeRange function.
498 size_t size);
499
503
504 template <typename T, size_t offset>
506 template <size_t offset>
507 void ClearTagged(size_t count) const;
508
509 base::Address Address() const { return address_; }
510 int Size() const { return size_; }
511 bool Executable() const { return executable_; }
512
513 private:
514 WritableFreeSpace(base::Address addr, size_t size, bool executable);
515
517 const int size_;
518 const bool executable_;
519
520 friend class WritableJitPage;
521};
522
524 size_t count) const;
526 size_t count) const;
527
529 public:
530 V8_INLINE WritableJitPage(Address addr, size_t size);
531
535 friend class ThreadIsolation;
536
537 V8_INLINE WritableJitAllocation LookupAllocationContaining(Address addr);
538
539 V8_INLINE WritableFreeSpace FreeRange(Address addr, size_t size);
540
541 bool Empty() const { return page_ref_.Empty(); }
542
543 private:
546};
547
548#ifdef V8_ENABLE_WEBASSEMBLY
549
550class V8_EXPORT_PRIVATE WritableJumpTablePair {
551 public:
552 WritableJitAllocation& jump_table() { return writable_jump_table_; }
553 WritableJitAllocation& far_jump_table() { return writable_far_jump_table_; }
554
555 ~WritableJumpTablePair();
556 WritableJumpTablePair(const WritableJumpTablePair&) = delete;
557 WritableJumpTablePair& operator=(const WritableJumpTablePair&) = delete;
558
559 static WritableJumpTablePair ForTesting(Address jump_table_address,
560 size_t jump_table_size,
561 Address far_jump_table_address,
562 size_t far_jump_table_size);
563
564 private:
565 V8_INLINE WritableJumpTablePair(Address jump_table_address,
566 size_t jump_table_size,
567 Address far_jump_table_address,
568 size_t far_jump_table_size);
569
570 // This constructor is only used for testing.
571 struct ForTestingTag {};
572 WritableJumpTablePair(Address jump_table_address, size_t jump_table_size,
573 Address far_jump_table_address,
574 size_t far_jump_table_size, ForTestingTag);
575
576 // The WritableJitAllocation objects need to come before the write scope since
577 // we rely on the destructors to reset the write permissions in the right
578 // order when enforcing the write API in debug mode.
579 WritableJitAllocation writable_jump_table_;
580 WritableJitAllocation writable_far_jump_table_;
581
582 RwxMemoryWriteScope write_scope_;
583 std::optional<std::pair<ThreadIsolation::JitPageReference,
584 ThreadIsolation::JitPageReference>>
585 jump_table_pages_;
586
587 friend class ThreadIsolation;
588};
589
590#endif
591
592template <class T>
595 return true;
596}
597
598template <class T>
601 return false;
602}
603
604// This class is a no-op version of the RwxMemoryWriteScope class above.
605// It's used as a target type for other scope type definitions when a no-op
606// semantics is required.
608 public:
610 V8_INLINE explicit NopRwxMemoryWriteScope(const char* comment) {
611 // Define a constructor to avoid unused variable warnings.
612 }
613};
614
615// Same as the RwxMemoryWriteScope but without inlining the code.
616// This is a workaround for component build issue (crbug/1316800), when
617// a thread_local value can't be properly exported.
619 : public RwxMemoryWriteScope {
620 public:
623
624 // Disable copy constructor and copy-assignment operator, since this manages
625 // a resource and implicit copying of the scope can yield surprising errors.
628 const RwxMemoryWriteScopeForTesting&) = delete;
629};
630
631#if V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT
632// Metadata are not protected yet with PTHREAD_JIT_WRITE_PROTECT
634#else
636#endif
637
638#ifdef V8_ENABLE_MEMORY_SEALING
640#else
642#endif
643
644} // namespace internal
645} // namespace v8
646
647#endif // V8_COMMON_CODE_MEMORY_ACCESS_H_
#define T
V8_INLINE NopRwxMemoryWriteScope()=default
V8_INLINE NopRwxMemoryWriteScope(const char *comment)
RwxMemoryWriteScopeForTesting(const RwxMemoryWriteScopeForTesting &)=delete
RwxMemoryWriteScopeForTesting & operator=(const RwxMemoryWriteScopeForTesting &)=delete
RwxMemoryWriteScope(const RwxMemoryWriteScope &)=delete
RwxMemoryWriteScope & operator=(const RwxMemoryWriteScope &)=delete
JitAllocation(size_t size, JitAllocationType type)
JitPageReference(class JitPage *page, Address address)
JitPageReference(JitPageReference &&) V8_NOEXCEPT=default
std::map< Address, JitAllocation, std::less< Address >, StlAllocator< std::pair< const Address, JitAllocation > > > AllocationMap
std::map< Address, JitPage *, std::less< Address >, StlAllocator< std::pair< const Address, JitPage * > > > JitPageMap
static ThreadIsolatedAllocator * allocator()
V8_INLINE void WriteHeaderSlot(Tagged< T > value, RelaxedStoreTag) const
WritableFreeSpace & operator=(const WritableFreeSpace &)=delete
void ClearTagged(size_t count) const
WritableFreeSpace(const WritableFreeSpace &)=delete
static V8_INLINE WritableFreeSpace ForNonExecutableMemory(base::Address addr, size_t size)
static V8_INLINE WritableJitAllocation ForNonExecutableMemory(Address addr, size_t size, ThreadIsolation::JitAllocationType type)
ThreadIsolation::JitPageReference & page_ref()
V8_INLINE void WriteUnalignedValue(Address address, T value)
const ThreadIsolation::JitAllocation allocation_
V8_INLINE void CopyCode(size_t dst_offset, const uint8_t *src, size_t num_bytes)
V8_INLINE void WriteValue(Address address, T value)
static WritableJitAllocation ForInstructionStream(Tagged< InstructionStream > istream)
std::optional< ThreadIsolation::JitPageReference > page_ref_
V8_INLINE void WriteHeaderSlot(Tagged< T > value, RelaxedStoreTag)
V8_INLINE void WriteHeaderSlot(T value)
V8_INLINE void WriteProtectedPointerHeaderSlot(Tagged< T > value, RelaxedStoreTag)
V8_INLINE void ClearBytes(size_t offset, size_t len)
V8_INLINE void WriteProtectedPointerHeaderSlot(Tagged< T > value, ReleaseStoreTag)
V8_INLINE void WriteHeaderSlot(Tagged< T > value, ReleaseStoreTag)
WritableJitAllocation(const WritableJitAllocation &)=delete
V8_INLINE void CopyData(size_t dst_offset, const uint8_t *src, size_t num_bytes)
V8_INLINE std::optional< RwxMemoryWriteScope > WriteScopeForApiEnforcement() const
std::optional< RwxMemoryWriteScope > write_scope_
WritableJitAllocation & operator=(const WritableJitAllocation &)=delete
WritableJitPage(const WritableJitPage &)=delete
WritableJitPage & operator=(const WritableJitPage &)=delete
ThreadIsolation::JitPageReference page_ref_
#define THREAD_ISOLATION_ALIGN
#define THREAD_ISOLATION_ALIGN_SZ
const int size_
Definition assembler.cc:132
const ObjectRef type_
int start
base::Vector< const DirectHandle< Object > > args
Definition execution.cc:74
int32_t offset
uintptr_t Address
Definition memory.h:13
bool operator!=(ExternalReference lhs, ExternalReference rhs)
bool operator==(ExternalReference lhs, ExternalReference rhs)
Definition c-api.cc:87
#define V8_ALLOW_UNUSED
#define V8_NOEXCEPT
#define V8_EXPORT_PRIVATE
Definition macros.h:460
void deallocate(value_type *ptr, size_t n)
StlAllocator(const StlAllocator< U > &) noexcept
#define V8_EXPORT
Definition v8config.h:800
#define V8_INLINE
Definition v8config.h:500
#define V8_NODISCARD
Definition v8config.h:693