code-memory-access.cc
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/common/code-memory-access.h"

#include <optional>

#include "src/common/code-memory-access-inl.h"
#include "src/objects/instruction-stream-inl.h"

#ifdef V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#endif

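// ThreadIsolation tracks V8's JIT code memory: which pages hold JIT code and
// which allocations live inside them. The metadata itself is kept in memory
// that can be protected with a per-thread memory protection key (PKU) where
// the hardware supports it, so that both the code pages and the bookkeeping
// can be write-protected for CFI purposes.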
namespace v8 {
namespace internal {

ThreadIsolation::TrustedData ThreadIsolation::trusted_data_;

#if V8_HAS_PKU_JIT_WRITE_PROTECT

// static
int RwxMemoryWriteScope::memory_protection_key() {
  return ThreadIsolation::pkey();
}

bool RwxMemoryWriteScope::IsPKUWritable() {
  DCHECK(ThreadIsolation::initialized());
  return base::MemoryProtectionKey::GetKeyPermission(ThreadIsolation::pkey()) ==
         base::MemoryProtectionKey::kNoRestrictions;
}

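// A signal handler should not inherit an open write window to JIT memory, so
// write access for the protection key is dropped when one runs.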
void RwxMemoryWriteScope::SetDefaultPermissionsForSignalHandler() {
  DCHECK(ThreadIsolation::initialized());
  if (!RwxMemoryWriteScope::IsSupported()) return;
  base::MemoryProtectionKey::SetPermissionsForKey(
      ThreadIsolation::pkey(), base::MemoryProtectionKey::kDisableWrite);
}

#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT

// static
bool ThreadIsolation::Enabled() {
#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
  return allocator() != nullptr;
#else
  return false;
#endif
}

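// ConstructNew()/Delete() place ThreadIsolation's own metadata objects in the
// thread-isolated (pkey-protected) allocator when the protection is enabled,
// and fall back to the regular heap otherwise.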
// static
template <typename T, typename... Args>
void ThreadIsolation::ConstructNew(T** ptr, Args&&... args) {
  if (Enabled()) {
    *ptr = reinterpret_cast<T*>(trusted_data_.allocator->Allocate(sizeof(T)));
    if (!*ptr) return;
    new (*ptr) T(std::forward<Args>(args)...);
  } else {
    *ptr = new T(std::forward<Args>(args)...);
  }
}

// static
template <typename T>
void ThreadIsolation::Delete(T* ptr) {
  if (Enabled()) {
    ptr->~T();
    trusted_data_.allocator->Free(ptr);
  } else {
    delete ptr;
  }
}

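// Initialize() decides whether the protection can actually be enabled (an
// allocator must be available, PKU must be supported, and neither jitless mode
// nor TSAN may be active) and sets up the JIT page tracking either way.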
// static
void ThreadIsolation::Initialize(
    ThreadIsolatedAllocator* thread_isolated_allocator) {
#if DEBUG
  trusted_data_.initialized = true;
#endif

  bool enable = thread_isolated_allocator != nullptr && !v8_flags.jitless;

#ifdef THREAD_SANITIZER
  // TODO(sroettger): with TSAN enabled, we get crashes because
  // SetDefaultPermissionsForSignalHandler gets called while a
  // RwxMemoryWriteScope is active. It seems that tsan's ProcessPendingSignals
  // doesn't restore the pkru value after executing the signal handler.
  enable = false;
#endif

#if V8_HAS_PKU_JIT_WRITE_PROTECT
  if (!v8_flags.memory_protection_keys ||
      !base::MemoryProtectionKey::HasMemoryProtectionKeySupport()) {
    enable = false;
  }
#endif

  if (enable) {
    trusted_data_.allocator = thread_isolated_allocator;
#if V8_HAS_PKU_JIT_WRITE_PROTECT
    trusted_data_.pkey = trusted_data_.allocator->Pkey();
#endif
  }

  {
    // We need to allocate the memory for jit page tracking even if we don't
    // enable the ThreadIsolation protections.
    CFIMetadataWriteScope write_scope("Initialize thread isolation.");
    ConstructNew(&trusted_data_.jit_pages_mutex_);
    ConstructNew(&trusted_data_.jit_pages_);
  }

  if (!enable) {
    return;
  }

#if V8_HAS_PKU_JIT_WRITE_PROTECT
  // Check that the page size we assumed at compile time for padding was large
  // enough.
  CHECK_GE(THREAD_ISOLATION_ALIGN_SZ,
           GetPlatformPageAllocator()->CommitPageSize());

  // TODO(sroettger): make this immutable once there's OS support.
  base::MemoryProtectionKey::SetPermissionsAndKey(
      {reinterpret_cast<Address>(&trusted_data_), sizeof(trusted_data_)},
      v8::PageAllocator::Permission::kRead,
      base::MemoryProtectionKey::kDefaultProtectionKey);
#endif
}

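// The Lookup*/TryLookup* functions map an address range to the
// JitPageReference that covers it. The *Locked variants expect
// jit_pages_mutex_ to be held already; the others take the lock themselves.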
// static
ThreadIsolation::JitPageReference ThreadIsolation::LookupJitPageLocked(
    Address addr, size_t size) {
  trusted_data_.jit_pages_mutex_->AssertHeld();
  std::optional<JitPageReference> jit_page = TryLookupJitPageLocked(addr, size);
  CHECK(jit_page.has_value());
  return std::move(jit_page.value());
}

// static
ThreadIsolation::JitPageReference ThreadIsolation::LookupJitPage(Address addr,
                                                                 size_t size) {
  base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
  return LookupJitPageLocked(addr, size);
}

// static
WritableJitPage ThreadIsolation::LookupWritableJitPage(Address addr,
                                                       size_t size) {
  return WritableJitPage(addr, size);
}

// static
std::optional<ThreadIsolation::JitPageReference>
ThreadIsolation::TryLookupJitPage(Address addr, size_t size) {
  base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
  return TryLookupJitPageLocked(addr, size);
}

// static
std::optional<ThreadIsolation::JitPageReference>
ThreadIsolation::TryLookupJitPageLocked(Address addr, size_t size) {
  trusted_data_.jit_pages_mutex_->AssertHeld();

  Address end = addr + size;
  CHECK_GT(end, addr);

  // upper_bound gives us an iterator to the position after address.
  auto it = trusted_data_.jit_pages_->upper_bound(addr);

  // The previous page should be the one we're looking for.
  if (it == trusted_data_.jit_pages_->begin()) {
    return {};
  }

  it--;

  JitPageReference jit_page(it->second, it->first);

  // If the address is not in the range of the jit page, return.
  if (jit_page.End() <= addr) {
    return {};
  }

  if (jit_page.End() >= end) {
    return jit_page;
  }

  // It's possible that the allocation spans multiple pages; merge them.
  auto to_delete_start = ++it;
  for (; jit_page.End() < end && it != trusted_data_.jit_pages_->end(); it++) {
    {
      JitPageReference next_page(it->second, it->first);
      CHECK_EQ(next_page.Address(), jit_page.End());
      jit_page.Merge(next_page);
    }
    Delete(it->second);
  }

  trusted_data_.jit_pages_->erase(to_delete_start, it);

  if (jit_page.End() < end) {
    return {};
  }

  return jit_page;
}

namespace {

size_t GetSize(ThreadIsolation::JitPage* jit_page) {
  return ThreadIsolation::JitPageReference(jit_page, 0).Size();
}

size_t GetSize(ThreadIsolation::JitAllocation allocation) {
  return allocation.Size();
}

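// CheckForRegionOverlap() verifies that [addr, addr+size) does not intersect
// any entry already in the map. Since the inputs are untrusted from the CFI
// point of view, violations are hard CHECK failures.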
template <class T>
void CheckForRegionOverlap(const T& map, Address addr, size_t size) {
  // The data is untrusted from the point of view of CFI, so we check that
  // there are no overlaps with existing regions etc.
  CHECK_GE(addr + size, addr);

  // Find an entry in the map with key > addr.
  auto it = map.upper_bound(addr);
  bool is_begin = it == map.begin();
  bool is_end = it == map.end();

  // Check for overlap with the next entry.
  if (!is_end) {
    Address next_addr = it->first;
    Address offset = next_addr - addr;
    CHECK_LE(size, offset);
  }

  // Check the previous entry for overlap.
  if (!is_begin) {
    it--;
    Address prev_addr = it->first;
    const typename T::value_type::second_type& prev_entry = it->second;
    Address offset = addr - prev_addr;
    CHECK_LE(GetSize(prev_entry), offset);
  }
}

template <typename Iterator>
bool AllocationIsBehindRange(Address range_start, Address range_size,
                             const Iterator& it) {
  Address range_end = range_start + range_size;
  Address allocation_start = it->first;
  Address allocation_size = it->second.Size();
  Address allocation_end = allocation_start + allocation_size;

  if (allocation_start >= range_end) return true;

  CHECK_LE(allocation_end, range_end);
  return false;
}

}  // namespace

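// JitPageReference methods. A reference keeps the underlying JitPage's mutex
// locked for its entire lifetime (via page_lock_ below), so all accesses
// through a reference are serialized.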
ThreadIsolation::JitPageReference::JitPageReference(class JitPage* jit_page,
                                                    base::Address address)
    : page_lock_(&jit_page->mutex_), jit_page_(jit_page), address_(address) {}

ThreadIsolation::JitPage::~JitPage() {
  // TODO(sroettger): check that the page is not in use (scan shadow stacks).
}

size_t ThreadIsolation::JitPageReference::Size() const {
  return jit_page_->size_;
}

void ThreadIsolation::JitPageReference::Shrink(class JitPage* tail) {
  jit_page_->size_ -= tail->size_;
  // Move all allocations that are out of bounds.
  auto it = jit_page_->allocations_.lower_bound(End());
  tail->allocations_.insert(it, jit_page_->allocations_.end());
  jit_page_->allocations_.erase(it, jit_page_->allocations_.end());
}

void ThreadIsolation::JitPageReference::Expand(size_t offset) {
  jit_page_->size_ += offset;
}

void ThreadIsolation::JitPageReference::Merge(JitPageReference& next) {
  DCHECK_EQ(End(), next.Address());
  jit_page_->size_ += next.jit_page_->size_;
  next.jit_page_->size_ = 0;
  jit_page_->allocations_.merge(next.jit_page_->allocations_);
  DCHECK(next.jit_page_->allocations_.empty());
}

ThreadIsolation::JitAllocation&
ThreadIsolation::JitPageReference::RegisterAllocation(base::Address addr,
                                                      size_t size,
                                                      JitAllocationType type) {
  // The data is untrusted from the point of view of CFI, so the checks are
  // security sensitive.
  CHECK_GE(addr, address_);
  base::Address offset = addr - address_;
  base::Address end_offset = offset + size;
  CHECK_GT(end_offset, offset);
  CHECK_GT(jit_page_->size_, offset);
  CHECK_GE(jit_page_->size_, end_offset);

  CheckForRegionOverlap(jit_page_->allocations_, addr, size);
  return jit_page_->allocations_.emplace(addr, JitAllocation(size, type))
      .first->second;
}

ThreadIsolation::JitAllocation&
ThreadIsolation::JitPageReference::LookupAllocation(base::Address addr,
                                                    size_t size,
                                                    JitAllocationType type) {
  auto it = jit_page_->allocations_.find(addr);
  CHECK_NE(it, jit_page_->allocations_.end());
  CHECK_EQ(it->second.Size(), size);
  CHECK_EQ(it->second.Type(), type);
  return it->second;
}

bool ThreadIsolation::JitPageReference::Contains(base::Address addr,
                                                 size_t size,
                                                 JitAllocationType type) const {
  auto it = jit_page_->allocations_.find(addr);
  return it != jit_page_->allocations_.end() && it->second.Size() == size &&
         it->second.Type() == type;
}

void ThreadIsolation::JitPageReference::UnregisterAllocation(
    base::Address addr) {
  // TODO(sroettger): check that the memory is not in use (scan shadow stacks).
  CHECK_EQ(jit_page_->allocations_.erase(addr), 1);
}

void ThreadIsolation::JitPageReference::UnregisterRange(base::Address start,
                                                        size_t size) {
  auto begin = jit_page_->allocations_.lower_bound(start);
  auto end = begin;
  while (end != jit_page_->allocations_.end() &&
         !AllocationIsBehindRange(start, size, end)) {
    end++;
  }

  // TODO(sroettger): check that the memory is not in use (scan shadow stacks).
  jit_page_->allocations_.erase(begin, end);
}

void ThreadIsolation::JitPageReference::UnregisterAllocationsExcept(
    base::Address start, size_t size, const std::vector<base::Address>& keep) {
  // TODO(sroettger): check that the page is not in use (scan shadow stacks).
  JitPage::AllocationMap keep_allocations;

  auto keep_before = jit_page_->allocations_.lower_bound(start);
  auto keep_after = jit_page_->allocations_.lower_bound(start + size);

  // Keep all allocations before the start address.
  if (keep_before != jit_page_->allocations_.begin()) {
    keep_before--;
    keep_allocations.insert(jit_page_->allocations_.begin(), keep_before);
  }

  // From the start address on, keep only the allocations passed in the vector.
  auto keep_iterator = keep.begin();
  for (auto it = keep_before; it != keep_after; it++) {
    if (keep_iterator == keep.end()) break;
    if (it->first == *keep_iterator) {
      keep_allocations.emplace_hint(keep_allocations.end(), it->first,
                                    it->second);
      keep_iterator++;
    }
  }
  CHECK_EQ(keep_iterator, keep.end());

  // Keep all allocations after the region.
  keep_allocations.insert(keep_after, jit_page_->allocations_.end());

  jit_page_->allocations_.swap(keep_allocations);
}

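// Inner-pointer lookups: given a pointer anywhere inside a registered
// allocation, find the allocation's start via upper_bound and one step back in
// the sorted allocation map.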
base::Address ThreadIsolation::JitPageReference::StartOfAllocationAt(
    base::Address inner_pointer) {
  return AllocationContaining(inner_pointer).first;
}

std::pair<base::Address, ThreadIsolation::JitAllocation&>
ThreadIsolation::JitPageReference::AllocationContaining(
    base::Address inner_pointer) {
  auto it = jit_page_->allocations_.upper_bound(inner_pointer);
  CHECK_NE(it, jit_page_->allocations_.begin());
  it--;
  size_t offset = inner_pointer - it->first;
  CHECK_GT(it->second.Size(), offset);
  return {it->first, it->second};
}

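// Page-level bookkeeping, invoked when V8 maps or unmaps a region of
// executable memory.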
// static
void ThreadIsolation::RegisterJitPage(Address address, size_t size) {
  CFIMetadataWriteScope write_scope("Adding new executable memory.");

  base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
  CheckForRegionOverlap(*trusted_data_.jit_pages_, address, size);
  JitPage* jit_page;
  ConstructNew(&jit_page, size);
  trusted_data_.jit_pages_->emplace(address, jit_page);
}

void ThreadIsolation::UnregisterJitPage(Address address, size_t size) {
  // TODO(sroettger): merge the write scopes higher up.
  CFIMetadataWriteScope write_scope("Removing executable memory.");

  JitPage* to_delete;
  {
    base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
    JitPageReference jit_page = LookupJitPageLocked(address, size);

    // We're merging jit pages together, so potentially split them back up
    // if we're only freeing a subrange.

    Address to_free_end = address + size;
    Address jit_page_end = jit_page.Address() + jit_page.Size();

    if (to_free_end < jit_page_end) {
      // There's a tail after the region that we release. Shrink the page and
      // add the tail to the map.
      size_t tail_size = jit_page_end - to_free_end;
      JitPage* tail;
      ConstructNew(&tail, tail_size);
      jit_page.Shrink(tail);
      trusted_data_.jit_pages_->emplace(to_free_end, tail);
    }

    DCHECK_EQ(to_free_end, jit_page.Address() + jit_page.Size());

    if (address == jit_page.Address()) {
      // We remove the start of the region; just remove it from the map.
      to_delete = jit_page.JitPage();
      trusted_data_.jit_pages_->erase(address);
    } else {
      // Otherwise, we need to shrink the region.
      DCHECK_GT(address, jit_page.Address());
      JitPage* tail;
      ConstructNew(&tail, size);
      jit_page.Shrink(tail);
      to_delete = tail;
    }
  }
  Delete(to_delete);
}

// static
bool ThreadIsolation::MakeExecutable(Address address, size_t size) {
  DCHECK(Enabled());

  // TODO(sroettger): ensure that this can only happen at process startup.

#if V8_HAS_PKU_JIT_WRITE_PROTECT
  return base::MemoryProtectionKey::SetPermissionsAndKey(
      {address, size}, PageAllocator::Permission::kReadWriteExecute, pkey());
#else   // V8_HAS_PKU_JIT_WRITE_PROTECT
  UNREACHABLE();
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
}

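// The functions below hand out WritableJitAllocation objects, which represent
// the (possibly pkey-gated) right to write to a tracked code allocation.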
// static
WritableJitAllocation ThreadIsolation::RegisterJitAllocation(
    Address obj, size_t size, JitAllocationType type, bool enforce_write_api) {
  return WritableJitAllocation(
      obj, size, type, WritableJitAllocation::JitAllocationSource::kRegister,
      enforce_write_api);
}

// static
WritableJitAllocation ThreadIsolation::RegisterInstructionStreamAllocation(
    Address addr, size_t size, bool enforce_write_api) {
  return RegisterJitAllocation(
      addr, size, JitAllocationType::kInstructionStream, enforce_write_api);
}

// static
WritableJitAllocation ThreadIsolation::LookupJitAllocation(
    Address addr, size_t size, JitAllocationType type, bool enforce_write_api) {
  return WritableJitAllocation(
      addr, size, type, WritableJitAllocation::JitAllocationSource::kLookup,
      enforce_write_api);
}

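// Bulk registration for several code objects written into one contiguous
// range. Above kSplitThreshold the range is split off into its own JitPage
// first. A hypothetical call site (names invented for illustration):
//
//   ThreadIsolation::RegisterJitAllocations(
//       chunk_base, {code_a_size, code_b_size},
//       ThreadIsolation::JitAllocationType::kInstructionStream);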
// static
void ThreadIsolation::RegisterJitAllocations(Address start,
                                             const std::vector<size_t>& sizes,
                                             JitAllocationType type) {
  CFIMetadataWriteScope write_scope("Register bulk allocations.");

  size_t total_size = 0;
  for (auto size : sizes) {
    total_size += size;
  }

  constexpr size_t kSplitThreshold = 0x40000;
  JitPageReference page_ref = total_size >= kSplitThreshold
                                  ? SplitJitPage(start, total_size)
                                  : LookupJitPage(start, total_size);

  for (auto size : sizes) {
    page_ref.RegisterAllocation(start, size, type);
    start += size;
  }
}

// static
void ThreadIsolation::RegisterJitAllocationForTesting(Address obj,
                                                      size_t size) {
  RegisterJitAllocation(obj, size, JitAllocationType::kInstructionStream);
}

// static
void ThreadIsolation::UnregisterJitAllocationForTesting(Address addr,
                                                        size_t size) {
  LookupJitPage(addr, size).UnregisterAllocation(addr);
}

// static
void ThreadIsolation::UnregisterWasmAllocation(Address addr, size_t size) {
  CFIMetadataWriteScope write_scope("UnregisterWasmAllocation");
  LookupJitPage(addr, size).UnregisterAllocation(addr);
}

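// Splitting carves a sub-range out of an existing JitPage so that it is
// tracked by a JitPage of its own; up to three pages (head, middle, tail) can
// result.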
// static
ThreadIsolation::JitPageReference ThreadIsolation::SplitJitPage(Address addr,
                                                                size_t size) {
  base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
  return SplitJitPageLocked(addr, size);
}

// static
ThreadIsolation::JitPageReference ThreadIsolation::SplitJitPageLocked(
    Address addr, size_t size) {
  trusted_data_.jit_pages_mutex_->AssertHeld();

  JitPageReference jit_page = LookupJitPageLocked(addr, size);

  // Split the JitPage into up to three pages.
  size_t head_size = addr - jit_page.Address();
  size_t tail_size = jit_page.Size() - size - head_size;
  if (tail_size > 0) {
    JitPage* tail;
    ConstructNew(&tail, tail_size);
    jit_page.Shrink(tail);
    trusted_data_.jit_pages_->emplace(addr + size, tail);
  }
  if (head_size > 0) {
    JitPage* mid;
    ConstructNew(&mid, size);
    jit_page.Shrink(mid);
    trusted_data_.jit_pages_->emplace(addr, mid);
    return JitPageReference(mid, addr);
  }

  return jit_page;
}

std::pair<ThreadIsolation::JitPageReference, ThreadIsolation::JitPageReference>
ThreadIsolation::SplitJitPages(Address addr1, size_t size1, Address addr2,
                               size_t size2) {
  if (addr1 > addr2) {
    auto reversed_pair = SplitJitPages(addr2, size2, addr1, size1);
    return {std::move(reversed_pair.second), std::move(reversed_pair.first)};
  }
  // Make sure there's no overlap. SplitJitPageLocked will do additional checks
  // that the sizes don't overflow.
  CHECK_LE(addr1 + size1, addr2);

  base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
  return {SplitJitPageLocked(addr1, size1), SplitJitPageLocked(addr2, size2)};
}

// static
std::optional<Address> ThreadIsolation::StartOfJitAllocationAt(
    Address inner_pointer) {
  CFIMetadataWriteScope write_scope("StartOfJitAllocationAt");
  std::optional<JitPageReference> page = TryLookupJitPage(inner_pointer, 1);
  if (!page) {
    return {};
  }
  return page->StartOfAllocationAt(inner_pointer);
}

// static
bool ThreadIsolation::WriteProtectMemory(
    Address addr, size_t size, PageAllocator::Permission page_permissions) {
  if (!Enabled()) {
    return true;
  }

#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
  return base::MemoryProtectionKey::SetPermissionsAndKey(
      {addr, size}, page_permissions, ThreadIsolation::pkey());
#else
  UNREACHABLE();
#endif
}

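// Helper for CanLookupStartOfJitAllocationAt(): releases an already-held mutex
// when it goes out of scope.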
namespace {

class MutexUnlocker {
 public:
  explicit MutexUnlocker(base::Mutex& mutex) : mutex_(mutex) {
    mutex_.AssertHeld();
  }

  ~MutexUnlocker() {
    mutex_.AssertHeld();
    mutex_.Unlock();
  }

 private:
  base::Mutex& mutex_;
};

}  // namespace

// static
bool ThreadIsolation::CanLookupStartOfJitAllocationAt(Address inner_pointer) {
  CFIMetadataWriteScope write_scope("CanLookupStartOfJitAllocationAt");

  // Try to lock the pages mutex and the mutex of the page itself to prevent
  // potential deadlocks. The profiler can try to do a lookup from a signal
  // handler. If that signal handler runs while the thread holds one of these
  // mutexes, it would result in a deadlock.
  if (!trusted_data_.jit_pages_mutex_->TryLock()) {
    return false;
  }
  MutexUnlocker pages_mutex_unlocker(*trusted_data_.jit_pages_mutex_);

  // upper_bound gives us an iterator to the position after address.
  auto it = trusted_data_.jit_pages_->upper_bound(inner_pointer);

  // The previous page should be the one we're looking for.
  if (it == trusted_data_.jit_pages_->begin()) {
    return {};
  }
  it--;

  JitPage* jit_page = it->second;
  if (jit_page->mutex_.TryLock()) {
    jit_page->mutex_.Unlock();
    return true;
  }
  return false;
}

// static
WritableJitAllocation WritableJitAllocation::ForInstructionStream(
    Tagged<InstructionStream> istream) {
  return WritableJitAllocation(
      istream->address(), istream->Size(),
      ThreadIsolation::JitAllocationType::kInstructionStream,
      JitAllocationSource::kLookup);
}

#ifdef V8_ENABLE_WEBASSEMBLY

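// A wasm jump table and its corresponding far jump table are patched together,
// so writable references to both are handed out as one pair.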
// static
WritableJumpTablePair ThreadIsolation::LookupJumpTableAllocations(
    Address jump_table_address, size_t jump_table_size,
    Address far_jump_table_address, size_t far_jump_table_size) {
  return WritableJumpTablePair(jump_table_address, jump_table_size,
                               far_jump_table_address, far_jump_table_size);
}

WritableJumpTablePair::~WritableJumpTablePair() {
#ifdef DEBUG
  if (jump_table_pages_.has_value()) {
    // We disabled RWX write access for debugging. But we'll need it in the
    // destructor again to release the jit page reference.
    write_scope_.SetWritable();
  }
#endif
}

WritableJumpTablePair::WritableJumpTablePair(
    Address jump_table_address, size_t jump_table_size,
    Address far_jump_table_address, size_t far_jump_table_size,
    WritableJumpTablePair::ForTestingTag)
    : writable_jump_table_(WritableJitAllocation::ForNonExecutableMemory(
          jump_table_address, jump_table_size,
          ThreadIsolation::JitAllocationType::kWasmJumpTable)),
      writable_far_jump_table_(WritableJitAllocation::ForNonExecutableMemory(
          far_jump_table_address, far_jump_table_size,
          ThreadIsolation::JitAllocationType::kWasmFarJumpTable)),
      write_scope_("for testing") {}

// static
WritableJumpTablePair WritableJumpTablePair::ForTesting(
    Address jump_table_address, size_t jump_table_size,
    Address far_jump_table_address, size_t far_jump_table_size) {
  return WritableJumpTablePair(jump_table_address, jump_table_size,
                               far_jump_table_address, far_jump_table_size,
                               ForTestingTag{});
}

#endif  // V8_ENABLE_WEBASSEMBLY

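// WritableFreeSpace::ClearTagged() fills a freed region with the cleared
// free-memory filler value, one tagged slot at a time. The offset template
// parameter (one or two tagged words, per the explicit instantiations below)
// skips the start of the object, presumably its already-written header slots.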
template <size_t offset>
void WritableFreeSpace::ClearTagged(size_t count) const {
  base::Address start = address_ + offset;
  // TODO(v8:13355): add validation before the write.
  MemsetTagged(ObjectSlot(start), Tagged<Object>(kClearedFreeMemoryValue),
               count);
}

template void WritableFreeSpace::ClearTagged<kTaggedSize>(size_t count) const;
template void WritableFreeSpace::ClearTagged<2 * kTaggedSize>(
    size_t count) const;

#if DEBUG

// static
void ThreadIsolation::CheckTrackedMemoryEmpty() {
  DCHECK(trusted_data_.jit_pages_->empty());
}

#endif  // DEBUG

}  // namespace internal
}  // namespace v8