#ifdef V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

#if V8_HAS_PKU_JIT_WRITE_PROTECT
// static
int RwxMemoryWriteScope::memory_protection_key() {
  return ThreadIsolation::pkey();
}
// static
bool RwxMemoryWriteScope::IsPKUWritable() {
  DCHECK(ThreadIsolation::initialized());
  return base::MemoryProtectionKey::GetKeyPermission(ThreadIsolation::pkey()) ==
         base::MemoryProtectionKey::kNoRestrictions;
}
// static
void RwxMemoryWriteScope::SetDefaultPermissionsForSignalHandler() {
  DCHECK(ThreadIsolation::initialized());
  if (!RwxMemoryWriteScope::IsSupported()) return;
  base::MemoryProtectionKey::SetPermissionsForKey(
      ThreadIsolation::pkey(), base::MemoryProtectionKey::kDisableWrite);
}

#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
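// The scope above is a thin wrapper over Linux memory protection keys. A
// minimal plain-libc sketch of the mechanism (glibc 2.27+, x86 MPK; these are
// kernel/libc APIs, not V8's wrappers): pkey_set() only rewrites the calling
// thread's PKRU register, which is what makes flipping write access cheap and
// per-thread.
//
//   #include <sys/mman.h>
//
//   int pkey = pkey_alloc(0, 0);              // reserve a protection key
//   pkey_mprotect(code, size, PROT_READ | PROT_WRITE | PROT_EXEC, pkey);
//   pkey_set(pkey, PKEY_DISABLE_WRITE);       // default: executable, not writable
//   pkey_set(pkey, 0);                        // open a write window...
//   memcpy(code, new_code, code_size);
//   pkey_set(pkey, PKEY_DISABLE_WRITE);       // ...and close it again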
#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
thread_local int RwxMemoryWriteScope::code_space_write_nesting_level_ = 0;
#endif  // V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
// static
template <typename T, typename... Args>
void ThreadIsolation::ConstructNew(T** ptr, Args&&... args) {
  if (Enabled()) {
    *ptr = reinterpret_cast<T*>(trusted_data_.allocator->Allocate(sizeof(T)));
    if (!*ptr) return;
    // Placement-new into the thread-isolated allocation.
    new (*ptr) T(std::forward<Args>(args)...);
  } else {
    *ptr = new T(std::forward<Args>(args)...);
  }
}
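// A sketch of the matching Delete() path (assumed shape, mirroring
// ConstructNew above): objects placement-new'd into the isolated allocator
// need an explicit destructor call plus Free() instead of operator delete.
//
//   template <typename T>
//   void ThreadIsolation::Delete(T* ptr) {
//     if (Enabled()) {
//       ptr->~T();                           // explicit destructor call
//       trusted_data_.allocator->Free(ptr);  // return the isolated memory
//     } else {
//       delete ptr;
//     }
//   }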
// static
void ThreadIsolation::Initialize(
    ThreadIsolatedAllocator* thread_isolated_allocator) {
  bool enable = thread_isolated_allocator != nullptr && !v8_flags.jitless;
#ifdef THREAD_SANITIZER
  // The protections are incompatible with TSAN; keep them disabled there.
  enable = false;
#endif  // THREAD_SANITIZER
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  if (!v8_flags.memory_protection_keys ||
      !base::MemoryProtectionKey::HasMemoryProtectionKeySupport()) {
    enable = false;
  }
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  trusted_data_.pkey = trusted_data_.allocator->Pkey();
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  // Check that the compile-time page size assumed for padding is large
  // enough, then write-protect the global trusted data itself.
  CHECK_GE(THREAD_ISOLATION_ALIGN_SZ,
           GetPlatformPageAllocator()->CommitPageSize());

  base::MemoryProtectionKey::SetPermissionsAndKey(
      {reinterpret_cast<Address>(&trusted_data_), sizeof(trusted_data_)},
      v8::PageAllocator::Permission::kRead,
      base::MemoryProtectionKey::kDefaultProtectionKey);
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
}
ThreadIsolation::JitPageReference ThreadIsolation::LookupJitPageLocked(
    Address addr, size_t size) {
  std::optional<JitPageReference> jit_page = TryLookupJitPageLocked(addr, size);
  CHECK(jit_page.has_value());
  return std::move(jit_page.value());
}
// static
std::optional<ThreadIsolation::JitPageReference>
ThreadIsolation::TryLookupJitPage(Address addr, size_t size) {
  base::MutexGuard guard(trusted_data_.jit_pages_mutex_);
  return TryLookupJitPageLocked(addr, size);
}

// static
std::optional<ThreadIsolation::JitPageReference>
ThreadIsolation::TryLookupJitPageLocked(Address addr, size_t size) {
  trusted_data_.jit_pages_mutex_->AssertHeld();

  Address end = addr + size;
  CHECK_GT(end, addr);

  // The predecessor of upper_bound(addr) is the only page that can contain
  // addr.
  auto it = trusted_data_.jit_pages_->upper_bound(addr);
  if (it == trusted_data_.jit_pages_->begin()) return {};
  it--;
  JitPageReference jit_page(it->second, it->first);

  if (jit_page.End() <= addr) {
    // The closest page ends before addr: the range is not tracked.
    return {};
  }
  if (jit_page.End() >= end) {
    // The whole range fits into this single page.
    return jit_page;
  }

  // The range spans multiple registered pages: merge contiguous successors
  // into the first page and erase their now-stale map entries.
  auto to_delete_start = ++it;
  while (jit_page.End() < end && it != trusted_data_.jit_pages_->end() &&
         it->first == jit_page.End()) {
    JitPageReference next_page(it->second, it->first);
    jit_page.Merge(next_page);
    ++it;
  }
  trusted_data_.jit_pages_->erase(to_delete_start, it);

  if (jit_page.End() < end) {
    return {};
  }
  return jit_page;
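// The page-merging above relies on the map being keyed by start address with
// non-overlapping entries. A self-contained sketch of the same idiom with
// hypothetical types (plain std::map, not V8's allocator-aware map):
//
//   using Address = uintptr_t;
//   std::map<Address, size_t> pages;          // start -> size
//   Address covered_end = page_start + pages[page_start];
//   auto it = pages.upper_bound(page_start);  // first page after ours
//   while (covered_end < end && it != pages.end() &&
//          it->first == covered_end) {
//     covered_end += it->second;              // contiguous: absorb it
//     it = pages.erase(it);                   // and drop its entry
//   }
//   bool covered = covered_end >= end;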
size_t GetSize(ThreadIsolation::JitAllocation allocation) {
  return allocation.Size();
}
// Check that a new region (addr, size) doesn't overlap any entry of the given
// map. Entries are keyed by start address, so only the two neighbors of the
// insertion point need to be inspected.
template <typename T>
void CheckForRegionOverlap(const T& map, Address addr, size_t size) {
  auto it = map.upper_bound(addr);
  bool is_begin = it == map.begin();
  bool is_end = it == map.end();

  // Check for overlap with the next entry.
  if (!is_end) {
    Address next_addr = it->first;
    Address offset = next_addr - addr;
    CHECK_LE(size, offset);
  }

  // Check the previous entry for overlap.
  if (!is_begin) {
    it--;
    Address prev_addr = it->first;
    const typename T::value_type::second_type& prev_entry = it->second;
    Address offset = addr - prev_addr;
    CHECK_LE(GetSize(prev_entry), offset);
  }
}
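// Usage sketch (hypothetical call site): because entries are keyed by start
// address, only the neighbors around upper_bound(addr) can collide, so the
// check is O(log n) rather than a full scan.
//
//   std::map<Address, ThreadIsolation::JitAllocation> allocations;
//   CheckForRegionOverlap(allocations, addr, size);  // CHECK-fails on overlap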
template <typename Iterator>
bool AllocationIsBehindRange(Address range_start, Address range_size,
                             const Iterator& it) {
  Address range_end = range_start + range_size;
  Address allocation_start = it->first;
  Address allocation_size = it->second.Size();
  Address allocation_end = allocation_start + allocation_size;

  if (allocation_start >= range_end) return true;

  // Otherwise the allocation has to lie entirely within the range.
  CHECK_LE(allocation_end, range_end);
  return false;
}
ThreadIsolation::JitPageReference::JitPageReference(class JitPage* jit_page,
                                                    base::Address address)
    : page_lock_(&jit_page->mutex_), jit_page_(jit_page), address_(address) {}
size_t ThreadIsolation::JitPageReference::Size() const {
  return jit_page_->size_;
}
void ThreadIsolation::JitPageReference::Shrink(class JitPage* tail) {
  jit_page_->size_ -= tail->size_;
  // Move all allocations that now belong to the tail page.
  auto it = jit_page_->allocations_.lower_bound(End());
  tail->allocations_.insert(it, jit_page_->allocations_.end());
  jit_page_->allocations_.erase(it, jit_page_->allocations_.end());
}
void ThreadIsolation::JitPageReference::Expand(size_t offset) {
  jit_page_->size_ += offset;
}
ThreadIsolation::JitAllocation&
ThreadIsolation::JitPageReference::RegisterAllocation(Address addr, size_t size,
                                                      JitAllocationType type) {
  // The new allocation has to fit into this page.
  size_t end_offset = addr - address_ + size;
  CHECK_GE(jit_page_->size_, end_offset);

  CheckForRegionOverlap(jit_page_->allocations_, addr, size);
  return jit_page_->allocations_.emplace(addr, JitAllocation(size, type))
      .first->second;
}
  // In JitPageReference::LookupAllocation: the allocation has to exist at
  // exactly this start address.
  auto it = jit_page_->allocations_.find(addr);
  CHECK_NE(it, jit_page_->allocations_.end());
bool ThreadIsolation::JitPageReference::Contains(Address addr, size_t size,
                                                 JitAllocationType type) const {
  auto it = jit_page_->allocations_.find(addr);
  return it != jit_page_->allocations_.end() && it->second.Size() == size &&
         it->second.Type() == type;
}
void ThreadIsolation::JitPageReference::UnregisterAllocation(Address addr) {
  CHECK_EQ(jit_page_->allocations_.erase(addr), 1);
}
void ThreadIsolation::JitPageReference::UnregisterRange(Address start,
                                                        size_t size) {
  auto begin = jit_page_->allocations_.lower_bound(start);
  auto end = begin;
  while (end != jit_page_->allocations_.end() &&
         !AllocationIsBehindRange(start, size, end)) {
    end++;
  }
  jit_page_->allocations_.erase(begin, end);
}
void ThreadIsolation::JitPageReference::UnregisterAllocationsExcept(
    Address start, size_t size, const std::vector<Address>& keep) {
  JitPage::AllocationMap keep_allocations;

  auto keep_before = jit_page_->allocations_.lower_bound(start);
  auto keep_after = jit_page_->allocations_.lower_bound(start + size);

  // Keep all allocations before the start address.
  if (keep_before != jit_page_->allocations_.begin()) {
    keep_allocations.insert(jit_page_->allocations_.begin(), keep_before);
  }

  // Keep the allocations inside the range that are also listed in |keep|.
  // Both sides are sorted, so one forward merge pass suffices.
  auto keep_iterator = keep.begin();
  for (auto it = keep_before; it != keep_after; it++) {
    if (keep_iterator == keep.end()) break;
    if (it->first == *keep_iterator) {
      keep_allocations.emplace_hint(keep_allocations.end(), it->first,
                                    it->second);
      keep_iterator++;
    }
  }
  CHECK_EQ(keep_iterator, keep.end());

  // Keep all allocations after the range.
  keep_allocations.insert(keep_after, jit_page_->allocations_.end());

  jit_page_->allocations_.swap(keep_allocations);
}
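// The filtering above is the usual rebuild-and-swap pattern for pruning an
// ordered map: copy the survivors into a fresh map (emplace_hint(end(), ...)
// is amortized O(1) because the input is already sorted), then swap() the
// maps in O(1). A minimal sketch with hypothetical names:
//
//   AllocationMap kept;
//   kept.insert(allocations.begin(), keep_before);         // untouched head
//   for (auto it = keep_before; it != keep_after; ++it) {
//     if (ShouldKeep(it->first)) {
//       kept.emplace_hint(kept.end(), it->first, it->second);
//     }
//   }
//   kept.insert(keep_after, allocations.end());            // untouched tail
//   allocations.swap(kept);                                // drop the rest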
base::Address ThreadIsolation::JitPageReference::StartOfAllocationAt(
    base::Address inner_pointer) {
  return AllocationContaining(inner_pointer).first;
}
std::pair<base::Address, ThreadIsolation::JitAllocation&>
ThreadIsolation::JitPageReference::AllocationContaining(
    base::Address inner_pointer) {
  auto it = jit_page_->allocations_.upper_bound(inner_pointer);
  CHECK_NE(it, jit_page_->allocations_.begin());
  it--;
  size_t offset = inner_pointer - it->first;
  CHECK_GT(it->second.Size(), offset);
  return {it->first, it->second};
}
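// Note on the lookup above: upper_bound(p) returns the first allocation that
// starts strictly after p, so after the decrement |it| is the entry with the
// greatest start address <= p, i.e. the only candidate that can contain p.
// The size CHECK then rules out p landing in a gap between allocations.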
  // In ThreadIsolation::UnregisterJitPage: if the region being freed ends
  // before the page does, the remaining tail is split off into a page of its
  // own.
  if (to_free_end < jit_page_end) {
    size_t tail_size = jit_page_end - to_free_end;
    if (address == jit_page.Address()) {
      // The whole page goes away; remember the JitPage so it can be deleted
      // once its lock is released.
      to_delete = jit_page.JitPage();
// static
bool ThreadIsolation::WriteProtectMemory(
    Address addr, size_t size, PageAllocator::Permission page_permissions) {
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  return base::MemoryProtectionKey::SetPermissionsAndKey(
      {addr, size}, page_permissions,
      base::MemoryProtectionKey::kDefaultProtectionKey);
#else
  UNREACHABLE();
#endif
}
// static
WritableJitAllocation ThreadIsolation::RegisterInstructionStreamAllocation(
    Address addr, size_t size, bool enforce_write_api) {
  return RegisterJitAllocation(
      addr, size, JitAllocationType::kInstructionStream, enforce_write_api);
}
// static
void ThreadIsolation::RegisterJitAllocations(Address start,
                                             const std::vector<size_t>& sizes,
                                             JitAllocationType type) {
  size_t total_size = 0;
  for (auto size : sizes) {
    total_size += size;
  }

  // Large regions get split off into their own page.
  constexpr size_t kSplitThreshold = 0x40000;
  JitPageReference page_ref = total_size >= kSplitThreshold
                                  ? SplitJitPage(start, total_size)
                                  : LookupJitPage(start, total_size);

  for (auto size : sizes) {
    page_ref.RegisterAllocation(start, size, type);
    start += size;
  }
}
  // In ThreadIsolation::SplitJitPageLocked: carve the [addr, addr+size)
  // region out of its containing page, leaving the head and tail as pages of
  // their own.
  size_t head_size = addr - jit_page.Address();
  size_t tail_size = jit_page.Size() - size - head_size;
// static
std::pair<ThreadIsolation::JitPageReference, ThreadIsolation::JitPageReference>
ThreadIsolation::SplitJitPages(Address addr1, size_t size1, Address addr2,
                               size_t size2) {
  if (addr1 > addr2) {
    // Recurse with the regions swapped so the page locks are always taken in
    // ascending address order, then restore the caller's order.
    auto reversed_pair = SplitJitPages(addr2, size2, addr1, size1);
    return {std::move(reversed_pair.second), std::move(reversed_pair.first)};
  }
  // Tail of ThreadIsolation::StartOfJitAllocationAt: |page| is the page
  // reference found for |inner_pointer|.
  return page->StartOfAllocationAt(inner_pointer);
}
// static
bool ThreadIsolation::MakeExecutable(Address address, size_t size) {
#if V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
  return base::MemoryProtectionKey::SetPermissionsAndKey(
      {address, size}, PageAllocator::Permission::kReadWriteExecute,
      ThreadIsolation::pkey());
#else
  UNREACHABLE();
#endif
}
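// On Linux, SetPermissionsAndKey bottoms out in pkey_mprotect(): the pages
// become RWX at the page-table level but stay tagged with the JIT pkey, so
// writes remain gated by each thread's PKRU state. Plain-syscall sketch (not
// V8's wrapper):
//
//   pkey_mprotect(reinterpret_cast<void*>(address), size,
//                 PROT_READ | PROT_WRITE | PROT_EXEC, pkey);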
  JitPage* jit_page = it->second;
// static
WritableJitAllocation WritableJitAllocation::ForInstructionStream(
    Tagged<InstructionStream> istream) {
  return WritableJitAllocation(
      istream->address(), istream->Size(),
      ThreadIsolation::JitAllocationType::kInstructionStream,
      JitAllocationSource::kLookup);
}
#ifdef V8_ENABLE_WEBASSEMBLY
WritableJumpTablePair::WritableJumpTablePair(
    Address jump_table_address, size_t jump_table_size,
    Address far_jump_table_address, size_t far_jump_table_size)
    : jump_table_pages_(ThreadIsolation::SplitJitPages(
          jump_table_address, jump_table_size,
          far_jump_table_address, far_jump_table_size)) {}
WritableJumpTablePair::~WritableJumpTablePair() {
  if (jump_table_pages_.has_value()) {
    write_scope_.SetWritable();
WritableJumpTablePair::WritableJumpTablePair(
    Address jump_table_address, size_t jump_table_size,
    Address far_jump_table_address, size_t far_jump_table_size,
    WritableJumpTablePair::ForTestingTag)
    : writable_jump_table_(WritableJitAllocation::ForNonExecutableMemory(
          jump_table_address, jump_table_size,
          ThreadIsolation::JitAllocationType::kWasmJumpTable)),
      writable_far_jump_table_(WritableJitAllocation::ForNonExecutableMemory(
          far_jump_table_address, far_jump_table_size,
          ThreadIsolation::JitAllocationType::kWasmFarJumpTable)),
      write_scope_("for testing") {}
// static
WritableJumpTablePair WritableJumpTablePair::ForTesting(
    Address jump_table_address, size_t jump_table_size,
    Address far_jump_table_address, size_t far_jump_table_size) {
  return WritableJumpTablePair(jump_table_address, jump_table_size,
                               far_jump_table_address, far_jump_table_size,
                               ForTestingTag{});
}

#endif  // V8_ENABLE_WEBASSEMBLY
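// Usage sketch (assumed, testing-only API per the ForTestingTag above): the
// pair keeps both wasm tables writable for the scope's lifetime, so a jump
// table slot and its far-jump-table counterpart can be patched together.
//
//   WritableJumpTablePair pair = WritableJumpTablePair::ForTesting(
//       jump_table_address, jump_table_size,
//       far_jump_table_address, far_jump_table_size);
//   // ... patch slots in both tables while |pair| is alive ...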
template <size_t offset>
void WritableFreeSpace::ClearTagged(size_t count) const {
  base::Address start = address_ + offset;
  MemsetTagged(ObjectSlot(start), Tagged<Object>(kClearedFreeMemoryValue),
               count);
}

// static
void ThreadIsolation::CheckTrackedMemoryEmpty() {
  DCHECK(trusted_data_.jit_pages_->empty());
}