#include <AvailabilityMacros.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach/mach_init.h>
#include <mach/semaphore.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <pthread.h>
#include <sys/resource.h>
#include <sys/sysctl.h>

#include <vector>

#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"

#if defined(V8_TARGET_OS_IOS)
#include "src/base/ios-headers.h"
#else
#include <mach/mach_vm.h>
#endif

namespace v8 {
namespace base {

namespace {
// Maps an OS::MemoryPermission to the equivalent Mach VM protection bits.
vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
    case OS::MemoryPermission::kNoAccessWillJitLater:
      return VM_PROT_NONE;
    case OS::MemoryPermission::kRead:
      return VM_PROT_READ;
    case OS::MemoryPermission::kReadWrite:
      return VM_PROT_READ | VM_PROT_WRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
    case OS::MemoryPermission::kReadExecute:
      return VM_PROT_READ | VM_PROT_EXECUTE;
  }
  UNREACHABLE();
}
// Forwards to mach_vm_map() with identical current and maximum protections
// and no inheritance; all mappings in this file are created this way.
kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
                                  mach_vm_size_t size, int flags,
                                  mach_port_t port,
                                  memory_object_offset_t offset,
                                  vm_prot_t prot) {
  vm_prot_t current_prot = prot;
  vm_prot_t maximum_prot = current_prot;
  return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset,
                     FALSE, current_prot, maximum_prot, VM_INHERIT_NONE);
}

}  // namespace
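// Usage sketch (illustrative, not part of the original file): map one page
// backed by a Mach memory-entry port at a kernel-chosen address, read/write.
// `entry_port` is assumed to come from mach_make_memory_entry_64(), as in
// CreateSharedMemoryHandleForTesting() below.
//
//   mach_vm_address_t addr = 0;
//   kern_return_t kr =
//       mach_vm_map_wrapper(&addr, vm_page_size, VM_FLAGS_ANYWHERE,
//                           entry_port, /*offset=*/0,
//                           VM_PROT_READ | VM_PROT_WRITE);
//   if (kr == KERN_SUCCESS) {
//     // addr now points at the new mapping.
//   }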
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == nullptr) continue;
    unsigned long size;
#if V8_HOST_ARCH_32_BIT
    uint8_t* code_ptr = getsectiondata(header, SEG_TEXT, SECT_TEXT, &size);
#else
    const mach_header_64* header64 =
        reinterpret_cast<const mach_header_64*>(header);
    uint8_t* code_ptr = getsectiondata(header64, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == nullptr) continue;
    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr);
    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
                                          start + size, slide));
  }
  return result;
}
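// Usage sketch (illustrative; accessor names as declared for
// SharedLibraryAddress in src/base/platform/platform.h): each entry spans an
// image's __TEXT,__text section, with dyld's ASLR slide reported separately.
//
//   for (const auto& lib : v8::base::OS::GetSharedLibraryAddresses()) {
//     printf("%s: [%p, %p)\n", lib.library_path().c_str(),
//            reinterpret_cast<void*>(lib.start()),
//            reinterpret_cast<void*>(lib.end()));
//   }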
TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}
void OS::AdjustSchedulingParams() {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
  {
    // Check availability of scheduling params.
    uint32_t val = 0;
    size_t valSize = sizeof(val);
    int rc = sysctlbyname("kern.tcsm_available", &val, &valSize, NULL, 0);
    if (rc < 0 || !val) return;
  }

  {
    // Adjust scheduling params.
    int val = 1;
    int rc = sysctlbyname("kern.tcsm_enable", NULL, NULL, &val, sizeof(val));
    DCHECK_GE(rc, 0);
    USE(rc);
  }
#endif
}
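// The sysctls above appear to control macOS's per-thread "thread core
// scheduling mitigation" on Intel CPUs; V8 only opts in when the kernel
// reports it available. Probing the same knob directly (sketch):
//
//   uint32_t available = 0;
//   size_t len = sizeof(available);
//   if (sysctlbyname("kern.tcsm_available", &available, &len, NULL, 0) == 0 &&
//       available != 0) {
//     // The mitigation can then be enabled via kern.tcsm_enable.
//   }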
// static
Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
  return pthread_get_stackaddr_np(pthread_self());
}
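// Usage sketch (illustrative): macOS stacks grow downward from the address
// returned by pthread_get_stackaddr_np(), so current stack usage can be
// approximated as:
//
//   char probe;
//   uintptr_t start = reinterpret_cast<uintptr_t>(
//       v8::base::Stack::ObtainCurrentThreadStackStart());
//   size_t used = start - reinterpret_cast<uintptr_t>(&probe);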
// static
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(
    size_t size) {
  mach_vm_size_t vm_size = size;
  mach_port_t port;
  kern_return_t kr = mach_make_memory_entry_64(
      mach_task_self(), &vm_size, 0,
      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
      MACH_PORT_NULL);
  if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
  return SharedMemoryHandleFromMachMemoryEntry(port);
}
// static
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  DCHECK_NE(kInvalidSharedMemoryHandle, handle);
  mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
  CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
}
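// Lifecycle sketch (illustrative): a handle from
// CreateSharedMemoryHandleForTesting() wraps a Mach memory-entry port and
// must be released with DestroySharedMemoryHandle():
//
//   PlatformSharedMemoryHandle handle =
//       OS::CreateSharedMemoryHandleForTesting(OS::AllocatePageSize());
//   if (handle != kInvalidSharedMemoryHandle) {
//     // ... map it with OS::AllocateShared() below ...
//     OS::DestroySharedMemoryHandle(handle);
//   }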
// static
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
                         PlatformSharedMemoryHandle handle, uint64_t offset) {
  DCHECK_EQ(0, size % AllocatePageSize());

  mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(hint);
  vm_prot_t prot = GetVMProtFromMemoryPermission(access);
  mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
  kern_return_t kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED,
                                         shared_mem_port, offset, prot);

  if (kr != KERN_SUCCESS) {
    // Retry without the address hint.
    kr = mach_vm_map_wrapper(&addr, size, VM_FLAGS_ANYWHERE, shared_mem_port,
                             offset, prot);
  }

  if (kr != KERN_SUCCESS) return nullptr;
  return reinterpret_cast<void*>(addr);
}
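// Usage sketch (illustrative), continuing the lifecycle example above:
//
//   void* ptr = OS::AllocateShared(hint, OS::AllocatePageSize(),
//                                  OS::MemoryPermission::kReadWrite, handle,
//                                  /*offset=*/0);
//   // ptr is nullptr only if both the fixed-address attempt and the
//   // VM_FLAGS_ANYWHERE fallback failed.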
// static
bool OS::RemapPages(const void* address, size_t size, void* new_address,
                    MemoryPermission access) {
  DCHECK(IsAligned(reinterpret_cast<uintptr_t>(address), AllocatePageSize()));
  DCHECK(
      IsAligned(reinterpret_cast<uintptr_t>(new_address), AllocatePageSize()));
  DCHECK(IsAligned(size, AllocatePageSize()));

  vm_prot_t cur_protection = GetVMProtFromMemoryPermission(access);
  vm_prot_t max_protection;
  // Asks the kernel to remap *on top* of an existing mapping, rather than
  // copying over it.
  int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
  mach_vm_address_t target = reinterpret_cast<mach_vm_address_t>(new_address);
  kern_return_t ret =
      mach_vm_remap(mach_task_self(), &target, size, 0, flags,
                    mach_task_self(),
                    reinterpret_cast<mach_vm_address_t>(address), FALSE,
                    &cur_protection, &max_protection, VM_INHERIT_NONE);

  if (ret != KERN_SUCCESS) return false;

  // Did we get the address we wanted?
  CHECK_EQ(new_address, reinterpret_cast<void*>(target));

  return true;
}
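// Usage sketch (illustrative): aliasing one page of existing code at a
// second page-aligned address without copying; because the remap is done
// with copy=FALSE, both mappings end up backed by the same physical pages.
//
//   bool ok = OS::RemapPages(src_page, OS::AllocatePageSize(), dst_page,
//                            OS::MemoryPermission::kReadExecute);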
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
                                             OS::MemoryPermission access,
                                             PlatformSharedMemoryHandle handle,
                                             uint64_t offset) {
  DCHECK(Contains(address, size));

  vm_prot_t prot = GetVMProtFromMemoryPermission(access);
  mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
  mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
  kern_return_t kr =
      mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                          shared_mem_port, offset, prot);
  return kr == KERN_SUCCESS;
}
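// Note: unlike OS::AllocateShared() above, this in-reservation variant maps
// with VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, replacing the placeholder pages
// at the exact requested address, and deliberately has no VM_FLAGS_ANYWHERE
// fallback.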
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT && !defined(V8_OS_IOS)
// pthread_jit_write_protect_np() carries an availability attribute; the
// direct call below is intentional, so silence the availability warning.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
V8_BASE_EXPORT void SetJitWriteProtected(int enable) {
  pthread_jit_write_protect_np(enable);
}
#pragma clang diagnostic pop
#endif  // V8_HAS_PTHREAD_JIT_WRITE_PROTECT && !defined(V8_OS_IOS)

}  // namespace base
}  // namespace v8
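// Usage sketch (illustrative): the typical W^X toggle around JIT code
// emission on Apple Silicon, using the helper above. Passing 0 makes
// MAP_JIT pages writable for the calling thread; passing 1 re-protects
// them for execution.
//
//   v8::base::SetJitWriteProtected(0);
//   // ... write generated machine code ...
//   v8::base::SetJitWriteProtected(1);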