v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
Loading...
Searching...
No Matches
platform-darwin.cc
Go to the documentation of this file.
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// Platform-specific code shared between macOS and iOS goes here. The
6// POSIX-compatible parts are in platform-posix.cc.
7
8#include <AvailabilityMacros.h>
9#include <dlfcn.h>
10#include <errno.h>
11#include <libkern/OSAtomic.h>
12#include <mach-o/dyld.h>
13#include <mach-o/getsect.h>
14#include <mach/mach.h>
15#include <mach/mach_init.h>
16#include <mach/semaphore.h>
17#include <mach/task.h>
18#include <mach/vm_map.h>
19#include <mach/vm_statistics.h>
20#include <pthread.h>
21#include <semaphore.h>
22#include <signal.h>
23#include <stdarg.h>
24#include <stdlib.h>
25#include <string.h>
26#include <sys/mman.h>
27#include <sys/resource.h>
28#include <sys/sysctl.h>
29#include <sys/time.h>
30#include <sys/types.h>
31#include <unistd.h>
32
33#include <cmath>
34
35#undef MAP_TYPE
36
37#include "src/base/macros.h"
41
42#if defined(V8_TARGET_OS_IOS)
44#else
45#include <mach/mach_vm.h>
46#endif
47
48namespace v8 {
49namespace base {
50
51namespace {
52
// Translates a V8 OS::MemoryPermission into the equivalent Mach VM
// protection bits. NOTE(review): the extracted copy had its case labels
// elided; they are reconstructed here from the five distinct return values
// and the OS::MemoryPermission enumerators — confirm against upstream.
vm_prot_t GetVMProtFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
    case OS::MemoryPermission::kNoAccessWillJitLater:
      return VM_PROT_NONE;
    case OS::MemoryPermission::kRead:
      return VM_PROT_READ;
    case OS::MemoryPermission::kReadWrite:
      return VM_PROT_READ | VM_PROT_WRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
    case OS::MemoryPermission::kReadExecute:
      return VM_PROT_READ | VM_PROT_EXECUTE;
  }
  UNREACHABLE();
}
69
// Convenience wrapper around mach_vm_map() for the current task: the mapping
// is created with identical current and maximum protections, no copy
// semantics, and VM_INHERIT_NONE.
kern_return_t mach_vm_map_wrapper(mach_vm_address_t* address,
                                  mach_vm_size_t size, int flags,
                                  mach_port_t port,
                                  memory_object_offset_t offset,
                                  vm_prot_t prot) {
  const vm_prot_t cur_protection = prot;
  const vm_prot_t max_protection = cur_protection;
  return mach_vm_map(mach_task_self(), address, size, 0, flags, port, offset,
                     FALSE, cur_protection, max_protection, VM_INHERIT_NONE);
}
80
81} // namespace
82
// Walks every Mach-O image registered with dyld and records the address
// range of its __TEXT,__text section plus the image's ASLR slide.
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> libraries;
  const unsigned int image_count = _dyld_image_count();
  for (unsigned int image = 0; image < image_count; ++image) {
    const mach_header* header = _dyld_get_image_header(image);
    if (header == nullptr) continue;
    unsigned long text_size;
#if V8_HOST_ARCH_I32
    uint8_t* text_start =
        getsectiondata(header, SEG_TEXT, SECT_TEXT, &text_size);
#else
    // On 64-bit hosts getsectiondata() takes the 64-bit header layout.
    uint8_t* text_start =
        getsectiondata(reinterpret_cast<const mach_header_64*>(header),
                       SEG_TEXT, SECT_TEXT, &text_size);
#endif
    if (text_start == nullptr) continue;
    const uintptr_t begin = reinterpret_cast<uintptr_t>(text_start);
    libraries.emplace_back(_dyld_get_image_name(image), begin,
                           begin + text_size,
                           _dyld_get_image_vmaddr_slide(image));
  }
  return libraries;
}
105
107
108TimezoneCache* OS::CreateTimezoneCache() {
109 return new PosixDefaultTimezoneCache();
110}
111
// Opts the process into the kernel's TCSM scheduling mitigation on x86
// hosts via the kern.tcsm_* sysctls; a no-op elsewhere.
// NOTE(review): the extracted copy lost the function signature line; it is
// reconstructed from the class reference (`static void
// AdjustSchedulingParams()`) — confirm against upstream.
void OS::AdjustSchedulingParams() {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
  {
    // Check availability of scheduling params; bail out silently if the
    // kernel does not expose (or disables) the knob.
    uint32_t val = 0;
    size_t valSize = sizeof(val);
    int rc = sysctlbyname("kern.tcsm_available", &val, &valSize, NULL, 0);
    if (rc < 0 || !val) return;
  }

  {
    // Adjust scheduling params. Failure is only a debug-mode concern.
    uint32_t val = 1;
    int rc = sysctlbyname("kern.tcsm_enable", NULL, NULL, &val, sizeof(val));
    DCHECK_GE(rc, 0);
    USE(rc);
  }
#endif
}
131
132std::optional<OS::MemoryRange> OS::GetFirstFreeMemoryRangeWithin(
133 OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
134 size_t alignment) {
135 return std::nullopt;
136}
137
138// static
139Stack::StackSlot Stack::ObtainCurrentThreadStackStart() {
140 return pthread_get_stackaddr_np(pthread_self());
141}
142
// static
// Creates a named Mach memory entry of `size` bytes with read/write
// protection and wraps its port in a PlatformSharedMemoryHandle. Returns
// kInvalidSharedMemoryHandle on failure.
// NOTE(review): the extracted copy lost the signature line; reconstructed
// from the class reference (`static PlatformSharedMemoryHandle
// CreateSharedMemoryHandleForTesting(size_t size)`) — confirm upstream.
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(
    size_t size) {
  mach_vm_size_t vm_size = size;
  mach_port_t port;
  kern_return_t kr = mach_make_memory_entry_64(
      mach_task_self(), &vm_size, 0,
      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port,
      MACH_PORT_NULL);
  if (kr != KERN_SUCCESS) return kInvalidSharedMemoryHandle;
  return SharedMemoryHandleFromMachMemoryEntry(port);
}
154
// static
// Releases the Mach port underlying a shared-memory handle.
// NOTE(review): the extracted copy lost the signature and the validity
// check; both reconstructed from the class reference (`static void
// DestroySharedMemoryHandle(PlatformSharedMemoryHandle)` and the otherwise
// unused DCHECK_NE) — confirm against upstream.
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  DCHECK_NE(kInvalidSharedMemoryHandle, handle);
  mach_port_t port = MachMemoryEntryFromSharedMemoryHandle(handle);
  CHECK_EQ(KERN_SUCCESS, mach_port_deallocate(mach_task_self(), port));
}
161
// static
// Maps a shared-memory handle at `hint` (page-multiple `size`), falling back
// to a kernel-chosen address if the hint cannot be satisfied. Returns the
// mapped address, or nullptr if both attempts fail.
void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
                         PlatformSharedMemoryHandle handle, uint64_t offset) {
  DCHECK_EQ(0, size % AllocatePageSize());

  vm_prot_t prot = GetVMProtFromMemoryPermission(access);
  mach_port_t memory_entry = MachMemoryEntryFromSharedMemoryHandle(handle);
  mach_vm_address_t mapped_at = reinterpret_cast<mach_vm_address_t>(hint);

  // First attempt: honor the placement hint exactly.
  kern_return_t result = mach_vm_map_wrapper(&mapped_at, size, VM_FLAGS_FIXED,
                                             memory_entry, offset, prot);
  if (result != KERN_SUCCESS) {
    // Second attempt: anywhere the kernel likes.
    result = mach_vm_map_wrapper(&mapped_at, size, VM_FLAGS_ANYWHERE,
                                 memory_entry, offset, prot);
  }

  return result == KERN_SUCCESS ? reinterpret_cast<void*>(mapped_at) : nullptr;
}
182
// static
// Remaps the pages at `address` on top of `new_address` without copying,
// giving the destination `access` protection. All three of address,
// new_address, and size must be page-aligned. Returns false if the kernel
// rejects the remap.
// NOTE(review): the extracted copy lost one elided line; the size-alignment
// DCHECK is reconstructed to parallel the two visible alignment checks —
// confirm against upstream.
bool OS::RemapPages(const void* address, size_t size, void* new_address,
                    MemoryPermission access) {
  DCHECK(IsAligned(reinterpret_cast<uintptr_t>(address), AllocatePageSize()));
  DCHECK(
      IsAligned(reinterpret_cast<uintptr_t>(new_address), AllocatePageSize()));
  DCHECK(IsAligned(size, AllocatePageSize()));

  vm_prot_t cur_protection = GetVMProtFromMemoryPermission(access);
  vm_prot_t max_protection;
  // Asks the kernel to remap *on top* of an existing mapping, rather than
  // copying the data.
  int flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
  mach_vm_address_t target = reinterpret_cast<mach_vm_address_t>(new_address);
  kern_return_t ret =
      mach_vm_remap(mach_task_self(), &target, size, 0, flags, mach_task_self(),
                    reinterpret_cast<mach_vm_address_t>(address), FALSE,
                    &cur_protection, &max_protection, VM_INHERIT_NONE);

  if (ret != KERN_SUCCESS) return false;

  // Did we get the address we wanted?
  CHECK_EQ(new_address, reinterpret_cast<void*>(target));

  return true;
}
209
// Maps a shared-memory handle at a fixed `address` inside this reservation,
// overwriting the placeholder mapping there. Returns true on success.
// NOTE(review): two elided signature lines reconstructed from the class
// reference: `bool AllocateShared(void*, size_t, OS::MemoryPermission,
// PlatformSharedMemoryHandle, uint64_t)`.
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
                                             OS::MemoryPermission access,
                                             PlatformSharedMemoryHandle handle,
                                             uint64_t offset) {
  DCHECK(Contains(address, size));

  vm_prot_t prot = GetVMProtFromMemoryPermission(access);
  mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
  mach_port_t shared_mem_port = MachMemoryEntryFromSharedMemoryHandle(handle);
  // VM_FLAGS_OVERWRITE: replace the reservation's existing mapping in place.
  kern_return_t kr =
      mach_vm_map_wrapper(&addr, size, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                          shared_mem_port, offset, prot);
  return kr == KERN_SUCCESS;
}
224
// See platform-ios.cc for the iOS implementation.
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT && !defined(V8_OS_IOS)
// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"

// Toggles pthread JIT write protection for the calling thread: nonzero
// `enable` turns protection on, zero turns it off.
V8_BASE_EXPORT void SetJitWriteProtected(int enable) {
  pthread_jit_write_protect_np(enable);
}

#pragma clang diagnostic pop
#endif
238
239} // namespace base
240} // namespace v8
#define V8_BASE_EXPORT
Definition base-export.h:26
bool Contains(void *region_addr, size_t region_size) const
Definition platform.h:459
V8_WARN_UNUSED_RESULT bool AllocateShared(void *address, size_t size, OS::MemoryPermission access, PlatformSharedMemoryHandle handle, uint64_t offset)
static V8_WARN_UNUSED_RESULT void * AllocateShared(size_t size, MemoryPermission access)
static void SignalCodeMovingGC()
static size_t AllocatePageSize()
static PlatformSharedMemoryHandle CreateSharedMemoryHandleForTesting(size_t size)
static std::vector< SharedLibraryAddress > GetSharedLibraryAddresses()
uintptr_t Address
Definition platform.h:315
static V8_WARN_UNUSED_RESULT bool RemapPages(const void *address, size_t size, void *new_address, MemoryPermission access)
static void DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle)
static TimezoneCache * CreateTimezoneCache()
static std::optional< MemoryRange > GetFirstFreeMemoryRangeWithin(Address boundary_start, Address boundary_end, size_t minimum_size, size_t alignment)
static void AdjustSchedulingParams()
static Stack::StackSlot ObtainCurrentThreadStackStart()
int start
int32_t offset
__BEGIN_DECLS kern_return_t mach_vm_remap(vm_map_t target_task, mach_vm_address_t *target_address, mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src_task, mach_vm_address_t src_address, boolean_t copy, vm_prot_t *cur_protection, vm_prot_t *max_protection, vm_inherit_t inheritance)
kern_return_t mach_vm_map(vm_map_t target_task, mach_vm_address_t *address, mach_vm_size_t size, mach_vm_offset_t mask, int flags, mem_entry_name_port_t object, memory_object_offset_t offset, boolean_t copy, vm_prot_t cur_protection, vm_prot_t max_protection, vm_inherit_t inheritance)
ZoneVector< RpoNumber > & result
static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle
intptr_t PlatformSharedMemoryHandle
#define UNREACHABLE()
Definition logging.h:67
#define DCHECK_NE(v1, v2)
Definition logging.h:486
#define DCHECK_GE(v1, v2)
Definition logging.h:488
#define CHECK_EQ(lhs, rhs)
#define DCHECK(condition)
Definition logging.h:482
#define DCHECK_EQ(v1, v2)
Definition logging.h:485
#define USE(...)
Definition macros.h:293
constexpr bool IsAligned(T value, U alignment)
Definition macros.h:403