v8
V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
platform-fuchsia.cc
1// Copyright 2017 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <fidl/fuchsia.kernel/cpp/fidl.h>
6#include <lib/component/incoming/cpp/protocol.h>
7#include <lib/zx/resource.h>
8#include <lib/zx/thread.h>
9#include <lib/zx/vmar.h>
10#include <lib/zx/vmo.h>
11
12#include <optional>
13
14#include "src/base/bits.h"
15#include "src/base/macros.h"
16#include "src/base/platform/platform-posix-time.h"
17#include "src/base/platform/platform-posix.h"
18#include "src/base/platform/platform.h"
19
20namespace v8 {
21namespace base {
22
23namespace {
24
25static zx_handle_t g_vmex_resource = ZX_HANDLE_INVALID;
26
27static void* g_root_vmar_base = nullptr;
28
29// If VmexResource is unavailable or does not return a valid handle then
30// this will be observed as failures in vmo_replace_as_executable() calls.
31void SetVmexResource() {
32 DCHECK_EQ(g_vmex_resource, ZX_HANDLE_INVALID);
33
34 auto vmex_resource_client =
35 component::Connect<fuchsia_kernel::VmexResource>();
36 if (vmex_resource_client.is_error()) {
37 return;
38 }
39
40 fidl::SyncClient sync_vmex_resource_client(
41 std::move(vmex_resource_client.value()));
42 auto result = sync_vmex_resource_client->Get();
43 if (result.is_error()) {
44 return;
45 }
46
47 g_vmex_resource = result->resource().release();
48}
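// The VMEX resource is the Zircon capability that authorizes marking a VMO as
// executable. If it could not be obtained, g_vmex_resource remains
// ZX_HANDLE_INVALID and zx_vmo_replace_as_executable() in CreateAndMapVmo()
// below will fail, so executable mappings cannot be created.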
49
50zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
51 switch (access) {
52 case OS::MemoryPermission::kNoAccess:
53 case OS::MemoryPermission::kNoAccessWillJitLater:
54 return 0; // no permissions
55 case OS::MemoryPermission::kRead:
56 return ZX_VM_PERM_READ;
57 case OS::MemoryPermission::kReadWrite:
58 return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
59 case OS::MemoryPermission::kReadWriteExecute:
60 return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
61 case OS::MemoryPermission::kReadExecute:
62 return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
63 }
64 UNREACHABLE();
65}
66
67// Determine the ZX_VM_ALIGN_X constant corresponding to the specified alignment.
68// Returns 0 if there is none.
69zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) {
70 // The alignment must be one of the ZX_VM_ALIGN_X constants.
71 // See zircon/system/public/zircon/types.h.
72 static_assert(
73 ZX_VM_ALIGN_1KB == (10 << ZX_VM_ALIGN_BASE),
74 "Fuchsia's ZX_VM_ALIGN_1KB constant doesn't match expected value");
75 static_assert(
76 ZX_VM_ALIGN_4GB == (32 << ZX_VM_ALIGN_BASE),
77 "Fuchsia's ZX_VM_ALIGN_4GB constant doesn't match expected value");
78 zx_vm_option_t alignment_log2 = 0;
79 for (int shift = 10; shift <= 32; shift++) {
80 if (alignment == (size_t{1} << shift)) {
81 alignment_log2 = shift;
82 break;
83 }
84 }
85 return alignment_log2 << ZX_VM_ALIGN_BASE;
86}
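// For illustration: an alignment of 2 MiB (size_t{1} << 21) matches the loop at
// shift == 21 and yields 21 << ZX_VM_ALIGN_BASE, Zircon's 2 MiB alignment
// option; a non-power-of-two alignment, or one outside the 1 KiB..4 GiB range,
// leaves alignment_log2 at 0 and the function returns 0.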
87
88enum class PlacementMode {
89 // Attempt to place the object at the provided address, otherwise elsewhere.
90 kUseHint,
91 // Place the object anywhere it fits.
92 kAnywhere,
93 // Place the object at the provided address, otherwise fail.
94 kFixed
95};
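// How these modes are used below: OS::Allocate and OS::AllocateShared pass
// kUseHint when the caller supplies a non-null address and kAnywhere otherwise,
// while the AddressSpaceReservation methods pass kFixed, since sub-allocations
// must land exactly at the requested address inside the reservation.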
96
97void* MapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
98 void* address, const zx::vmo& vmo, uint64_t offset,
99 PlacementMode placement, size_t size, size_t alignment,
100 OS::MemoryPermission access) {
101 DCHECK_EQ(0, size % page_size);
102 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
103 DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
104
105 zx_vm_option_t options = GetProtectionFromMemoryPermission(access);
106
107 size_t vmar_offset = 0;
108 if (placement == PlacementMode::kAnywhere) {
109 zx_vm_option_t alignment_option =
110 GetAlignmentOptionFromAlignment(alignment);
111 if (alignment_option == 0) {
112 // An invalid alignment was specified; it is not possible to provide an
113 // allocation with the correct alignment.
114 return nullptr;
115 }
116 options |= alignment_option;
117 } else {
118 CHECK_EQ(reinterpret_cast<intptr_t>(address) % alignment, 0);
119 // Try placing the mapping at the specified address.
120 uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
121 uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
122 DCHECK_GE(target_addr, base);
123 vmar_offset = target_addr - base;
124 options |= ZX_VM_SPECIFIC;
125 }
126
127 zx_vaddr_t result;
128 zx_status_t status = vmar.map(options, vmar_offset, vmo, offset, size, &result);
129
130 if (status == ZX_OK) {
131 DCHECK_EQ(result % alignment, 0);
132 return reinterpret_cast<void*>(result);
133 }
134
135 if (placement != PlacementMode::kUseHint) {
136 return nullptr;
137 }
138 // The hint failed, so we try again without the hint but with alignment
139 // options.
140 // TODO(404563927): Support alignment > 4GB. CppGC's HeapCage allocates with a
141 // 32GB alignment, and the allocation fails on Fuchsia at the moment if the
142 // provided placement hint is not available. PartitionAlloc already solved
143 // this issue, so maybe that solution could be used here as well.
144 return MapVmo(vmar, vmar_base, page_size, nullptr, vmo, offset,
145 PlacementMode::kAnywhere, size, alignment, access);
146}
147
148void* CreateAndMapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
149 void* address, PlacementMode placement, size_t size,
150 size_t alignment, OS::MemoryPermission access) {
151 zx::vmo vmo;
152 if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
153 return nullptr;
154 }
155 static const char kVirtualMemoryName[] = "v8-virtualmem";
156 vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
157 strlen(kVirtualMemoryName));
158
159 // Always call zx_vmo_replace_as_executable() in case the memory will need
160 // to be marked as executable in the future.
161 // TODO(https://crbug.com/v8/8899): Only call this when we know that the
162 // region will need to be marked as executable in the future.
163 zx::unowned_resource vmex(g_vmex_resource);
164 if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) {
165 return nullptr;
166 }
167
168 return MapVmo(vmar, vmar_base, page_size, address, vmo, 0, placement, size,
169 alignment, access);
170}
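// Note: the local zx::vmo handle created above is closed when it goes out of
// scope after MapVmo() returns. The mapping itself keeps the underlying VMO
// pages alive, so the handle does not need to be retained here.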
171
172bool UnmapVmo(const zx::vmar& vmar, size_t page_size, void* address,
173 size_t size) {
174 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
175 DCHECK_EQ(0, size % page_size);
176 return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
177}
178
179bool SetPermissionsInternal(const zx::vmar& vmar, size_t page_size,
180 void* address, size_t size,
181 OS::MemoryPermission access) {
182 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
183 DCHECK_EQ(0, size % page_size);
184 uint32_t prot = GetProtectionFromMemoryPermission(access);
185 zx_status_t status =
186 vmar.protect(prot, reinterpret_cast<uintptr_t>(address), size);
187
188 // Any failure that's not OOM likely indicates a bug in the caller (e.g.
189 // using an invalid mapping) so attempt to catch that here to facilitate
190 // debugging of these failures. According to the documentation,
191 // zx_vmar_protect cannot return ZX_ERR_NO_MEMORY, so any error here is
192 // unexpected.
193 CHECK_EQ(status, ZX_OK);
194 return status == ZX_OK;
195}
196
197bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size,
198 void* address, size_t size) {
199 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
200 DCHECK_EQ(0, size % page_size);
201 uint64_t address_int = reinterpret_cast<uint64_t>(address);
202 return vmar.op_range(ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0) ==
203 ZX_OK;
204}
205
206zx_status_t CreateAddressSpaceReservationInternal(
207 const zx::vmar& vmar, void* vmar_base, size_t page_size, void* address,
208 PlacementMode placement, size_t size, size_t alignment,
209 OS::MemoryPermission max_permission, zx::vmar* child,
210 zx_vaddr_t* child_addr) {
211 DCHECK_EQ(0, size % page_size);
212 DCHECK_EQ(0, alignment % page_size);
213 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % alignment);
214 DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);
215
216 // TODO(v8) determine these based on max_permission.
217 zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
218 ZX_VM_CAN_MAP_EXECUTE | ZX_VM_CAN_MAP_SPECIFIC;
219
220 zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
221 CHECK_NE(0, alignment_option); // Invalid alignment specified
222 options |= alignment_option;
223
224 size_t vmar_offset = 0;
225 if (placement != PlacementMode::kAnywhere) {
226 // Try placing the mapping at the specified address.
227 uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
228 uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
229 DCHECK_GE(target_addr, base);
230 vmar_offset = target_addr - base;
231 options |= ZX_VM_SPECIFIC;
232 }
233
234 zx_status_t status =
235 vmar.allocate(options, vmar_offset, size, child, child_addr);
236 if (status != ZX_OK && placement == PlacementMode::kUseHint) {
237 // If a placement hint was specified but couldn't be used (for example,
238 // because the offset overlapped another mapping), retry without a
239 // vmar_offset to let the kernel pick another location.
240 options &= ~(ZX_VM_SPECIFIC);
241 status = vmar.allocate(options, 0, size, child, child_addr);
242 }
243
244 return status;
245}
246
247} // namespace
248
249TimezoneCache* OS::CreateTimezoneCache() {
250 return new PosixDefaultTimezoneCache();
251}
252
253// static
254void OS::Initialize(AbortMode abort_mode, const char* const gc_fake_mmap) {
255 PosixInitializeCommon(abort_mode, gc_fake_mmap);
256
257 // Determine base address of root VMAR.
258 zx_info_vmar_t info;
259 zx_status_t status = zx::vmar::root_self()->get_info(
260 ZX_INFO_VMAR, &info, sizeof(info), nullptr, nullptr);
261 CHECK_EQ(ZX_OK, status);
262 g_root_vmar_base = reinterpret_cast<void*>(info.base);
263
264 SetVmexResource();
265}
266
267// static
268void* OS::Allocate(void* address, size_t size, size_t alignment,
269 MemoryPermission access) {
270 PlacementMode placement =
271 address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
272 return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base,
273 AllocatePageSize(), address, placement, size,
274 alignment, access);
275}
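// Illustrative call (hypothetical sizes): allocate 1 MiB of read/write memory
// with 64 KiB alignment and no placement preference, then release it:
//
//   void* p = OS::Allocate(nullptr, size_t{1} << 20, size_t{1} << 16,
//                          OS::MemoryPermission::kReadWrite);
//   if (p != nullptr) OS::Free(p, size_t{1} << 20);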
276
277// static
278void OS::Free(void* address, size_t size) {
279 CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
280}
281
282// static
283void* OS::AllocateShared(void* address, size_t size,
284 MemoryPermission access,
285 PlatformSharedMemoryHandle handle, uint64_t offset) {
286 PlacementMode placement =
287 address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
288 zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
289 return MapVmo(*zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(),
290 address, *vmo, offset, placement, size, AllocatePageSize(),
291 access);
292}
293
294// static
295void OS::FreeShared(void* address, size_t size) {
296 CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
297}
298
299// static
300void OS::Release(void* address, size_t size) { Free(address, size); }
301
302// static
303bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
304 return SetPermissionsInternal(*zx::vmar::root_self(), CommitPageSize(),
305 address, size, access);
306}
307
308void OS::SetDataReadOnly(void* address, size_t size) {
309 CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead));
310}
311
312// static
313bool OS::RecommitPages(void* address, size_t size, MemoryPermission access) {
314 return SetPermissions(address, size, access);
315}
316
317// static
318bool OS::DiscardSystemPages(void* address, size_t size) {
319 return DiscardSystemPagesInternal(*zx::vmar::root_self(), CommitPageSize(),
320 address, size);
321}
322
323// static
324bool OS::DecommitPages(void* address, size_t size) {
325 // We rely on DiscardSystemPages decommitting the pages immediately (via
326 // ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
327 // should they be accessed again later on.
328 return SetPermissions(address, size, MemoryPermission::kNoAccess) &&
329 DiscardSystemPages(address, size);
330}
331
332// static
333bool OS::SealPages(void* address, size_t size) { return false; }
334
335// static
336bool OS::CanReserveAddressSpace() { return true; }
337
338// static
339std::optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
340 void* hint, size_t size, size_t alignment,
341 MemoryPermission max_permission) {
342 DCHECK_EQ(0, reinterpret_cast<Address>(hint) % alignment);
343 zx::vmar child;
344 zx_vaddr_t child_addr;
345 PlacementMode placement =
346 hint != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
347 zx_status_t status = CreateAddressSpaceReservationInternal(
348 *zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(), hint,
349 placement, size, alignment, max_permission, &child, &child_addr);
350 if (status != ZX_OK) return {};
351 return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
352 child.release());
353}
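// Illustrative use (hypothetical sizes): reserve 4 MiB of address space with
// 1 MiB alignment and no placement hint, then hand it back:
//
//   auto reservation = OS::CreateAddressSpaceReservation(
//       nullptr, size_t{4} << 20, size_t{1} << 20,
//       OS::MemoryPermission::kReadWrite);
//   if (reservation) OS::FreeAddressSpaceReservation(*reservation);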
354
355// static
356void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
357 // Destroy the vmar and release the handle.
358 zx::vmar vmar(reservation.vmar_);
359 CHECK_EQ(ZX_OK, vmar.destroy());
360}
361
362// static
363PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
364 zx::vmo vmo;
365 if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
366 return kInvalidSharedMemoryHandle;
367 }
368 return SharedMemoryHandleFromVMO(vmo.release());
369}
370
371// static
372void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
373 DCHECK_NE(kInvalidSharedMemoryHandle, handle);
374 zx_handle_t vmo = VMOFromSharedMemoryHandle(handle);
375 zx_handle_close(vmo);
376}
377
378// static
379bool OS::HasLazyCommits() { return true; }
380
381std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
382 UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
383}
384
385void OS::SignalCodeMovingGC() {
386 UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
387}
388
389int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
390 const auto kNanosPerMicrosecond = 1000ULL;
391 const auto kMicrosPerSecond = 1000000ULL;
392
393 zx_info_thread_stats_t info = {};
394 if (zx::thread::self()->get_info(ZX_INFO_THREAD_STATS, &info, sizeof(info),
395 nullptr, nullptr) != ZX_OK) {
396 return -1;
397 }
398
399 // First convert to microseconds, rounding up.
400 const uint64_t micros_since_thread_started =
401 (info.total_runtime + kNanosPerMicrosecond - 1ULL) / kNanosPerMicrosecond;
402
403 *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
404 *usecs =
405 static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
406 return 0;
407}
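// Worked example of the conversion above: total_runtime = 2,500,000,100 ns
// rounds up to 2,500,001 us, which is reported as *secs = 2 and
// *usecs = 500,001.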
408
409void OS::AdjustSchedulingParams() {}
410
411std::optional<OS::MemoryRange> OS::GetFirstFreeMemoryRangeWithin(
412 OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
413 size_t alignment) {
414 return std::nullopt;
415}
416
417std::optional<AddressSpaceReservation>
418AddressSpaceReservation::CreateSubReservation(
419 void* address, size_t size, OS::MemoryPermission max_permission) {
420 DCHECK(Contains(address, size));
421
422 zx::vmar child;
423 zx_vaddr_t child_addr;
424 zx_status_t status = CreateAddressSpaceReservationInternal(
425 *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
426 PlacementMode::kFixed, size, OS::AllocatePageSize(), max_permission,
427 &child, &child_addr);
428 if (status != ZX_OK) return {};
429 DCHECK_EQ(reinterpret_cast<void*>(child_addr), address);
430 return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
431 child.release());
432}
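// Illustrative use (hypothetical names and sizes): carve a sub-region out of an
// existing reservation and back its first page with read/write memory:
//
//   auto sub = reservation.CreateSubReservation(
//       address_inside_reservation, size_t{1} << 20,
//       OS::MemoryPermission::kReadWrite);
//   if (sub) sub->Allocate(sub->base(), OS::AllocatePageSize(),
//                          OS::MemoryPermission::kReadWrite);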
433
434bool AddressSpaceReservation::FreeSubReservation(
435 AddressSpaceReservation reservation) {
436 OS::FreeAddressSpaceReservation(reservation);
437 return true;
438}
439
440bool AddressSpaceReservation::Allocate(void* address, size_t size,
441 OS::MemoryPermission access) {
442 DCHECK(Contains(address, size));
443 void* allocation = CreateAndMapVmo(
444 *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
445 PlacementMode::kFixed, size, OS::AllocatePageSize(), access);
446 DCHECK(!allocation || allocation == address);
447 return allocation != nullptr;
448}
449
450bool AddressSpaceReservation::Free(void* address, size_t size) {
451 DCHECK(Contains(address, size));
452 return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
453 size);
454}
455
456bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
457 OS::MemoryPermission access,
458 PlatformSharedMemoryHandle handle,
459 uint64_t offset) {
460 DCHECK(Contains(address, size));
461 zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
462 return MapVmo(*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(),
463 address, *vmo, offset, PlacementMode::kFixed, size,
464 OS::AllocatePageSize(), access);
465}
466
467bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
468 DCHECK(Contains(address, size));
469 return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
470 size);
471}
472
473bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
474 OS::MemoryPermission access) {
475 DCHECK(Contains(address, size));
476 return SetPermissionsInternal(*zx::unowned_vmar(vmar_), OS::CommitPageSize(),
477 address, size, access);
478}
479
480bool AddressSpaceReservation::RecommitPages(void* address, size_t size,
481 OS::MemoryPermission access) {
482 return SetPermissions(address, size, access);
483}
484
485bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
486 DCHECK(Contains(address, size));
487 return DiscardSystemPagesInternal(*zx::unowned_vmar(vmar_),
488 OS::CommitPageSize(), address, size);
489}
490
491bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
492 DCHECK(Contains(address, size));
493 // See comment in OS::DecommitPages.
494 return SetPermissions(address, size, OS::MemoryPermission::kNoAccess) &&
495 DiscardSystemPages(address, size);
496}
497
498} // namespace base
499} // namespace v8