From acdad8ba284005e9a54d35fb8aa60b8d4c0e9ad8 Mon Sep 17 00:00:00 2001
From: Daniel Risto
Date: Sun, 22 Mar 2026 10:34:25 +0100
Subject: [PATCH 1/2] Fix race condition in memory_decommit/memory_reset on
 Apple ARM64

The previous approach used munmap followed by mmap without MAP_FIXED
(since Apple rejects MAP_FIXED | MAP_JIT). Between the two calls, another
thread could claim the unmapped address range, causing mmap to return a
different address and triggering a fatal verification error.

Under concurrent load (e.g. PPU LLVM compilation with many worker
threads), this race manifests reliably as "Verification failed (object:
0x0)" crashes across all PPUW threads in memory_decommit.

Fix: Use MAP_FIXED without MAP_JIT instead. This atomically replaces the
mapping without any window for other threads to interfere. The MAP_JIT
attribute is lost on the replaced pages, but the application's code
signing entitlements (allow-unsigned-executable-memory,
disable-executable-page-protection) permit executable mappings without it.

Applied the same fix to memory_reset which had the identical pattern.
---
 rpcs3/util/vm_native.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/rpcs3/util/vm_native.cpp b/rpcs3/util/vm_native.cpp
index ed1e0060c9..d30e6c6318 100644
--- a/rpcs3/util/vm_native.cpp
+++ b/rpcs3/util/vm_native.cpp
@@ -343,12 +343,11 @@ namespace utils
 #else
 	const u64 ptr64 = reinterpret_cast<u64>(pointer);
 #if defined(__APPLE__) && defined(ARCH_ARM64)
-	// Hack: on macOS, Apple explicitly fails mmap if you combine MAP_FIXED and MAP_JIT.
-	// So we unmap the space and just hope it maps to the same address we got before instead.
-	// The Xcode manpage says the pointer is a hint and the OS will try to map at the hint location
-	// so this isn't completely undefined behavior.
-	ensure(::munmap(pointer, size) != -1);
-	ensure(::mmap(pointer, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0) == pointer);
+	// Use MAP_FIXED without MAP_JIT to atomically replace the mapping.
+	// Apple rejects MAP_FIXED | MAP_JIT, but MAP_FIXED alone works and
+	// avoids the race condition of the previous munmap+mmap approach
+	// where another thread could claim the address range between the two calls.
+	ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
 #else
 	ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
 #endif
@@ -377,8 +376,9 @@
 #else
 	const u64 ptr64 = reinterpret_cast<u64>(pointer);
 #if defined(__APPLE__) && defined(ARCH_ARM64)
-	ensure(::munmap(pointer, size) != -1);
-	ensure(::mmap(pointer, size, +prot, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0) == pointer);
+	// Use MAP_FIXED without MAP_JIT to atomically replace the mapping.
+	// See memory_decommit for details on why the munmap+mmap approach is unsafe.
+	ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
 #else
 	ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
 #endif

From 5cb8759e5ddf7cb9cd506d4bc6f75df8ce610f52 Mon Sep 17 00:00:00 2001
From: Daniel Risto
Date: Sun, 22 Mar 2026 13:18:24 +0100
Subject: [PATCH 2/2] Remove redundant Apple ARM64 ifdef blocks

The Apple ARM64 code paths are now identical to the generic case, so the
ifdef blocks are unnecessary.
---
 rpcs3/util/vm_native.cpp | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/rpcs3/util/vm_native.cpp b/rpcs3/util/vm_native.cpp
index d30e6c6318..f1083b927c 100644
--- a/rpcs3/util/vm_native.cpp
+++ b/rpcs3/util/vm_native.cpp
@@ -342,15 +342,7 @@ namespace utils
 	ensure(::VirtualFree(pointer, size, MEM_DECOMMIT));
 #else
 	const u64 ptr64 = reinterpret_cast<u64>(pointer);
-#if defined(__APPLE__) && defined(ARCH_ARM64)
-	// Use MAP_FIXED without MAP_JIT to atomically replace the mapping.
-	// Apple rejects MAP_FIXED | MAP_JIT, but MAP_FIXED alone works and
-	// avoids the race condition of the previous munmap+mmap approach
-	// where another thread could claim the address range between the two calls.
 	ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
-#else
-	ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
-#endif

 	if constexpr (c_madv_no_dump != 0)
 	{
@@ -375,13 +367,7 @@ namespace utils
 	memory_commit(pointer, size, prot);
 #else
 	const u64 ptr64 = reinterpret_cast<u64>(pointer);
-#if defined(__APPLE__) && defined(ARCH_ARM64)
-	// Use MAP_FIXED without MAP_JIT to atomically replace the mapping.
-	// See memory_decommit for details on why the munmap+mmap approach is unsafe.
 	ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
-#else
-	ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
-#endif

 	if constexpr (c_madv_hugepage != 0)
 	{