From 4cac76caadda064ea5cafc6f530133acb2302c50 Mon Sep 17 00:00:00 2001 From: Daniel Risto Date: Sun, 22 Mar 2026 10:34:25 +0100 Subject: [PATCH] Fix race condition in memory_decommit/memory_reset on Apple ARM64 The previous approach used munmap followed by mmap without MAP_FIXED (since Apple rejects MAP_FIXED | MAP_JIT). Between the two calls, another thread could claim the unmapped address range, causing mmap to return a different address and triggering a fatal verification error. Under concurrent load (e.g. PPU LLVM compilation with many worker threads), this race manifests reliably as "Verification failed (object: 0x0)" crashes across all PPUW threads in memory_decommit. Fix: Use MAP_FIXED without MAP_JIT instead. This atomically replaces the mapping without any window for other threads to interfere. The MAP_JIT attribute is lost on the replaced pages, but the application's code signing entitlements (allow-unsigned-executable-memory, disable-executable-page-protection) permit executable mappings without it. Applied the same fix to memory_reset which had the identical pattern. --- rpcs3/util/vm_native.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/rpcs3/util/vm_native.cpp b/rpcs3/util/vm_native.cpp index 5821300fc3..d569a26645 100644 --- a/rpcs3/util/vm_native.cpp +++ b/rpcs3/util/vm_native.cpp @@ -347,12 +347,11 @@ namespace utils #else const u64 ptr64 = reinterpret_cast<u64>(pointer); #if defined(__APPLE__) && defined(ARCH_ARM64) - // Hack: on macOS, Apple explicitly fails mmap if you combine MAP_FIXED and MAP_JIT. - // So we unmap the space and just hope it maps to the same address we got before instead. - // The Xcode manpage says the pointer is a hint and the OS will try to map at the hint location - // so this isn't completely undefined behavior. 
- ensure(::munmap(pointer, size) != -1); - ensure(::mmap(pointer, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0) == pointer); + // Use MAP_FIXED without MAP_JIT to atomically replace the mapping. + // Apple rejects MAP_FIXED | MAP_JIT, but MAP_FIXED alone works and + // avoids the race condition of the previous munmap+mmap approach + // where another thread could claim the address range between the two calls. + ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax})); #else ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax})); #endif @@ -381,8 +380,9 @@ namespace utils #else const u64 ptr64 = reinterpret_cast<u64>(pointer); #if defined(__APPLE__) && defined(ARCH_ARM64) - ensure(::munmap(pointer, size) != -1); - ensure(::mmap(pointer, size, +prot, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0) == pointer); + // Use MAP_FIXED without MAP_JIT to atomically replace the mapping. + // See memory_decommit for details on why the munmap+mmap approach is unsafe. + ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax})); #else ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax})); #endif