mirror of
https://github.com/RPCS3/rpcs3.git
synced 2026-04-29 23:41:12 -06:00
Fix race condition in memory_decommit/memory_reset on Apple ARM64
The previous approach used munmap followed by mmap without MAP_FIXED (since Apple rejects MAP_FIXED | MAP_JIT). Between the two calls, another thread could claim the unmapped address range, causing mmap to return a different address and triggering a fatal verification error. Under concurrent load (e.g. PPU LLVM compilation with many worker threads), this race manifests reliably as "Verification failed (object: 0x0)" crashes across all PPUW threads in memory_decommit. Fix: Use MAP_FIXED without MAP_JIT instead. This atomically replaces the mapping without any window for other threads to interfere. The MAP_JIT attribute is lost on the replaced pages, but the application's code-signing entitlements (allow-unsigned-executable-memory, disable-executable-page-protection) permit executable mappings without it. Applied the same fix to memory_reset, which had the identical pattern.
This commit is contained in:
parent
077f2a73e1
commit
4cac76caad
@ -347,12 +347,11 @@ namespace utils
|
||||
#else
|
||||
const u64 ptr64 = reinterpret_cast<u64>(pointer);
|
||||
#if defined(__APPLE__) && defined(ARCH_ARM64)
|
||||
// Hack: on macOS, Apple explicitly fails mmap if you combine MAP_FIXED and MAP_JIT.
|
||||
// So we unmap the space and just hope it maps to the same address we got before instead.
|
||||
// The Xcode manpage says the pointer is a hint and the OS will try to map at the hint location
|
||||
// so this isn't completely undefined behavior.
|
||||
ensure(::munmap(pointer, size) != -1);
|
||||
ensure(::mmap(pointer, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0) == pointer);
|
||||
// Use MAP_FIXED without MAP_JIT to atomically replace the mapping.
|
||||
// Apple rejects MAP_FIXED | MAP_JIT, but MAP_FIXED alone works and
|
||||
// avoids the race condition of the previous munmap+mmap approach
|
||||
// where another thread could claim the address range between the two calls.
|
||||
ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
|
||||
#else
|
||||
ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
|
||||
#endif
|
||||
@ -381,8 +380,9 @@ namespace utils
|
||||
#else
|
||||
const u64 ptr64 = reinterpret_cast<u64>(pointer);
|
||||
#if defined(__APPLE__) && defined(ARCH_ARM64)
|
||||
ensure(::munmap(pointer, size) != -1);
|
||||
ensure(::mmap(pointer, size, +prot, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0) == pointer);
|
||||
// Use MAP_FIXED without MAP_JIT to atomically replace the mapping.
|
||||
// See memory_decommit for details on why the munmap+mmap approach is unsafe.
|
||||
ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
|
||||
#else
|
||||
ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));
|
||||
#endif
|
||||
|
||||
Loading…
Reference in New Issue
Block a user