vm: Make vm::g_exec_addr not use MAP_JIT

This commit is contained in:
Elad 2026-05-09 20:16:35 +03:00
parent 022de7a67b
commit 4ef86989ae
5 changed files with 18 additions and 14 deletions

View File

@ -122,7 +122,7 @@ static u8* get_jit_memory()
// Reserve 2G memory (magic static)
static void* const s_memory2 = []() -> void*
{
-void* ptr = utils::memory_reserve(0x80000000);
+void* ptr = utils::memory_reserve(0x80000000, true);
#ifdef CAN_OVERCOMMIT
utils::memory_commit(ptr, 0x80000000);
utils::memory_protect(ptr, 0x40000000, utils::protection::wx);
@ -348,7 +348,7 @@ jit_runtime_base& asmjit::get_global_runtime()
{
custom_runtime() noexcept
{
-ensure(m_pos.raw() = static_cast<uchar*>(utils::memory_reserve(size)));
+ensure(m_pos.raw() = static_cast<uchar*>(utils::memory_reserve(size, true)));
// Initialize "end" pointer
m_max = m_pos + size;

View File

@ -199,7 +199,7 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
MemoryManager1(std::function<u64(const std::string&)> symbols_cement = {}) noexcept
: m_symbols_cement(std::move(symbols_cement))
{
-auto ptr = reinterpret_cast<u8*>(utils::memory_reserve(c_max_size * 3));
+auto ptr = reinterpret_cast<u8*>(utils::memory_reserve(c_max_size * 3, true));
m_code_mems = ptr;
// ptr += c_max_size;
// m_data_ro_mems = ptr;

View File

@ -30,7 +30,7 @@ namespace vm
{
for (u64 addr = reinterpret_cast<u64>(_addr) + 0x100000000; addr < 0x8000'0000'0000; addr += 0x100000000)
{
-if (auto ptr = utils::memory_reserve(size, reinterpret_cast<void*>(addr), is_memory_mapping))
+if (auto ptr = utils::memory_reserve(size, reinterpret_cast<void*>(addr), is_memory_mapping, false))
{
return static_cast<u8*>(ptr);
}

View File

@ -26,11 +26,15 @@ namespace utils
rx, // Read + execute
};
-/**
- * Reserve `size` bytes of virtual memory and returns it.
- * The memory should be committed before usage.
- */
-void* memory_reserve(usz size, void* use_addr = nullptr, bool is_memory_mapping = false);
+// Reserve `size` bytes of virtual memory and returns it.
+// The memory should be committed before usage.
+void* memory_reserve(usz size, void* use_addr, bool is_memory_mapping = false, bool can_be_jit = false);
+// Non-fixed address memory_reserve usage
+inline void* memory_reserve(usz size, bool can_be_jit = false)
+{
+	return memory_reserve(size, nullptr, false, can_be_jit);
+}
/**
* Commit `size` bytes of virtual memory starting at pointer.

View File

@ -222,7 +222,7 @@ namespace utils
return _prot;
}
-void* memory_reserve(usz size, void* use_addr, [[maybe_unused]] bool is_memory_mapping)
+void* memory_reserve(usz size, void* use_addr, [[maybe_unused]] bool is_memory_mapping, [[maybe_unused]] bool can_be_jit)
{
#ifdef _WIN32
if (is_memory_mapping && has_win10_memory_mapping_api())
@ -251,15 +251,15 @@ namespace utils
size += 0x10000;
}
-#ifdef __APPLE__
-#ifdef ARCH_ARM64
-// Memory mapping regions will be replaced by file-backed MAP_FIXED mappings
-// (via shm::map), which is incompatible with MAP_JIT. Only use MAP_JIT for
-// non-mapping regions that need JIT executable support.
-const int jit_flag = is_memory_mapping ? 0 : MAP_JIT;
+#ifdef __APPLE__
+const int jit_flag = is_memory_mapping || !can_be_jit ? 0 : MAP_JIT;
+#ifdef ARCH_ARM64
auto ptr = ::mmap(use_addr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE | jit_flag | c_map_noreserve, -1, 0);
#else
-auto ptr = ::mmap(use_addr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_JIT | c_map_noreserve, -1, 0);
+auto ptr = ::mmap(use_addr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | jit_flag | c_map_noreserve, -1, 0);
#endif
#else
auto ptr = ::mmap(use_addr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0);