mirror of
https://github.com/shadps4-emu/shadPS4.git
synced 2026-04-01 18:40:57 -06:00
Merge 352ae6c7d5 into 969955b8a0
This commit is contained in:
commit
5dc47707ae
@ -1,6 +1,8 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025-2026 shadPS4 Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <limits>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/debug.h"
|
||||
@ -68,8 +70,26 @@ void MemoryManager::SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1
|
||||
fmem_map.emplace(total_direct_size,
|
||||
PhysicalMemoryArea{total_direct_size, remaining_physical_space});
|
||||
|
||||
flexible_virtual_base = impl.SystemReservedVirtualBase();
|
||||
const u64 flexible_virtual_size =
|
||||
std::min<u64>(total_flexible_size, impl.SystemReservedVirtualSize());
|
||||
flexible_virtual_end = flexible_virtual_base + flexible_virtual_size;
|
||||
|
||||
{
|
||||
std::scoped_lock lk{mutex};
|
||||
InvalidateFlexibleMappedRangeCacheLocked();
|
||||
RecalculateFlexibleMappedUsageLocked();
|
||||
}
|
||||
|
||||
LOG_INFO(Kernel_Vmm, "Configured memory regions: flexible size = {:#x}, direct size = {:#x}",
|
||||
total_flexible_size, total_direct_size);
|
||||
if (Config::debugDump()) {
|
||||
LOG_DEBUG(
|
||||
Kernel_Vmm,
|
||||
"Flexible accounting region: [{:#x}, {:#x}), total = {:#x}, used = {:#x}, free = {:#x}",
|
||||
flexible_virtual_base, flexible_virtual_end, total_flexible_size, flexible_mapped_usage,
|
||||
GetAvailableFlexibleSize());
|
||||
}
|
||||
}
|
||||
|
||||
u64 MemoryManager::ClampRangeSize(VAddr virtual_addr, u64 size) {
|
||||
@ -394,6 +414,7 @@ s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot, s32
|
||||
new_vma.prot = prot;
|
||||
new_vma.name = "anon";
|
||||
new_vma.type = Core::VMAType::Pooled;
|
||||
new_vma.is_system_module = false;
|
||||
new_vma.phys_areas.clear();
|
||||
|
||||
// Find suitable physical addresses
|
||||
@ -445,7 +466,8 @@ s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot, s32
|
||||
|
||||
MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size, MemoryProt prot,
|
||||
MemoryMapFlags flags, VMAType type,
|
||||
std::string_view name, u64 alignment) {
|
||||
std::string_view name, u64 alignment,
|
||||
bool is_system_module) {
|
||||
// Locate the VMA representing the requested region
|
||||
auto vma = FindVMA(virtual_addr)->second;
|
||||
if (True(flags & MemoryMapFlags::Fixed)) {
|
||||
@ -469,6 +491,7 @@ MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size,
|
||||
// Create a memory area representing this mapping.
|
||||
const auto new_vma_handle = CarveVMA(virtual_addr, size);
|
||||
auto& new_vma = new_vma_handle->second;
|
||||
InvalidateFlexibleMappedRangeCacheLocked();
|
||||
const bool is_exec = True(prot & MemoryProt::CpuExec);
|
||||
if (True(prot & MemoryProt::CpuWrite)) {
|
||||
// On PS4, read is appended to write mappings.
|
||||
@ -480,13 +503,15 @@ MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size,
|
||||
new_vma.prot = prot;
|
||||
new_vma.name = name;
|
||||
new_vma.type = type;
|
||||
new_vma.is_system_module = is_system_module;
|
||||
new_vma.phys_areas.clear();
|
||||
return new_vma_handle;
|
||||
}
|
||||
|
||||
s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
|
||||
MemoryMapFlags flags, VMAType type, std::string_view name,
|
||||
bool validate_dmem, PAddr phys_addr, u64 alignment) {
|
||||
bool validate_dmem, PAddr phys_addr, u64 alignment,
|
||||
bool is_system_module) {
|
||||
// Certain games perform flexible mappings on loop to determine
|
||||
// the available flexible memory size. Questionable but we need to handle this.
|
||||
if (type == VMAType::Flexible && flexible_usage + size > total_flexible_size) {
|
||||
@ -557,9 +582,11 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
|
||||
|
||||
// Acquire writer lock.
|
||||
std::unique_lock lk2{mutex};
|
||||
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);
|
||||
|
||||
// Create VMA representing this mapping.
|
||||
auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, type, name, alignment);
|
||||
auto new_vma_handle =
|
||||
CreateArea(virtual_addr, size, prot, flags, type, name, alignment, is_system_module);
|
||||
auto& new_vma = new_vma_handle->second;
|
||||
auto mapped_addr = new_vma.base;
|
||||
bool is_exec = True(prot & MemoryProt::CpuExec);
|
||||
@ -643,6 +670,14 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
|
||||
MergeAdjacent(vma_map, new_vma_handle);
|
||||
}
|
||||
|
||||
u64 flexible_after = 0;
|
||||
if (type == VMAType::Flexible && !new_vma.phys_areas.empty()) {
|
||||
flexible_after = GetFlexibleRangeOverlapBytesLocked(mapped_addr, size);
|
||||
} else if (type == VMAType::Code && !is_system_module) {
|
||||
flexible_after = GetFlexibleRangeOverlapBytesLocked(mapped_addr, size);
|
||||
}
|
||||
AdjustFlexibleMappedUsageLocked(flexible_before, flexible_after);
|
||||
|
||||
*out_addr = std::bit_cast<void*>(mapped_addr);
|
||||
if (type != VMAType::Reserved && type != VMAType::PoolReserved) {
|
||||
// Flexible address space mappings were performed while finding direct memory areas.
|
||||
@ -731,9 +766,11 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
|
||||
|
||||
// Aquire writer lock
|
||||
std::scoped_lock lk2{mutex};
|
||||
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);
|
||||
|
||||
// Update VMA map and map to address space.
|
||||
auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0);
|
||||
auto new_vma_handle =
|
||||
CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0, false);
|
||||
|
||||
auto& new_vma = new_vma_handle->second;
|
||||
new_vma.fd = fd;
|
||||
@ -742,6 +779,8 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
|
||||
|
||||
impl.MapFile(mapped_addr, size, phys_addr, std::bit_cast<u32>(prot), handle);
|
||||
|
||||
AdjustFlexibleMappedUsageLocked(flexible_before, 0);
|
||||
|
||||
*out_addr = std::bit_cast<void*>(mapped_addr);
|
||||
return ORBIS_OK;
|
||||
}
|
||||
@ -814,6 +853,7 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
|
||||
vma.prot = MemoryProt::NoAccess;
|
||||
vma.disallow_merge = false;
|
||||
vma.name = "anon";
|
||||
vma.is_system_module = false;
|
||||
vma.phys_areas.clear();
|
||||
MergeAdjacent(vma_map, new_it);
|
||||
|
||||
@ -848,7 +888,12 @@ s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {
|
||||
|
||||
// Acquire writer lock.
|
||||
std::scoped_lock lk2{mutex};
|
||||
return UnmapMemoryImpl(virtual_addr, size);
|
||||
const u64 flexible_before = GetFlexibleMappedBytesInRangeLocked(virtual_addr, size);
|
||||
const s32 result = UnmapMemoryImpl(virtual_addr, size);
|
||||
if (result == ORBIS_OK) {
|
||||
AdjustFlexibleMappedUsageLocked(flexible_before, 0);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size) {
|
||||
@ -904,11 +949,13 @@ u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma
|
||||
// Mark region as free and attempt to coalesce it with neighbours.
|
||||
const auto new_it = CarveVMA(virtual_addr, size_in_vma);
|
||||
auto& vma = new_it->second;
|
||||
InvalidateFlexibleMappedRangeCacheLocked();
|
||||
vma.type = VMAType::Free;
|
||||
vma.prot = MemoryProt::NoAccess;
|
||||
vma.phys_areas.clear();
|
||||
vma.disallow_merge = false;
|
||||
vma.name = "";
|
||||
vma.is_system_module = false;
|
||||
MergeAdjacent(vma_map, new_it);
|
||||
|
||||
if (vma_type != VMAType::Reserved && vma_type != VMAType::PoolReserved) {
|
||||
@ -1352,6 +1399,137 @@ void MemoryManager::InvalidateMemory(const VAddr addr, const u64 size) const {
|
||||
}
|
||||
}
|
||||
|
||||
void MemoryManager::RecalculateFlexibleUsageForDebug() {
|
||||
std::scoped_lock lk{mutex};
|
||||
RecalculateFlexibleMappedUsageLocked();
|
||||
}
|
||||
|
||||
// Returns true for VMA types whose pages are charged against the game's
// flexible memory budget: flexible allocations and code mappings.
bool MemoryManager::IsFlexibleCountedVmaType(VMAType type) const {
    switch (type) {
    case VMAType::Flexible:
    case VMAType::Code:
        return true;
    default:
        return false;
    }
}
|
||||
|
||||
bool MemoryManager::IsFlexibleCommittedVma(const VirtualMemoryArea& vma) const {
|
||||
if (!vma.IsMapped()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (vma.type == VMAType::Flexible) {
|
||||
return !vma.phys_areas.empty();
|
||||
}
|
||||
|
||||
if (vma.type == VMAType::Code) {
|
||||
// System modules should not consume the game's flexible memory.
|
||||
return !vma.is_system_module;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Computes how many bytes of the 16 KiB-page-aligned expansion of
// [virtual_addr, virtual_addr + size) fall inside the flexible virtual
// region. Returns 0 when the region is not configured, the request is
// empty, or the arithmetic would overflow.
u64 MemoryManager::GetFlexibleRangeOverlapBytesLocked(VAddr virtual_addr, u64 size) const {
    if (!IsFlexibleRegionConfigured() || size == 0) {
        return 0;
    }

    // Expand the request to 16 KiB page granularity, guarding every step
    // against u64 wrap-around.
    const VAddr aligned_start = Common::AlignDown(virtual_addr, 16_KB);
    const u64 page_offset = virtual_addr - aligned_start;
    if (size > std::numeric_limits<u64>::max() - page_offset) {
        return 0;
    }
    const u64 aligned_size = Common::AlignUp(size + page_offset, 16_KB);
    if (aligned_size == 0) {
        // AlignUp wrapped around; treat as no overlap.
        return 0;
    }

    // aligned_start + aligned_size may itself wrap; clamp the end to the
    // top of the address space so the intersection below stays well-defined
    // instead of producing a tiny wrapped end address.
    const VAddr aligned_end = aligned_size > std::numeric_limits<u64>::max() - aligned_start
                                  ? std::numeric_limits<u64>::max()
                                  : aligned_start + aligned_size;

    // Intersect the aligned request with the flexible accounting region.
    const VAddr range_start = std::max(aligned_start, flexible_virtual_base);
    const VAddr range_end = std::min(aligned_end, flexible_virtual_end);
    return range_start < range_end ? range_end - range_start : 0;
}
|
||||
|
||||
// Returns the number of bytes inside [virtual_addr, virtual_addr + size)
// (expanded to 16 KiB page granularity and intersected with the flexible
// virtual region) that are covered by committed flexible-counted VMAs.
// Results are memoized per exact range; the cache is keyed on vma_revision,
// which InvalidateFlexibleMappedRangeCacheLocked() bumps on any VMA change.
// Caller must hold the memory mutex.
u64 MemoryManager::GetFlexibleMappedBytesInRangeLocked(VAddr virtual_addr, u64 size) const {
    if (!IsFlexibleRegionConfigured() || size == 0) {
        return 0;
    }

    // Align the query to 16 KiB pages, bailing out on u64 overflow.
    const VAddr aligned_start = Common::AlignDown(virtual_addr, 16_KB);
    const u64 page_offset = virtual_addr - aligned_start;
    if (size > std::numeric_limits<u64>::max() - page_offset) {
        return 0;
    }
    const u64 aligned_size = Common::AlignUp(size + page_offset, 16_KB);
    if (aligned_size == 0) {
        // AlignUp wrapped around; nothing sensible to count.
        return 0;
    }

    // Clip the aligned query against the flexible accounting region.
    const VAddr aligned_end = aligned_start + aligned_size;
    const VAddr range_start = std::max(aligned_start, flexible_virtual_base);
    const VAddr range_end = std::min(aligned_end, flexible_virtual_end);
    if (range_start >= range_end) {
        return 0;
    }

    // Fast path: the previous query covered exactly this range and no VMA
    // has changed since (same revision), so reuse the cached byte count.
    if (flexible_mapped_range_cache.valid && flexible_mapped_range_cache.revision == vma_revision &&
        flexible_mapped_range_cache.range_start == range_start &&
        flexible_mapped_range_cache.range_end == range_end) {
        return flexible_mapped_range_cache.mapped_bytes;
    }

    // Walk every VMA overlapping the range. Start from the entry at or
    // before range_start (upper_bound then step back) so a VMA straddling
    // the range start is included.
    u64 mapped_bytes = 0;
    auto it = vma_map.upper_bound(range_start);
    if (it != vma_map.begin()) {
        it = std::prev(it);
    }
    while (it != vma_map.end() && it->second.base < range_end) {
        const auto& vma = it->second;
        const bool counted_type = IsFlexibleCountedVmaType(vma.type);
        if (!counted_type) {
            ++it;
            continue;
        }

        // Accumulate only the portion of a committed VMA that actually
        // intersects the query range.
        const VAddr vma_end = vma.base + vma.size;
        const VAddr overlap_start = std::max(range_start, vma.base);
        const VAddr overlap_end = std::min(range_end, vma_end);
        const bool committed = IsFlexibleCommittedVma(vma);

        if (overlap_start < overlap_end && committed) {
            mapped_bytes += overlap_end - overlap_start;
        }

        ++it;
    }

    // Memoize the result for repeat queries of the same range; tagging
    // with the current revision lets VMA mutations implicitly invalidate it.
    flexible_mapped_range_cache.range_start = range_start;
    flexible_mapped_range_cache.range_end = range_end;
    flexible_mapped_range_cache.mapped_bytes = mapped_bytes;
    flexible_mapped_range_cache.revision = vma_revision;
    flexible_mapped_range_cache.valid = true;
    return mapped_bytes;
}
|
||||
|
||||
// Invalidates the memoized flexible-range byte count after any VMA map
// mutation: bumping the revision defeats revision-tagged cache hits, and
// clearing the valid flag drops the entry outright.
void MemoryManager::InvalidateFlexibleMappedRangeCacheLocked() {
    vma_revision += 1;
    flexible_mapped_range_cache.valid = false;
}
|
||||
|
||||
// Applies the delta between the flexible bytes mapped in a range before
// and after an operation to the global usage counter, saturating at zero
// so accounting drift can never underflow the u64.
void MemoryManager::AdjustFlexibleMappedUsageLocked(u64 mapped_before, u64 mapped_after) {
    if (mapped_after >= mapped_before) {
        flexible_mapped_usage += mapped_after - mapped_before;
        return;
    }
    const u64 shrink = mapped_before - mapped_after;
    flexible_mapped_usage -= std::min(shrink, flexible_mapped_usage);
}
|
||||
|
||||
void MemoryManager::RecalculateFlexibleMappedUsageLocked() {
|
||||
if (!IsFlexibleRegionConfigured()) {
|
||||
flexible_mapped_usage = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
flexible_mapped_usage = GetFlexibleMappedBytesInRangeLocked(
|
||||
flexible_virtual_base, flexible_virtual_end - flexible_virtual_base);
|
||||
}
|
||||
|
||||
VAddr MemoryManager::SearchFree(VAddr virtual_addr, u64 size, u32 alignment) {
|
||||
// Calculate the minimum and maximum addresses present in our address space.
|
||||
auto min_search_address = impl.SystemManagedVirtualBase();
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2025 shadPS4 Emulator Project
|
||||
// SPDX-FileCopyrightText: Copyright 2025-2026 shadPS4 Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#pragma once
|
||||
@ -111,6 +111,7 @@ struct VirtualMemoryArea {
|
||||
std::string name = "";
|
||||
s32 fd = 0;
|
||||
bool disallow_merge = false;
|
||||
bool is_system_module = false;
|
||||
|
||||
bool Contains(VAddr addr, u64 size) const {
|
||||
return addr >= base && (addr + size) <= (base + this->size);
|
||||
@ -149,6 +150,9 @@ struct VirtualMemoryArea {
|
||||
if (name.compare(next.name) != 0) {
|
||||
return false;
|
||||
}
|
||||
if (is_system_module != next.is_system_module) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -157,9 +161,11 @@ struct VirtualMemoryArea {
|
||||
class MemoryManager {
|
||||
using PhysMap = std::map<PAddr, PhysicalMemoryArea>;
|
||||
using PhysHandle = PhysMap::iterator;
|
||||
using PhysConstHandle = PhysMap::const_iterator;
|
||||
|
||||
using VMAMap = std::map<VAddr, VirtualMemoryArea>;
|
||||
using VMAHandle = VMAMap::iterator;
|
||||
using VMAConstHandle = VMAMap::const_iterator;
|
||||
|
||||
public:
|
||||
explicit MemoryManager();
|
||||
@ -181,8 +187,17 @@ public:
|
||||
return total_flexible_size;
|
||||
}
|
||||
|
||||
u64 GetUsedFlexibleSize() const {
|
||||
return flexible_mapped_usage;
|
||||
}
|
||||
|
||||
u64 GetAvailableFlexibleSize() const {
|
||||
return total_flexible_size - flexible_usage;
|
||||
const u64 used = GetUsedFlexibleSize();
|
||||
return used < total_flexible_size ? total_flexible_size - used : 0;
|
||||
}
|
||||
|
||||
bool IsFlexibleRegionConfigured() const {
|
||||
return flexible_virtual_end > flexible_virtual_base;
|
||||
}
|
||||
|
||||
VAddr SystemReservedVirtualBase() noexcept {
|
||||
@ -252,7 +267,8 @@ public:
|
||||
|
||||
s32 MapMemory(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
|
||||
MemoryMapFlags flags, VMAType type, std::string_view name = "anon",
|
||||
bool validate_dmem = false, PAddr phys_addr = -1, u64 alignment = 0);
|
||||
bool validate_dmem = false, PAddr phys_addr = -1, u64 alignment = 0,
|
||||
bool is_system_module = false);
|
||||
|
||||
s32 MapFile(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
|
||||
MemoryMapFlags flags, s32 fd, s64 phys_addr);
|
||||
@ -288,26 +304,37 @@ public:
|
||||
|
||||
void InvalidateMemory(VAddr addr, u64 size) const;
|
||||
|
||||
void RecalculateFlexibleUsageForDebug();
|
||||
|
||||
private:
|
||||
VMAHandle FindVMA(VAddr target) {
|
||||
return std::prev(vma_map.upper_bound(target));
|
||||
}
|
||||
VMAConstHandle FindVMA(VAddr target) const {
|
||||
return std::prev(vma_map.upper_bound(target));
|
||||
}
|
||||
|
||||
PhysHandle FindDmemArea(PAddr target) {
|
||||
return std::prev(dmem_map.upper_bound(target));
|
||||
}
|
||||
PhysConstHandle FindDmemArea(PAddr target) const {
|
||||
return std::prev(dmem_map.upper_bound(target));
|
||||
}
|
||||
|
||||
PhysHandle FindFmemArea(PAddr target) {
|
||||
return std::prev(fmem_map.upper_bound(target));
|
||||
}
|
||||
PhysConstHandle FindFmemArea(PAddr target) const {
|
||||
return std::prev(fmem_map.upper_bound(target));
|
||||
}
|
||||
|
||||
bool HasPhysicalBacking(VirtualMemoryArea vma) {
|
||||
bool HasPhysicalBacking(const VirtualMemoryArea& vma) const {
|
||||
return vma.type == VMAType::Direct || vma.type == VMAType::Flexible ||
|
||||
vma.type == VMAType::Pooled;
|
||||
}
|
||||
|
||||
VMAHandle CreateArea(VAddr virtual_addr, u64 size, MemoryProt prot, MemoryMapFlags flags,
|
||||
VMAType type, std::string_view name, u64 alignment);
|
||||
VMAType type, std::string_view name, u64 alignment, bool is_system_module);
|
||||
|
||||
VAddr SearchFree(VAddr virtual_addr, u64 size, u32 alignment);
|
||||
|
||||
@ -327,6 +354,20 @@ private:
|
||||
|
||||
s32 UnmapMemoryImpl(VAddr virtual_addr, u64 size);
|
||||
|
||||
bool IsFlexibleCountedVmaType(VMAType type) const;
|
||||
|
||||
bool IsFlexibleCommittedVma(const VirtualMemoryArea& vma) const;
|
||||
|
||||
u64 GetFlexibleRangeOverlapBytesLocked(VAddr virtual_addr, u64 size) const;
|
||||
|
||||
u64 GetFlexibleMappedBytesInRangeLocked(VAddr virtual_addr, u64 size) const;
|
||||
|
||||
void InvalidateFlexibleMappedRangeCacheLocked();
|
||||
|
||||
void AdjustFlexibleMappedUsageLocked(u64 mapped_before, u64 mapped_after);
|
||||
|
||||
void RecalculateFlexibleMappedUsageLocked();
|
||||
|
||||
private:
|
||||
AddressSpace impl;
|
||||
PhysMap dmem_map;
|
||||
@ -337,6 +378,18 @@ private:
|
||||
u64 total_direct_size{};
|
||||
u64 total_flexible_size{};
|
||||
u64 flexible_usage{};
|
||||
VAddr flexible_virtual_base{};
|
||||
VAddr flexible_virtual_end{};
|
||||
u64 flexible_mapped_usage{};
|
||||
struct FlexibleMappedRangeCache {
|
||||
VAddr range_start{};
|
||||
VAddr range_end{};
|
||||
u64 mapped_bytes{};
|
||||
u64 revision{};
|
||||
bool valid{};
|
||||
};
|
||||
mutable FlexibleMappedRangeCache flexible_mapped_range_cache{};
|
||||
u64 vma_revision{};
|
||||
u64 pool_budget{};
|
||||
s32 sdk_version{};
|
||||
Vulkan::Rasterizer* rasterizer{};
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
|
||||
// SPDX-FileCopyrightText: Copyright 2024-2026 shadPS4 Emulator Project
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "common/alignment.h"
|
||||
@ -112,12 +112,20 @@ void Module::LoadModuleToMemory(u32& max_tls_index) {
|
||||
|
||||
// Map module segments (and possible TLS trampolines)
|
||||
void** out_addr = reinterpret_cast<void**>(&base_virtual_addr);
|
||||
memory->MapMemory(out_addr, ModuleLoadBase, aligned_base_size + TrampolineSize,
|
||||
const bool is_system_module = IsSystemLib();
|
||||
memory->MapMemory(out_addr, ModuleLoadBase, aligned_base_size,
|
||||
MemoryProt::CpuReadWrite | MemoryProt::CpuExec, MemoryMapFlags::NoFlags,
|
||||
VMAType::Code, name);
|
||||
VMAType::Code, name, false, -1, 0, is_system_module);
|
||||
LOG_INFO(Core_Linker, "Loading module {} to {}", name, fmt::ptr(*out_addr));
|
||||
|
||||
#ifdef ARCH_X86_64
|
||||
void* trampoline_region = std::bit_cast<void*>(base_virtual_addr + aligned_base_size);
|
||||
const int tramp_ret = memory->MapMemory(
|
||||
&trampoline_region, base_virtual_addr + aligned_base_size, TrampolineSize,
|
||||
MemoryProt::CpuReadWrite | MemoryProt::CpuExec,
|
||||
MemoryMapFlags::Fixed | MemoryMapFlags::NoOverwrite, VMAType::File, "Trampoline");
|
||||
ASSERT_MSG(tramp_ret == 0, "Unable to map trampoline memory");
|
||||
|
||||
// Initialize trampoline generator.
|
||||
void* trampoline_addr = std::bit_cast<void*>(base_virtual_addr + aligned_base_size);
|
||||
RegisterPatchModule(*out_addr, aligned_base_size, trampoline_addr, TrampolineSize);
|
||||
|
||||
Loading…
Reference in New Issue
Block a user