vk: Extend memory allocation system to explicitly allow requesting no VRAM recovery on allocation failure.

This commit is contained in:
kd-11 2025-12-15 13:34:45 +03:00 committed by Ani
parent c3db85c68e
commit 1a3e150a62
5 changed files with 82 additions and 46 deletions

View File

@ -50,6 +50,7 @@ namespace vk
: m_device(dev)
{
const bool nullable = !!(flags & VK_BUFFER_CREATE_ALLOW_NULL_RPCS3);
const bool no_vmem_recovery = !!(flags & VK_BUFFER_CREATE_IGNORE_VMEM_PRESSURE_RPCS3);
info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
info.flags = flags & ~VK_BUFFER_CREATE_SPECIAL_FLAGS_RPCS3;
@ -69,18 +70,27 @@ namespace vk
fmt::throw_exception("No compatible memory type was found!");
}
memory = std::make_unique<memory_block>(m_device, memory_reqs.size, memory_reqs.alignment, allocation_type_info, allocation_pool, nullable);
memory_allocation_request request
{
.size = memory_reqs.size,
.alignment = memory_reqs.alignment,
.memory_type = &allocation_type_info,
.pool = allocation_pool,
.throw_on_fail = !nullable,
.recover_vmem_on_fail = !no_vmem_recovery
};
memory = std::make_unique<memory_block>(m_device, request);
if (auto device_memory = memory->get_vk_device_memory();
device_memory != VK_NULL_HANDLE)
{
vkBindBufferMemory(dev, value, device_memory, memory->get_vk_device_memory_offset());
return;
}
else
{
ensure(nullable);
vkDestroyBuffer(m_device, value, nullptr);
value = VK_NULL_HANDLE;
}
ensure(nullable);
vkDestroyBuffer(m_device, value, nullptr);
value = VK_NULL_HANDLE;
}
buffer::buffer(const vk::render_device& dev, VkBufferUsageFlags usage, void* host_pointer, u64 size)

View File

@ -9,9 +9,10 @@ namespace vk
{
// RPCS3-private bits smuggled through VkBufferCreateFlags. They are masked out
// (see VK_BUFFER_CREATE_SPECIAL_FLAGS_RPCS3) before the struct reaches the driver.
enum : u32
{
	VK_BUFFER_CREATE_ALLOW_NULL_RPCS3 = 0x10000000,           // If we cannot allocate memory for the buffer, just return an empty but valid object with a null handle.
	VK_BUFFER_CREATE_IGNORE_VMEM_PRESSURE_RPCS3 = 0x20000000, // If we cannot allocate memory for the buffer, do not run recovery routine to recover VRAM. Crash or return empty handle immediately instead.

	// Union of all private bits; used to strip them from the flags forwarded to Vulkan.
	VK_BUFFER_CREATE_SPECIAL_FLAGS_RPCS3 = (VK_BUFFER_CREATE_ALLOW_NULL_RPCS3 | VK_BUFFER_CREATE_IGNORE_VMEM_PRESSURE_RPCS3)
};
struct buffer_view : public unique_resource

View File

@ -128,7 +128,16 @@ namespace vk
fmt::throw_exception("No compatible memory type was found!");
}
memory = std::make_shared<vk::memory_block>(m_device, memory_req.size, memory_req.alignment, allocation_type_info, allocation_pool, nullable);
memory_allocation_request alloc_request
{
.size = memory_req.size,
.alignment = memory_req.alignment,
.memory_type = &allocation_type_info,
.pool = allocation_pool,
.throw_on_fail = !nullable
};
memory = std::make_shared<vk::memory_block>(m_device, alloc_request);
if (auto device_mem = memory->get_vk_device_memory();
device_mem != VK_NULL_HANDLE) [[likely]]
{

View File

@ -224,7 +224,7 @@ namespace vk
vmaDestroyAllocator(m_allocator);
}
mem_allocator_vk::mem_handle_t mem_allocator_vma::alloc(u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail)
mem_allocator_vk::mem_handle_t mem_allocator_vma::alloc(const memory_allocation_request& request)
{
VmaAllocation vma_alloc;
VkMemoryRequirements mem_req = {};
@ -233,11 +233,11 @@ namespace vk
auto do_vma_alloc = [&]() -> std::tuple<VkResult, u32>
{
for (const auto& memory_type_index : memory_type)
for (const auto& memory_type_index : *request.memory_type)
{
mem_req.memoryTypeBits = 1u << memory_type_index;
mem_req.size = ::align2(block_sz, alignment);
mem_req.alignment = alignment;
mem_req.size = ::align2(request.size, request.alignment);
mem_req.alignment = request.alignment;
create_info.memoryTypeBits = 1u << memory_type_index;
create_info.flags = m_allocation_flags;
@ -256,26 +256,29 @@ namespace vk
const auto [status, type] = do_vma_alloc();
if (status == VK_SUCCESS)
{
vmm_notify_memory_allocated(vma_alloc, type, block_sz, pool);
vmm_notify_memory_allocated(vma_alloc, type, request.size, request.pool);
return vma_alloc;
}
}
const auto severity = (throw_on_fail) ? rsx::problem_severity::fatal : rsx::problem_severity::severe;
if (error_code == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
vmm_handle_memory_pressure(severity))
if (request.recover_vmem_on_fail)
{
// Out of memory. Try again.
const auto [status, type] = do_vma_alloc();
if (status == VK_SUCCESS)
const auto severity = (request.throw_on_fail) ? rsx::problem_severity::fatal : rsx::problem_severity::severe;
if (error_code == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
vmm_handle_memory_pressure(severity))
{
rsx_log.warning("Renderer ran out of video memory but successfully recovered.");
vmm_notify_memory_allocated(vma_alloc, type, block_sz, pool);
return vma_alloc;
// Out of memory. Try again.
const auto [status, type] = do_vma_alloc();
if (status == VK_SUCCESS)
{
rsx_log.warning("Renderer ran out of video memory but successfully recovered.");
vmm_notify_memory_allocated(vma_alloc, type, request.size, request.pool);
return vma_alloc;
}
}
}
if (!throw_on_fail)
if (!request.throw_on_fail)
{
return VK_NULL_HANDLE;
}
@ -361,18 +364,18 @@ namespace vk
m_allocation_flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
}
mem_allocator_vk::mem_handle_t mem_allocator_vk::alloc(u64 block_sz, u64 /*alignment*/, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail)
mem_allocator_vk::mem_handle_t mem_allocator_vk::alloc(const memory_allocation_request& request)
{
VkResult error_code = VK_ERROR_UNKNOWN;
VkDeviceMemory memory;
VkMemoryAllocateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
info.allocationSize = block_sz;
info.allocationSize = request.size;
auto do_vk_alloc = [&]() -> std::tuple<VkResult, u32>
{
for (const auto& memory_type_index : memory_type)
for (const auto& memory_type_index : *request.memory_type)
{
info.memoryTypeIndex = memory_type_index;
error_code = vkAllocateMemory(m_device, &info, nullptr, &memory);
@ -389,26 +392,29 @@ namespace vk
const auto [status, type] = do_vk_alloc();
if (status == VK_SUCCESS)
{
vmm_notify_memory_allocated(memory, type, block_sz, pool);
vmm_notify_memory_allocated(memory, type, request.size, request.pool);
return memory;
}
}
const auto severity = (throw_on_fail) ? rsx::problem_severity::fatal : rsx::problem_severity::severe;
if (error_code == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
vmm_handle_memory_pressure(severity))
if (request.recover_vmem_on_fail)
{
// Out of memory. Try again.
const auto [status, type] = do_vk_alloc();
if (status == VK_SUCCESS)
const auto severity = (request.throw_on_fail) ? rsx::problem_severity::fatal : rsx::problem_severity::severe;
if (error_code == VK_ERROR_OUT_OF_DEVICE_MEMORY &&
vmm_handle_memory_pressure(severity))
{
rsx_log.warning("Renderer ran out of video memory but successfully recovered.");
vmm_notify_memory_allocated(memory, type, block_sz, pool);
return memory;
// Out of memory. Try again.
const auto [status, type] = do_vk_alloc();
if (status == VK_SUCCESS)
{
rsx_log.warning("Renderer ran out of video memory but successfully recovered.");
vmm_notify_memory_allocated(memory, type, request.size, request.pool);
return memory;
}
}
}
if (!throw_on_fail)
if (!request.throw_on_fail)
{
return VK_NULL_HANDLE;
}
@ -455,11 +461,11 @@ namespace vk
return g_render_device->get_allocator();
}
// Allocates a device memory block described by 'alloc_request' via the
// globally registered allocator (see get_current_mem_allocator()).
// NOTE: m_mem_handle may be null afterwards when the request was made with
// throw_on_fail == false and the allocation failed; callers must check
// get_vk_device_memory() before binding.
memory_block::memory_block(VkDevice dev, const memory_allocation_request& alloc_request)
	: m_device(dev), m_size(alloc_request.size)
{
	m_mem_allocator = get_current_mem_allocator();
	m_mem_handle = m_mem_allocator->alloc(alloc_request);
}
memory_block::~memory_block()

View File

@ -66,6 +66,16 @@ namespace vk
u64 size;
};
// Bundles every parameter of a device-memory allocation so allocators
// (mem_allocator_vk / mem_allocator_vma) take one descriptor instead of a
// growing argument list.
struct memory_allocation_request
{
u64 size = 0;                                          // Requested allocation size in bytes (rounded up to 'alignment' by the allocator).
u64 alignment = 1;                                     // Required alignment; also used when rounding the size.
const memory_type_info* memory_type = nullptr;         // Candidate memory type indices, tried in order until one succeeds.
vmm_allocation_pool pool = VMM_ALLOCATION_POOL_UNDEFINED; // Bookkeeping pool reported to vmm_notify_memory_allocated.
bool throw_on_fail = true;                             // If false, a failed allocation returns a null handle instead of raising a fatal error.
bool recover_vmem_on_fail = true;                      // If false, skip the vmm_handle_memory_pressure recovery-and-retry path on OOM.
};
class mem_allocator_base
{
public:
@ -76,7 +86,7 @@ namespace vk
virtual void destroy() = 0;
virtual mem_handle_t alloc(u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail) = 0;
virtual mem_handle_t alloc(const memory_allocation_request& request) = 0;
virtual void free(mem_handle_t mem_handle) = 0;
virtual void* map(mem_handle_t mem_handle, u64 offset, u64 size) = 0;
virtual void unmap(mem_handle_t mem_handle) = 0;
@ -104,7 +114,7 @@ namespace vk
void destroy() override;
mem_handle_t alloc(u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail) override;
mem_handle_t alloc(const memory_allocation_request& request) override;
void free(mem_handle_t mem_handle) override;
void* map(mem_handle_t mem_handle, u64 offset, u64 /*size*/) override;
@ -134,7 +144,7 @@ namespace vk
void destroy() override {}
mem_handle_t alloc(u64 block_sz, u64 /*alignment*/, const memory_type_info& memory_type, vmm_allocation_pool pool, bool throw_on_fail) override;
mem_handle_t alloc(const memory_allocation_request& request) override;
void free(mem_handle_t mem_handle) override;
void* map(mem_handle_t mem_handle, u64 offset, u64 size) override;
@ -147,7 +157,7 @@ namespace vk
struct memory_block
{
memory_block(VkDevice dev, u64 block_sz, u64 alignment, const memory_type_info& memory_type, vmm_allocation_pool pool, bool nullable = false);
memory_block(VkDevice dev, const memory_allocation_request& alloc_request);
virtual ~memory_block();
virtual VkDeviceMemory get_vk_device_memory();