This commit is contained in:
goeiecool9999 2026-04-04 18:35:27 +00:00 committed by GitHub
commit 08a33206c9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 149 additions and 145 deletions

View File

@ -124,12 +124,8 @@ void LatteTextureReadbackInfoVk::StartTransfer()
renderer->barrier_image<VulkanRenderer::ANY_TRANSFER | VulkanRenderer::IMAGE_WRITE, VulkanRenderer::TRANSFER_READ>(baseTexture, region.imageSubresource, VK_IMAGE_LAYOUT_GENERAL);
renderer->barrier_sequentializeTransfer();
vkCmdCopyImageToBuffer(renderer->getCurrentCommandBuffer(), baseTexture->GetImageObj()->m_image, VK_IMAGE_LAYOUT_GENERAL, m_buffer, 1, &region);
renderer->barrier_sequentializeTransfer();
renderer->barrier_image<VulkanRenderer::TRANSFER_READ, VulkanRenderer::ANY_TRANSFER | VulkanRenderer::IMAGE_WRITE>(baseTexture, region.imageSubresource, VK_IMAGE_LAYOUT_GENERAL); // make sure transfer is finished before image is modified
renderer->barrier_bufferRange<VulkanRenderer::TRANSFER_WRITE, VulkanRenderer::HOST_READ>(m_buffer, m_buffer_offset, m_image_size); // make sure transfer is finished before result is read

View File

@ -3222,6 +3222,14 @@ void VulkanRenderer::ProcessDestructionQueue()
m_spinlockDestructionQueue.unlock();
}
// Runs the supplied callback once for every texture view referenced by this descriptor set.
void VkDescriptorSetInfo::ForEachView(const std::function<void(LatteTextureViewVk*)>& fun)
{
	for (LatteTextureViewVk* texView : list_referencedViews)
		fun(texView);
}
VkDescriptorSetInfo::~VkDescriptorSetInfo()
{
for (auto& it : list_referencedViews)
@ -3701,9 +3709,9 @@ void VulkanRenderer::bufferCache_upload(uint8* buffer, sint32 size, uint32 buffe
vkMemAllocator.FlushReservation(uploadResv);
barrier_bufferRange<ANY_TRANSFER | HOST_WRITE, ANY_TRANSFER,
BUFFER_SHADER_READ, TRANSFER_WRITE>(
uploadResv.vkBuffer, uploadResv.bufferOffset, uploadResv.size, // make sure any in-flight transfers are completed
barrier_bufferRange<TRANSFER_WRITE | HOST_WRITE, TRANSFER_READ,
TRANSFER_READ | BUFFER_SHADER_READ, TRANSFER_WRITE>(
uploadResv.vkBuffer, uploadResv.bufferOffset, uploadResv.size, // make sure source data is visible
m_bufferCache, bufferOffset, size); // make sure all reads are completed before we overwrite the data
VkBufferCopy region;
@ -3712,7 +3720,7 @@ void VulkanRenderer::bufferCache_upload(uint8* buffer, sint32 size, uint32 buffe
region.size = size;
vkCmdCopyBuffer(m_state.currentCommandBuffer, uploadResv.vkBuffer, m_bufferCache, 1, &region);
barrier_sequentializeTransfer();
barrier_bufferRange<TRANSFER_WRITE, BUFFER_SHADER_READ>(m_bufferCache, bufferOffset, size);
}
void VulkanRenderer::bufferCache_copy(uint32 srcOffset, uint32 dstOffset, uint32 size)
@ -3720,7 +3728,10 @@ void VulkanRenderer::bufferCache_copy(uint32 srcOffset, uint32 dstOffset, uint32
cemu_assert_debug(!m_useHostMemoryForCache);
draw_endRenderPass();
barrier_sequentializeTransfer();
barrier_bufferRange<BUFFER_SHADER_WRITE | TRANSFER_WRITE, TRANSFER_READ,
TRANSFER_READ | BUFFER_SHADER_READ, TRANSFER_WRITE>(
m_bufferCache, srcOffset, size, // make sure source data is visible
m_bufferCache, dstOffset, size); // make sure all reads are completed before we overwrite the data
bool isOverlapping = (srcOffset + size) > dstOffset && (srcOffset) < (dstOffset + size);
cemu_assert_debug(!isOverlapping);
@ -3731,7 +3742,7 @@ void VulkanRenderer::bufferCache_copy(uint32 srcOffset, uint32 dstOffset, uint32
bufferCopy.size = size;
vkCmdCopyBuffer(m_state.currentCommandBuffer, m_bufferCache, m_bufferCache, 1, &bufferCopy);
barrier_sequentializeTransfer();
barrier_bufferRange<TRANSFER_WRITE, BUFFER_SHADER_READ>(m_bufferCache, dstOffset, size);
}
void VulkanRenderer::bufferCache_copyStreamoutToMainBuffer(uint32 srcOffset, uint32 dstOffset, uint32 size)
@ -3748,12 +3759,10 @@ void VulkanRenderer::bufferCache_copyStreamoutToMainBuffer(uint32 srcOffset, uin
else
dstBuffer = m_bufferCache;
barrier_bufferRange<BUFFER_SHADER_WRITE, TRANSFER_READ,
ANY_TRANSFER | BUFFER_SHADER_READ, TRANSFER_WRITE>(
m_xfbRingBuffer, srcOffset, size, // wait for all writes to finish
dstBuffer, dstOffset, size); // wait for all reads to finish
barrier_sequentializeTransfer();
barrier_bufferRange<BUFFER_SHADER_WRITE | TRANSFER_WRITE, TRANSFER_READ,
TRANSFER_READ | BUFFER_SHADER_READ, TRANSFER_WRITE>(
m_xfbRingBuffer, srcOffset, size, // make sure source data is visible
dstBuffer, dstOffset, size); // make sure all reads are completed before we overwrite the data
VkBufferCopy bufferCopy{};
bufferCopy.srcOffset = srcOffset;
@ -3761,7 +3770,7 @@ void VulkanRenderer::bufferCache_copyStreamoutToMainBuffer(uint32 srcOffset, uin
bufferCopy.size = size;
vkCmdCopyBuffer(m_state.currentCommandBuffer, m_xfbRingBuffer, dstBuffer, 1, &bufferCopy);
barrier_sequentializeTransfer();
barrier_bufferRange<TRANSFER_WRITE, BUFFER_SHADER_READ>(dstBuffer, dstOffset, size); // make sure writes are visible to host
}
void VulkanRenderer::AppendOverlayDebugInfo()

View File

@ -8,6 +8,7 @@
#include "Cafe/HW/Latte/Renderer/Vulkan/CachedFBOVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VKRMemoryManager.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/SwapchainInfoVk.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "util/math/vector2.h"
#include "util/helpers/Semaphore.h"
#include "util/containers/flat_hash_map.hpp"
@ -26,6 +27,8 @@ struct VkDescriptorSetInfo
{
VKRObjectDescriptorSet* m_vkObjDescriptorSet{};
void ForEachView(const std::function<void(LatteTextureViewVk*)>& fun);
~VkDescriptorSetInfo();
std::vector<LatteTextureViewVk*> list_referencedViews;
@ -409,7 +412,7 @@ private:
}
// invalidation / flushing
uint64 currentFlushIndex{0};
uint64 currentFlushIndex{1};
bool requestFlush{ false }; // flush after every draw operation. The renderpass dependencies dont handle dependencies across multiple drawcalls inside a single renderpass
// draw sequence
@ -546,7 +549,8 @@ private:
void draw_handleSpecialState5();
// draw synchronization helper
void sync_inputTexturesChanged();
void sync_performFlushBarrier(CachedFBOVk* fboVk);
bool sync_isInputTexturesSyncRequired();
void sync_RenderPassLoadTextures(CachedFBOVk* fboVk);
void sync_RenderPassStoreTextures(CachedFBOVk* fboVk);
@ -825,6 +829,7 @@ private:
bufMemBarrier.offset = offset;
bufMemBarrier.size = size;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStages, dstStages, 0, 0, nullptr, 1, &bufMemBarrier, 0, nullptr);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
}
template<uint32 TSrcSyncOpA, uint32 TDstSyncOpA, uint32 TSrcSyncOpB, uint32 TDstSyncOpB>
@ -863,32 +868,7 @@ private:
bufMemBarrier[1].size = sizeB;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStagesA|srcStagesB, dstStagesA|dstStagesB, 0, 0, nullptr, 2, bufMemBarrier, 0, nullptr);
}
// Emits a coarse global memory barrier from the transfer stage to all later commands.
// Used to serialize transfer operations (vkCmdCopy*) against everything recorded after them.
void barrier_sequentializeTransfer()
{
VkMemoryBarrier memBarrier{};
memBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
memBarrier.pNext = nullptr;
// source: the transfer stage; destination: every subsequent stage
VkPipelineStageFlags srcStages = VK_PIPELINE_STAGE_TRANSFER_BIT;
VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
memBarrier.dstAccessMask = 0;
// NOTE(review): MEMORY_READ/MEMORY_WRITE already subsume the transfer access bits,
// so the two assignments above appear redundant once these ORs run — possibly an
// artifact of this text being an interleaved diff; confirm against the real file.
memBarrier.srcAccessMask |= (VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT);
memBarrier.dstAccessMask |= (VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT);
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStages, dstStages, 0, 1, &memBarrier, 0, nullptr, 0, nullptr);
}
void barrier_sequentializeCommand()
{
VkPipelineStageFlags srcStages = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStages, dstStages, 0, 0, nullptr, 0, nullptr, 0, nullptr);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
}
template<uint32 TSrcSyncOp, uint32 TDstSyncOp>
@ -916,6 +896,7 @@ private:
0, NULL,
0, NULL,
1, &imageMemBarrier);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
}
template<uint32 TSrcSyncOp, uint32 TDstSyncOp>

View File

@ -975,127 +975,142 @@ VkDescriptorSetInfo* VulkanRenderer::draw_getOrCreateDescriptorSet(PipelineInfo*
return dsInfo;
}
void VulkanRenderer::sync_inputTexturesChanged()
void VulkanRenderer::sync_performFlushBarrier(CachedFBOVk* fboVk)
{
bool writeFlushRequired = false;
size_t barrierCount = 0;
VkImageMemoryBarrier imageMemBarriers[8 + 2 + LATTE_NUM_MAX_TEX_UNITS]{};
auto addImgMemBarrierForTexView = [&](LatteTextureViewVk* view) {
VkImageSubresourceRange range = {
view->GetBaseImage()->GetImageAspect(),
(uint32_t)view->firstMip,
(uint32_t)view->numMip,
(uint32_t)view->firstSlice,
(uint32_t)view->numSlice};
auto baseTex = (LatteTextureVk*)view->baseTexture;
const auto idx = barrierCount++;
imageMemBarriers[idx].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemBarriers[idx].image = baseTex->GetImageObj()->m_image;
imageMemBarriers[idx].subresourceRange = range;
imageMemBarriers[idx].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemBarriers[idx].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
imageMemBarriers[idx].oldLayout = VK_IMAGE_LAYOUT_GENERAL;
imageMemBarriers[idx].newLayout = VK_IMAGE_LAYOUT_GENERAL;
imageMemBarriers[idx].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
imageMemBarriers[idx].srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
imageMemBarriers[idx].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
imageMemBarriers[idx].dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
imageMemBarriers[idx].dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
view->GetBaseImage()->m_vkFlushIndex = m_state.currentFlushIndex;
};
for (auto& i : fboVk->colorBuffer)
{
if (!i.texture)
continue;
addImgMemBarrierForTexView(static_cast<LatteTextureViewVk*>(i.texture));
}
if (auto i = fboVk->depthBuffer.texture)
{
addImgMemBarrierForTexView(static_cast<LatteTextureViewVk*>(i));
}
if (m_state.activeVertexDS)
{
for (auto& tex : m_state.activeVertexDS->list_fboCandidates)
{
tex->m_vkFlushIndex_read = m_state.currentFlushIndex;
if (tex->m_vkFlushIndex_write == m_state.currentFlushIndex)
writeFlushRequired = true;
}
}
m_state.activeVertexDS->ForEachView(addImgMemBarrierForTexView);
if (m_state.activeGeometryDS)
{
for (auto& tex : m_state.activeGeometryDS->list_fboCandidates)
{
tex->m_vkFlushIndex_read = m_state.currentFlushIndex;
if (tex->m_vkFlushIndex_write == m_state.currentFlushIndex)
writeFlushRequired = true;
}
}
m_state.activeGeometryDS->ForEachView(addImgMemBarrierForTexView);
if (m_state.activePixelDS)
{
for (auto& tex : m_state.activePixelDS->list_fboCandidates)
{
tex->m_vkFlushIndex_read = m_state.currentFlushIndex;
if (tex->m_vkFlushIndex_write == m_state.currentFlushIndex)
writeFlushRequired = true;
}
}
// barrier here
if (writeFlushRequired)
{
VkMemoryBarrier memoryBarrier{};
memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
memoryBarrier.srcAccessMask = 0;
memoryBarrier.dstAccessMask = 0;
m_state.activePixelDS->ForEachView(addImgMemBarrierForTexView);
VkPipelineStageFlags srcStage = 0;
VkPipelineStageFlags dstStage = 0;
VkPipelineStageFlags stages = 0;
// src
srcStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
stages |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
srcStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, stages, stages, 0, 0, nullptr, 0, nullptr, barrierCount, imageMemBarriers);
// dst
dstStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
dstStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
m_state.currentFlushIndex++;
}
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStage, dstStage, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
m_state.currentFlushIndex++;
}
// Returns true when any texture sampled by the currently bound vertex/geometry/pixel
// descriptor sets has pending writes that a flush barrier has not yet covered
// (i.e. its last recorded write index is newer than its last barrier index).
// Side effect: stamps every inspected texture's m_vkFlushIndex_read with the
// current flush index, marking it as read in this flush window.
bool VulkanRenderer::sync_isInputTexturesSyncRequired()
{
bool required = false;
auto checkSync = [&](LatteTextureViewVk* texViewVk) {
LatteTextureVk* texVk = texViewVk->GetBaseImage();
// record that this texture is read at the current flush index
texVk->m_vkFlushIndex_read = m_state.currentFlushIndex;
// read-after-write hazard: texture was written after its last flush barrier
if (texVk->m_vkFlushIndex < texVk->m_vkFlushIndex_write)
required = true;
};
// check all textures referenced by each active shader stage's descriptor set
if (m_state.activeVertexDS)
m_state.activeVertexDS->ForEachView(checkSync);
if (m_state.activeGeometryDS)
m_state.activeGeometryDS->ForEachView(checkSync);
if (m_state.activePixelDS)
m_state.activePixelDS->ForEachView(checkSync);
return required;
}
void VulkanRenderer::sync_RenderPassLoadTextures(CachedFBOVk* fboVk)
{
bool readFlushRequired = false;
// always called after draw_inputTexturesChanged()
bool flushRequired = false;
auto checkImageSyncHazard = [&](LatteTextureVk* texVk, bool isWrite = false) {
//RAW / WAW
if (texVk->m_vkFlushIndex < texVk->m_vkFlushIndex_write)
flushRequired = true;
//WAR
if (isWrite && texVk->m_vkFlushIndex < texVk->m_vkFlushIndex_read)
flushRequired = true;
};
for (auto& tex : fboVk->GetTextures())
checkImageSyncHazard((LatteTextureVk*)tex, true);
auto checkViewSync = [&](LatteTextureViewVk* view) {
checkImageSyncHazard(view->GetBaseImage());
};
if (m_state.activeVertexDS)
m_state.activeVertexDS->ForEachView(checkViewSync);
if (m_state.activeGeometryDS)
m_state.activeGeometryDS->ForEachView(checkViewSync);
if (m_state.activePixelDS)
m_state.activePixelDS->ForEachView(checkViewSync);
if (flushRequired)
sync_performFlushBarrier(fboVk);
for (auto& tex : fboVk->GetTextures())
{
LatteTextureVk* texVk = (LatteTextureVk*)tex;
// write-before-write
if (texVk->m_vkFlushIndex_write == m_state.currentFlushIndex)
readFlushRequired = true;
texVk->m_vkFlushIndex_write = m_state.currentFlushIndex;
// todo - also check for write-before-write ?
if (texVk->m_vkFlushIndex_read == m_state.currentFlushIndex)
readFlushRequired = true;
texVk->m_vkFlushIndex_read = m_state.currentFlushIndex;
}
// barrier here
if (readFlushRequired)
{
VkMemoryBarrier memoryBarrier{};
memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
memoryBarrier.srcAccessMask = 0;
memoryBarrier.dstAccessMask = 0;
VkPipelineStageFlags srcStage = 0;
VkPipelineStageFlags dstStage = 0;
auto updateViewSync = [&](LatteTextureViewVk* view) {
view->GetBaseImage()->m_vkFlushIndex_read = m_state.currentFlushIndex;
};
// src
srcStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
if (m_state.activeVertexDS)
m_state.activeVertexDS->ForEachView(updateViewSync);
if (m_state.activeGeometryDS)
m_state.activeGeometryDS->ForEachView(updateViewSync);
if (m_state.activePixelDS)
m_state.activePixelDS->ForEachView(updateViewSync);
srcStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
// dst
dstStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
dstStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStage, dstStage, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
m_state.currentFlushIndex++;
}
}
void VulkanRenderer::sync_RenderPassStoreTextures(CachedFBOVk* fboVk)
{
uint32 flushIndex = m_state.currentFlushIndex;
for (auto& tex : fboVk->GetTextures())
{
LatteTextureVk* texVk = (LatteTextureVk*)tex;
texVk->m_vkFlushIndex_write = flushIndex;
texVk->m_vkFlushIndex_write = m_state.currentFlushIndex;
}
}
@ -1164,22 +1179,24 @@ void VulkanRenderer::draw_setRenderPass()
auto vkObjRenderPass = fboVk->GetRenderPassObj();
auto vkObjFramebuffer = fboVk->GetFramebufferObj();
bool overridePassReuse = m_state.hasRenderSelfDependency && (GetConfig().vk_accurate_barriers || m_state.activePipelineInfo->neverSkipAccurateBarrier);
const bool syncSkipAllowed = !m_state.hasRenderSelfDependency || !(GetConfig().vk_accurate_barriers || m_state.activePipelineInfo->neverSkipAccurateBarrier);
if (!overridePassReuse && m_state.activeRenderpassFBO == fboVk)
const bool inputSyncNecessary = m_state.descriptorSetsChanged && sync_isInputTexturesSyncRequired();
const bool FBOChanged = m_state.activeRenderpassFBO != fboVk;
const bool passReusable = !FBOChanged && !inputSyncNecessary && syncSkipAllowed;
if (passReusable)
{
if (m_state.descriptorSetsChanged)
sync_inputTexturesChanged();
// reuse previous render pass
return;
}
draw_endRenderPass();
if (m_state.descriptorSetsChanged)
sync_inputTexturesChanged();
// assume that FBO changed, update self-dependency state
m_state.hasRenderSelfDependency = fboVk->CheckForCollision(m_state.activeVertexDS, m_state.activeGeometryDS, m_state.activePixelDS);
sync_RenderPassLoadTextures(fboVk);
if (sync_isInputTexturesSyncRequired())
sync_performFlushBarrier(fboVk);
if (m_featureControl.deviceExtensions.dynamic_rendering)
{
@ -1213,6 +1230,7 @@ void VulkanRenderer::draw_endRenderPass()
{
if (!m_state.activeRenderpassFBO)
return;
if (m_featureControl.deviceExtensions.dynamic_rendering)
vkCmdEndRenderingKHR(m_state.currentCommandBuffer);
else