Merge branch 'master' into master

This commit is contained in:
Kravickas 2026-04-08 07:54:53 +02:00 committed by GitHub
commit 18d0f53b21
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 154 additions and 68 deletions

View File

@ -4357,16 +4357,6 @@ public:
{
switch (op.ra)
{
case SPU_WrOutMbox:
{
res.value = wait_rchcnt(::offset32(&spu_thread::ch_out_mbox), true);
break;
}
case SPU_WrOutIntrMbox:
{
res.value = wait_rchcnt(::offset32(&spu_thread::ch_out_intr_mbox), true);
break;
}
case SPU_RdSigNotify1:
{
res.value = wait_rchcnt(::offset32(&spu_thread::ch_snr1));

View File

@ -1731,24 +1731,34 @@ namespace rsx
}
case deferred_request_command::cubemap_unwrap:
{
rsx::simple_array<copy_region_descriptor> sections(6);
for (u16 n = 0; n < 6; ++n)
rsx::simple_array<copy_region_descriptor> sections(6 * desc.mipmaps);
for (u16 n = 0, section_id = 0; n < 6; ++n)
{
sections[n] =
u16 mip_w = desc.width, mip_h = desc.height;
u16 y_offset = static_cast<u16>(desc.slice_h * n);
for (u8 mip = 0; mip < desc.mipmaps; ++mip)
{
.src = desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = 0,
.src_x = 0,
.src_y = static_cast<u16>(desc.slice_h * n),
.dst_x = 0,
.dst_y = 0,
.dst_z = n,
.src_w = desc.width,
.src_h = desc.height,
.dst_w = desc.width,
.dst_h = desc.height
};
sections[section_id++] =
{
.src = desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = mip,
.src_x = 0,
.src_y = y_offset,
.dst_x = 0,
.dst_y = 0,
.dst_z = n,
.src_w = mip_w,
.src_h = mip_h,
.dst_w = mip_w,
.dst_h = mip_h
};
y_offset += mip_h;
mip_w = std::max<u16>(mip_w / 2, 1);
mip_h = std::max<u16>(mip_h / 2, 1);
}
}
result = generate_cubemap_from_images(cmd, desc.gcm_format, desc.width, sections, desc.remap);

View File

@ -384,7 +384,21 @@ void GLGSRender::load_texture_env()
}
}
m_fs_sampler_states[i].apply(tex, fs_sampler_state[i].get());
u32 actual_mipcount = 1;
if (sampler_state->upload_context == rsx::texture_upload_context::shader_read)
{
actual_mipcount = tex.get_exact_mipmap_count();
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::mipmap_gather)
{
actual_mipcount = sampler_state->external_subresource_desc.sections_to_copy.size();
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::cubemap_unwrap)
{
actual_mipcount = sampler_state->external_subresource_desc.mipmaps;
}
m_fs_sampler_states[i].apply(tex, fs_sampler_state[i].get(), actual_mipcount > 1);
const auto texture_format = sampler_state->format_ex.format();
// Depth format redirected to BGRA8 resample stage. Do not filter to avoid bits leaking.

View File

@ -586,7 +586,8 @@ namespace gl
gl::texture_view* generate_cubemap_from_images(gl::command_context& cmd, u32 gcm_format, u16 size, const rsx::simple_array<copy_region_descriptor>& sources, const rsx::texture_channel_remap_t& remap_vector) override
{
auto _template = get_template_from_collection_impl(sources);
auto result = create_temporary_subresource_impl(cmd, _template, GL_NONE, GL_TEXTURE_CUBE_MAP, gcm_format, 0, 0, size, size, 1, 1, remap_vector, false);
const u8 mip_count = 1 + sources.reduce(0, FN(std::max<u8>(x, y.level)));
auto result = create_temporary_subresource_impl(cmd, _template, GL_NONE, GL_TEXTURE_CUBE_MAP, gcm_format, 0, 0, size, size, 1, mip_count, remap_vector, false);
copy_transfer_regions_impl(cmd, result->image(), sources);
return result;

View File

@ -72,7 +72,7 @@ namespace gl
}
// Apply sampler state settings
void sampler_state::apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image)
void sampler_state::apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image, bool allow_mipmaps)
{
set_parameteri(GL_TEXTURE_WRAP_S, wrap_mode(tex.wrap_s()));
set_parameteri(GL_TEXTURE_WRAP_T, wrap_mode(tex.wrap_t()));
@ -114,8 +114,7 @@ namespace gl
}
}
if (sampled_image->upload_context != rsx::texture_upload_context::shader_read ||
tex.get_exact_mipmap_count() == 1)
if (!allow_mipmaps || tex.get_exact_mipmap_count() == 1)
{
GLint min_filter = tex_min_filter(tex.min_filter());

View File

@ -75,7 +75,7 @@ namespace gl
return (prop == m_propertiesf.end()) ? 0 : prop->second;
}
void apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image);
void apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image, bool allow_mipmaps = true);
void apply(const rsx::vertex_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image);
void apply_defaults(GLenum default_filter = GL_NEAREST);

View File

@ -471,6 +471,10 @@ void VKGSRender::load_texture_env()
// Clamp min and max lod
actual_mipmaps = static_cast<f32>(sampler_state->external_subresource_desc.sections_to_copy.size());
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::cubemap_unwrap)
{
actual_mipmaps = static_cast<f32>(sampler_state->external_subresource_desc.mipmaps);
}
else
{
actual_mipmaps = 1.f;

View File

@ -2115,7 +2115,7 @@ void VKGSRender::load_program_env()
if (vk::emulate_conditional_rendering())
{
const vk::buffer& predicate = m_cond_render_buffer ? *m_cond_render_buffer : *vk::get_scratch_buffer(*m_current_command_buffer, 4);
const vk::buffer& predicate = m_cond_render_buffer ? *m_cond_render_buffer : *vk::get_scratch_buffer(*m_current_command_buffer, 4, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_ACCESS_NONE);
const u32 offset = cond_render_ctrl.hw_cond_active ? 0 : 4;
m_program->bind_uniform({ predicate, offset, 4 }, vk::glsl::binding_set_index_vertex, m_vs_binding_table->cr_pred_buffer_location);
}
@ -2910,7 +2910,7 @@ void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occ
else if (num_hw_queries > 0)
{
// We'll need to do some result aggregation using a compute shader.
auto scratch = vk::get_scratch_buffer(*m_current_command_buffer, num_hw_queries * 4);
vk::buffer* scratch = nullptr;
// Range latching. Because of how the query pool manages allocations using a stack, we get an inverse sequential set of handles/indices that we can easily group together.
// This drastically boosts performance on some drivers like the NVIDIA proprietary one that seems to have a rather high cost for every individual query transfer command.
@ -2918,6 +2918,11 @@ void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occ
auto copy_query_range_impl = [&]()
{
if (!scratch)
{
scratch = vk::get_scratch_buffer(*m_current_command_buffer, num_hw_queries * 4, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
}
const auto count = (query_range.last - query_range.first + 1);
m_occlusion_query_manager->get_query_result_indirect(*m_current_command_buffer, query_range.first, count, scratch->value, dst_offset);
dst_offset += count * 4;
@ -2964,7 +2969,7 @@ void VKGSRender::begin_conditional_rendering(const std::vector<rsx::reports::occ
}
// Sanity check
ensure(dst_offset <= scratch->size());
ensure(scratch && dst_offset <= scratch->size());
if (!partial_eval)
{

View File

@ -71,10 +71,11 @@ namespace vk
enum image_upload_options
{
upload_contents_async = 1,
initialize_image_layout = 2,
preserve_image_layout = 4,
source_is_gpu_resident = 8,
upload_contents_async = 0x0001,
initialize_image_layout = 0x0002,
preserve_image_layout = 0x0004,
source_is_gpu_resident = 0x0008,
source_is_userptr = 0x0010,
// meta-flags
upload_contents_inline = 0,

View File

@ -724,6 +724,7 @@ namespace vk
subres.height_in_block
);
subres.data = std::span(ext_data);
upload_flags |= source_is_userptr;
#else
const auto [scratch_buf, linear_data_scratch_offset] = vk::detile_memory_block(cmd, tiled_region, range, subres.width_in_block, subres.height_in_block, get_bpp());

View File

@ -598,7 +598,7 @@ namespace vk
const auto transfer_size = surface->get_memory_range().length();
if (transfer_size > max_copy_length || src_offset_in_buffer || surface->is_depth_surface())
{
auto scratch = vk::get_scratch_buffer(cmd, transfer_size * 4);
auto scratch = vk::get_scratch_buffer(cmd, transfer_size * 4, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
dest = scratch;
}

View File

@ -376,7 +376,7 @@ namespace vk
const auto min_scratch_size = calculate_working_buffer_size(src_length, src->aspect() | dst->aspect());
// Initialize scratch memory
auto scratch_buf = vk::get_scratch_buffer(cmd, min_scratch_size);
auto scratch_buf = vk::get_scratch_buffer(cmd, min_scratch_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
for (u32 mip_level = 0; mip_level < mipmaps; ++mip_level)
{
@ -601,7 +601,7 @@ namespace vk
const auto dst_w = dst_rect.width();
const auto dst_h = dst_rect.height();
auto scratch_buf = vk::get_scratch_buffer(cmd, std::max(src_w, dst_w) * std::max(src_h, dst_h) * 4);
auto scratch_buf = vk::get_scratch_buffer(cmd, std::max(src_w, dst_w) * std::max(src_h, dst_h) * 4, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
//1. Copy unscaled to typeless surface
VkBufferImageCopy info{};
@ -990,7 +990,7 @@ namespace vk
auto pdev = vk::get_current_renderer();
rsx::texture_uploader_capabilities caps{ .supports_dxt = pdev->get_texture_compression_bc_support(), .alignment = heap_align };
rsx::texture_memory_info opt{};
bool check_caps = true;
bool check_hw_caps = !(image_setup_flags & source_is_userptr);
vk::buffer* scratch_buf = nullptr;
u32 scratch_offset = 0;
@ -1015,13 +1015,13 @@ namespace vk
image_linear_size = row_pitch * layout.depth * (rsx::is_compressed_host_format(caps, format) ? layout.height_in_block : layout.height_in_texel);
// Only do GPU-side conversion if occupancy is good
if (check_caps)
if (check_hw_caps)
{
caps.supports_byteswap = (image_linear_size >= 1024) || (image_setup_flags & source_is_gpu_resident);
caps.supports_hw_deswizzle = caps.supports_byteswap;
caps.supports_zero_copy = caps.supports_byteswap;
caps.supports_vtc_decoding = false;
check_caps = false;
check_hw_caps = false;
}
auto buf_allocator = [&](usz) -> std::tuple<void*, usz>
@ -1124,7 +1124,7 @@ namespace vk
scratch_buf_size += (image_linear_size * 5) / 4;
}
scratch_buf = vk::get_scratch_buffer(cmd2, scratch_buf_size);
scratch_buf = vk::get_scratch_buffer(cmd2, scratch_buf_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT);
buffer_copies.reserve(subresource_layout.size());
}
@ -1183,13 +1183,6 @@ namespace vk
{
ensure(scratch_buf);
// WAW hazard - complete previous work before executing any transfers
insert_buffer_memory_barrier(
cmd2, scratch_buf->value, 0, scratch_offset,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT);
if (upload_commands.size() > 1)
{
auto range_ptr = buffer_copies.data();
@ -1199,8 +1192,9 @@ namespace vk
range_ptr += op.second;
}
}
else if (!buffer_copies.empty())
else
{
ensure(!buffer_copies.empty());
vkCmdCopyBuffer(cmd2, upload_buffer->value, scratch_buf->value, static_cast<u32>(buffer_copies.size()), buffer_copies.data());
}
@ -1279,7 +1273,10 @@ namespace vk
vk::load_dma(range.start, section_length);
// Allocate scratch and prepare for the GPU job
const auto scratch_buf = vk::get_scratch_buffer(cmd, section_length * 3); // 0 = linear data, 1 = padding (deswz), 2 = tiled data
const auto scratch_buf = vk::get_scratch_buffer(cmd, section_length * 3, // 0 = linear data, 1 = padding (deswz), 2 = tiled data
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT);
const auto tiled_data_scratch_offset = section_length * 2;
const auto linear_data_scratch_offset = 0u;
@ -1313,16 +1310,16 @@ namespace vk
};
vkCmdCopyBuffer(cmd, dma_mapping.second->value, scratch_buf->value, 1, &copy_rgn);
// Barrier
// Post-Transfer barrier
vk::insert_buffer_memory_barrier(
cmd, scratch_buf->value, linear_data_scratch_offset, section_length,
cmd, scratch_buf->value, tiled_data_scratch_offset, section_length,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
// Detile
vk::get_compute_task<vk::cs_tile_memcpy<RSX_detiler_op::decode>>()->run(cmd, config);
// Barrier
// Post-Compute barrier
vk::insert_buffer_memory_barrier(
cmd, scratch_buf->value, linear_data_scratch_offset, static_cast<u32>(width) * height * bpp,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,

View File

@ -100,7 +100,7 @@ namespace vk
auto dma_sync_region = valid_range;
dma_mapping_handle dma_mapping = { 0, nullptr };
auto dma_sync = [&dma_sync_region, &dma_mapping](bool load, bool force = false)
auto dma_sync = [&](bool load, bool force = false)
{
if (dma_mapping.second && !force)
{
@ -130,9 +130,10 @@ namespace vk
dma_sync_region = tiled_region.tile_align(dma_sync_region);
}
#endif
auto working_buffer = vk::get_scratch_buffer(cmd, working_buffer_length);
u32 result_offset = 0;
auto working_buffer = vk::get_scratch_buffer(cmd, working_buffer_length,
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT);
VkBufferImageCopy region = {};
region.imageSubresource = { src->aspect(), 0, 0, 1 };
@ -220,7 +221,7 @@ namespace vk
// Transfer -> Compute barrier
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, dst_offset, dma_sync_region.length(),
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT);
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT);
}
// Prepare payload
@ -284,8 +285,10 @@ namespace vk
if (require_rw_barrier)
{
vk::insert_buffer_memory_barrier(cmd, working_buffer->value, result_offset, dma_sync_region.length(),
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT);
VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT);
}
if (rsx_pitch == real_pitch) [[likely]]
@ -332,6 +335,14 @@ namespace vk
vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dma_mapping.second->value, 1, &region);
}
// Post-transfer barrier on dma layer
vk::insert_buffer_memory_barrier(
cmd, dma_mapping.second->value,
dma_mapping.first, dma_sync_region.length(),
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT
);
src->pop_layout(cmd);
VkBufferMemoryBarrier2KHR mem_barrier =
@ -761,8 +772,9 @@ namespace vk
const rsx::simple_array<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector)
{
auto _template = get_template_from_collection_impl(sections_to_copy);
const u8 mip_count = 1 + sections_to_copy.reduce(0, FN(std::max<u8>(x, y.level)));
auto result = create_temporary_subresource_view_impl(cmd, _template, VK_IMAGE_TYPE_2D,
VK_IMAGE_VIEW_TYPE_CUBE, gcm_format, 0, 0, size, size, 1, 1, remap_vector, false);
VK_IMAGE_VIEW_TYPE_CUBE, gcm_format, 0, 0, size, size, 1, mip_count, remap_vector, false);
if (!result)
{
@ -772,7 +784,7 @@ namespace vk
const auto image = result->image();
VkImageAspectFlags dst_aspect = vk::get_aspect_flags(result->info.format);
VkImageSubresourceRange dst_range = { dst_aspect, 0, 1, 0, 6 };
VkImageSubresourceRange dst_range = { dst_aspect, 0, mip_count, 0, 6 };
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_range);
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))
@ -786,6 +798,14 @@ namespace vk
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
vk::insert_image_memory_barrier(
cmd,
image->handle(),
image->current_layout, image->current_layout,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
dst_range);
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
@ -821,6 +841,14 @@ namespace vk
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
vk::insert_image_memory_barrier(
cmd,
image->handle(),
image->current_layout, image->current_layout,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
dst_range);
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
@ -859,6 +887,14 @@ namespace vk
}
}
vk::insert_image_memory_barrier(
cmd,
image->handle(),
image->current_layout, image->current_layout,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
dst_range);
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
@ -895,6 +931,14 @@ namespace vk
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &dst_range);
}
vk::insert_image_memory_barrier(
cmd,
image->handle(),
image->current_layout, image->current_layout,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
dst_range);
copy_transfer_regions_impl(cmd, image, sections_to_copy);
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, dst_range);
@ -1015,6 +1059,14 @@ namespace vk
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, image->value, image->current_layout, &clear, 1, &range);
}
vk::insert_image_memory_barrier(
cmd,
image->handle(),
image->current_layout, image->current_layout,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
range);
}
}
}

View File

@ -177,7 +177,7 @@ namespace vk
return { scratch_buffer.get(), is_new };
}
vk::buffer* get_scratch_buffer(const vk::command_buffer& cmd, u64 min_required_size, bool zero_memory)
vk::buffer* get_scratch_buffer(const vk::command_buffer& cmd, u64 min_required_size, VkPipelineStageFlags dst_stage_flags, VkAccessFlags dst_access, bool zero_memory)
{
const auto [buf, init_mem] = get_scratch_buffer(cmd.get_queue_family(), min_required_size);
@ -191,6 +191,12 @@ namespace vk
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT);
}
else if (dst_access != VK_ACCESS_NONE)
{
insert_buffer_memory_barrier(cmd, buf->value, 0, min_required_size,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_flags,
VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, dst_access);
}
return buf;
}

View File

@ -6,7 +6,13 @@ namespace vk
VkSampler null_sampler();
image_view* null_image_view(const command_buffer& cmd, VkImageViewType type);
image* get_typeless_helper(VkFormat format, rsx::format_class format_class, u32 requested_width, u32 requested_height);
buffer* get_scratch_buffer(const command_buffer& cmd, u64 min_required_size, bool zero_memory = false);
buffer* get_scratch_buffer(
const command_buffer& cmd,
u64 min_required_size,
VkPipelineStageFlags dst_stage_flags,
VkAccessFlags dst_access,
bool zero_memory = false);
void clear_scratch_resources();
}