Merge branch 'master' into guigui

This commit is contained in:
Megamouse 2026-04-06 22:01:57 +02:00 committed by GitHub
commit bf51e3b61f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 1241 additions and 1615 deletions

2
3rdparty/FAudio vendored

@ -1 +1 @@
Subproject commit dc034fc671b07bbd14e8410d5dd6be6da38fdf6d
Subproject commit 0372329dbb56e7814d0dea7b6eafa7a613bd8042

@ -1 +1 @@
Subproject commit 3982730833b6daefe77dcfb32b5c282851640c17
Subproject commit a0fba77b6f9cfbdb71f8bbec58b6ac4e5e3b1097

@ -1 +1 @@
Subproject commit 683181b47cfabd293e3ea409f838915b8297a4fd
Subproject commit 5848e584a1b606de26e3dbd1c7e4ecbc34f807a6

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,8 @@
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "cellPamf.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Utilities/BitField.h"
// Error Codes
enum CellDmuxError :u32
@ -18,6 +19,10 @@ enum CellDmuxStreamType : s32
CELL_DMUX_STREAM_TYPE_UNDEF = 0,
CELL_DMUX_STREAM_TYPE_PAMF = 1,
CELL_DMUX_STREAM_TYPE_TERMINATOR = 2,
// Only used in cellSail
CELL_DMUX_STREAM_TYPE_MP4 = 0x81,
CELL_DMUX_STREAM_TYPE_AVI = 0x82
};
enum CellDmuxMsgType : s32
@ -48,13 +53,14 @@ struct CellDmuxEsMsg
// Demuxer creation descriptor (guest big-endian struct).
// NOTE(review): this diff hunk shows BOTH the removed member (reserved[2])
// and its replacement (reserved1/reserved2); only one layout exists in the
// actual file on either side of the commit.
struct CellDmuxType
{
be_t<s32> streamType; // CellDmuxStreamType
be_t<u32> reserved[2]; // old layout: anonymous reserved words (removed)
be_t<s32> reserved1; // new layout: reserved words split into named fields
be_t<s32> reserved2;
};
// Extended demuxer creation descriptor carrying stream-specific info.
// NOTE(review): the diff shows both the removed pair (be_t members) and the
// added pair (typed guest pointer); only one pair exists per file revision.
struct CellDmuxType2
{
be_t<s32> streamType; // CellDmuxStreamType
be_t<u32> streamSpecificInfo; // old: raw guest address
be_t<s32> streamType; // new revision of the same field (diff overlay)
vm::bcptr<void> streamSpecificInfo; // new: const guest pointer instead of raw u32
};
struct CellDmuxResource
@ -73,8 +79,8 @@ struct CellDmuxResourceEx
be_t<u32> memSize;
be_t<u32> ppuThreadPriority;
be_t<u32> ppuThreadStackSize;
be_t<u32> spurs_addr;
u8 priority[8];
vm::bptr<void> spurs; // CellSpurs*
be_t<u64, 1> priority;
be_t<u32> maxContention;
};
@ -85,33 +91,23 @@ struct CellDmuxResourceSpurs
be_t<u32> maxContention;
};
/*
struct CellDmuxResource2Ex
{
b8 isResourceEx; //true
CellDmuxResourceEx resourceEx;
};
struct CellDmuxResource2NoEx
{
b8 isResourceEx; //false
CellDmuxResource resource;
};
*/
// Tagged resource descriptor: isResourceEx selects which union member is
// valid. NOTE(review): the diff overlays the old flat layout (memAddr..shit[4])
// with the new union-based layout; only one exists per file revision.
struct CellDmuxResource2
{
b8 isResourceEx; // true -> resourceEx, false -> resource
be_t<u32> memAddr; // old flat layout (removed in this commit)
be_t<u32> memSize;
be_t<u32> ppuThreadPriority;
be_t<u32> ppuThreadStackSize;
be_t<u32> shit[4]; // old layout: unidentified trailing words
union
{
CellDmuxResource resource; // used when isResourceEx == false
CellDmuxResourceEx resourceEx; // used when isResourceEx == true
};
};
using CellDmuxCbMsg = u32(u32 demuxerHandle, vm::cptr<CellDmuxMsg> demuxerMsg, vm::ptr<void> cbArg);
struct DmuxContext;
struct DmuxEsContext;
using CellDmuxCbEsMsg = u32(u32 demuxerHandle, u32 esHandle, vm::cptr<CellDmuxEsMsg> esMsg, vm::ptr<void> cbArg);
using CellDmuxCbMsg = u32(vm::ptr<DmuxContext> demuxerHandle, vm::cptr<CellDmuxMsg> demuxerMsg, vm::ptr<void> cbArg);
using CellDmuxCbEsMsg = u32(vm::ptr<DmuxContext> demuxerHandle, vm::ptr<DmuxEsContext> esHandle, vm::cptr<CellDmuxEsMsg> esMsg, vm::ptr<void> cbArg);
// Used for internal callbacks as well
template <typename F>
@ -177,6 +173,70 @@ struct DmuxAuInfo
be_t<u32> specific_info_size;
};
// One entry of the per-ES access-unit queue stored in guest memory.
// Layout is ABI-fixed (see CHECK_SIZE below).
struct DmuxAuQueueElement
{
be_t<u32> index; // slot index within the AU queue
u8 unk; // unused
DmuxAuInfo au_info; // access-unit description (address, size, PTS/DTS, ...)
};

CHECK_SIZE(DmuxAuQueueElement, 0x38);
// Demuxer state flags (bit flags, not sequential values — note the shifts).
enum DmuxState : u32
{
DMUX_STOPPED = 1 << 0,
DMUX_RUNNING = 1 << 1,
};
// Demuxer context living in guest memory; the public CellDmuxHandle is simply
// a pointer to this struct. Layout and alignment are ABI-fixed (see
// CHECK_SIZE_ALIGN below) — do not reorder members.
struct alignas(0x10) DmuxContext // CellDmuxHandle = DmuxContext*
{
vm::bptr<DmuxContext> _this; // self pointer (guest address of this struct)
be_t<u32> _this_size; // size of this allocation
be_t<u32> version;
be_t<u32> dmux_state; // DmuxState flags
CellDmuxType dmux_type; // creation parameters
CellDmuxCb dmux_cb; // demuxer-level callback + arg
b8 stream_is_set; // a stream was set via cellDmuxSetStream
vm::bptr<void> core_handle; // opaque handle of the core (e.g. PAMF) demuxer
be_t<u32> version_; // Same value as 'version'
be_t<u64> user_data;
be_t<s32> max_enabled_es_num; // capacity of enabled elementary streams
be_t<s32> enabled_es_num; // currently enabled elementary streams
be_t<u32> _dx_mhd; // sys_mutex_t
u8 reserved[0x7c];
};

CHECK_SIZE_ALIGN(DmuxContext, 0xc0, 0x10);
// Per-elementary-stream context in guest memory; the public CellDmuxEsHandle
// is a pointer to this struct. Layout/alignment are ABI-fixed (see
// CHECK_SIZE_ALIGN below) — do not reorder members.
struct alignas(0x10) DmuxEsContext // CellDmuxEsHandle = DmuxEsContext*
{
be_t<u32> _dx_mes; // sys_mutex_t
be_t<u32> is_enabled;
be_t<u32> error_mem_size;
be_t<u32> error_count;
vm::bptr<void> error_mem_addr;
vm::bptr<DmuxEsContext> _this; // self pointer (guest address of this struct)
be_t<u32> _this_size;
be_t<s32> _this_index;
vm::bptr<DmuxContext> dmux_handle; // owning demuxer
CellDmuxEsCb es_cb; // ES-level callback + arg
vm::bptr<void> core_es_handle; // opaque per-ES handle of the core demuxer
bf_t<be_t<u32>, 0, 1> flush_started; // single-bit flag in a big-endian word
// Access-unit ring-queue bookkeeping (indices into the AU queue).
struct
{
be_t<s32> max_size;
be_t<s32> allocated_size;
be_t<s32> size;
be_t<s32> front;
be_t<s32> back;
be_t<s32> allocated_back;
}
au_queue;
};

CHECK_SIZE_ALIGN(DmuxEsContext, 0x50, 0x10);
using DmuxNotifyDemuxDone = error_code(vm::ptr<void>, u32, vm::ptr<void>);
using DmuxNotifyFatalErr = error_code(vm::ptr<void>, u32, vm::ptr<void>);
using DmuxNotifyProgEndCode = error_code(vm::ptr<void>, vm::ptr<void>);
@ -194,10 +254,10 @@ using CellDmuxCoreOpSetStream = error_code(vm::ptr<void>, vm::cptr<void>, u32, b
using CellDmuxCoreOpReleaseAu = error_code(vm::ptr<void>, vm::ptr<void>, u32);
using CellDmuxCoreOpQueryEsAttr = error_code(vm::cptr<void>, vm::cptr<void>, vm::ptr<CellDmuxPamfEsAttr>);
using CellDmuxCoreOpEnableEs = error_code(vm::ptr<void>, vm::cptr<void>, vm::cptr<CellDmuxEsResource>, vm::cptr<DmuxCb<DmuxEsNotifyAuFound>>, vm::cptr<DmuxCb<DmuxEsNotifyFlushDone>>, vm::cptr<void>, vm::pptr<void>);
using CellDmuxCoreOpDisableEs = u32(vm::ptr<void>);
using CellDmuxCoreOpFlushEs = u32(vm::ptr<void>);
using CellDmuxCoreOpResetEs = u32(vm::ptr<void>);
using CellDmuxCoreOpResetStreamAndWaitDone = u32(vm::ptr<void>);
using CellDmuxCoreOpDisableEs = error_code(vm::ptr<void>);
using CellDmuxCoreOpFlushEs = error_code(vm::ptr<void>);
using CellDmuxCoreOpResetEs = error_code(vm::ptr<void>);
using CellDmuxCoreOpResetStreamAndWaitDone = error_code(vm::ptr<void>);
struct CellDmuxCoreOps
{

View File

@ -2591,7 +2591,7 @@ template <bool raw_es>
error_code _CellDmuxCoreOpEnableEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle, vm::cptr<void> esFilterId, vm::cptr<CellDmuxEsResource> esResource, vm::cptr<DmuxCb<DmuxEsNotifyAuFound>> notifyAuFound,
vm::cptr<DmuxCb<DmuxEsNotifyFlushDone>> notifyFlushDone, vm::cptr<void> esSpecificInfo, vm::pptr<CellDmuxPamfEsHandle> esHandle)
{
cellDmuxPamf.notice("_CellDmuxCoreOpEnableEs<raw_es=%d>(handle=*0x%x, esFilterId=*0x%x, esResource=*0x%x, notifyAuFound=*0x%x, notifyFlushDone=*0x%x, esSpecificInfo=*0x%x, esHandle)",
cellDmuxPamf.notice("_CellDmuxCoreOpEnableEs<raw_es=%d>(handle=*0x%x, esFilterId=*0x%x, esResource=*0x%x, notifyAuFound=*0x%x, notifyFlushDone=*0x%x, esSpecificInfo=*0x%x, esHandle=**0x%x)",
raw_es, handle, esFilterId, esResource, notifyAuFound, notifyFlushDone, esSpecificInfo, esHandle);
if (!handle || !esFilterId || !esResource || !esResource->memAddr || esResource->memSize == 0u || !notifyAuFound || !notifyAuFound->cbFunc || !notifyAuFound->cbArg || !notifyFlushDone || !notifyFlushDone->cbFunc || !notifyFlushDone->cbArg)

View File

@ -5,14 +5,6 @@
#include <bitset>
#include "cellPamf.h"
// Exit predicate that always fires: turns squeue_t push/pop into
// non-blocking try_push/try_pop.
const std::function<bool()> SQUEUE_ALWAYS_EXIT = []() { return true; };
// Exit predicate that never fires: push/pop block until space/data appears
// (or the emulator stops, see squeue_test_exit).
const std::function<bool()> SQUEUE_NEVER_EXIT = []() { return false; };
// Global abort condition for squeue_t's blocking operations: reports whether
// the emulator has been stopped.
bool squeue_test_exit()
{
const bool emulator_stopped = Emu.IsStopped();
return emulator_stopped;
}
LOG_CHANNEL(cellPamf);
template<>

View File

@ -595,345 +595,3 @@ struct CellPamfReader
CHECK_SIZE(CellPamfReader, 128);
error_code cellPamfReaderInitialize(vm::ptr<CellPamfReader> pSelf, vm::cptr<PamfHeader> pAddr, u64 fileSize, u32 attribute);
#include <mutex>
#include <condition_variable>
extern const std::function<bool()> SQUEUE_ALWAYS_EXIT;
extern const std::function<bool()> SQUEUE_NEVER_EXIT;
bool squeue_test_exit();
// TODO: eliminate this boolshit
// Fixed-capacity circular FIFO shared between threads.
//
// Synchronization scheme: a single atomic control word (m_sync) packs the
// front position, the element count, and two single-bit locks (push_lock /
// pop_lock) that serialize producers and consumers respectively. Threads
// claim a lock and a slot via atomic_op, copy the element outside the atomic
// section, then publish by clearing the lock in a second atomic_op. While the
// queue is locked/full/empty, threads sleep on condition variables in 1 ms
// slices so the test_exit predicates are re-checked periodically even if no
// notification arrives.
template<typename T, u32 sq_size = 256>
class squeue_t
{
// Packed control word; kept trivially copyable for atomic_t.
struct squeue_sync_var_t
{
struct
{
u32 position : 31; // index of the front element
u32 pop_lock : 1; // set while a consumer copies data out
};
struct
{
u32 count : 31; // number of stored elements (<= sq_size)
u32 push_lock : 1; // set while a producer copies data in
};
};

atomic_t<squeue_sync_var_t> m_sync;

mutable std::mutex m_rcv_mutex;
mutable std::mutex m_wcv_mutex;
mutable std::condition_variable m_rcv; // signalled when data was produced
mutable std::condition_variable m_wcv; // signalled when space was freed
T m_data[sq_size];

// Result of the atomic_op lambdas below.
enum squeue_sync_var_result : u32
{
SQSVR_OK = 0,
SQSVR_LOCKED = 1, // another thread holds the needed lock; retry
SQSVR_FAILED = 2, // queue full (push) / empty (pop); wait or give up
};

public:
squeue_t()
: m_sync(squeue_sync_var_t{})
{
}

// Capacity in elements (compile-time constant).
static u32 get_max_size()
{
return sq_size;
}

bool is_full() const
{
return m_sync.load().count == sq_size;
}

// Blocking push; returns false if test_exit() (or emulator stop) fires
// while the queue is full.
bool push(const T& data, const std::function<bool()>& test_exit)
{
u32 pos = 0;

// Acquire the push lock and reserve the slot after the last element.
while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);

if (sync.push_lock)
{
return SQSVR_LOCKED;
}
if (sync.count == sq_size)
{
return SQSVR_FAILED;
}

sync.push_lock = 1;
pos = sync.position + sync.count; // may exceed sq_size; wrapped below
return SQSVR_OK;
}))
{
if (res == SQSVR_FAILED && (test_exit() || squeue_test_exit()))
{
return false;
}

// Short timed wait so the exit predicates are polled ~every 1 ms.
std::unique_lock<std::mutex> wcv_lock(m_wcv_mutex);
m_wcv.wait_for(wcv_lock, std::chrono::milliseconds(1));
}

m_data[pos >= sq_size ? pos - sq_size : pos] = data; // wrap-around write

// Publish: release the push lock and make the element visible.
m_sync.atomic_op([](squeue_sync_var_t& sync)
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);
ensure(!!sync.push_lock);
sync.push_lock = 0;
sync.count++;
});

m_rcv.notify_one();
m_wcv.notify_one();
return true;
}

bool push(const T& data, const volatile bool* do_exit)
{
return push(data, [do_exit]() { return do_exit && *do_exit; });
}

bool push(const T& data)
{
return push(data, SQUEUE_NEVER_EXIT);
}

bool try_push(const T& data)
{
return push(data, SQUEUE_ALWAYS_EXIT);
}

// Blocking pop; returns false if test_exit() (or emulator stop) fires
// while the queue is empty.
bool pop(T& data, const std::function<bool()>& test_exit)
{
u32 pos = 0;

// Acquire the pop lock and remember the front position.
while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);

if (!sync.count)
{
return SQSVR_FAILED;
}
if (sync.pop_lock)
{
return SQSVR_LOCKED;
}

sync.pop_lock = 1;
pos = sync.position;
return SQSVR_OK;
}))
{
if (res == SQSVR_FAILED && (test_exit() || squeue_test_exit()))
{
return false;
}

std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

data = m_data[pos];

// Publish: release the pop lock, advance (and wrap) the front position.
m_sync.atomic_op([](squeue_sync_var_t& sync)
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);
ensure(!!sync.pop_lock);
sync.pop_lock = 0;
sync.position++;
sync.count--;
if (sync.position == sq_size)
{
sync.position = 0;
}
});

m_rcv.notify_one();
m_wcv.notify_one();
return true;
}

bool pop(T& data, const volatile bool* do_exit)
{
return pop(data, [do_exit]() { return do_exit && *do_exit; });
}

bool pop(T& data)
{
return pop(data, SQUEUE_NEVER_EXIT);
}

bool try_pop(T& data)
{
return pop(data, SQUEUE_ALWAYS_EXIT);
}

// Copy the element at offset start_pos from the front without removing it.
// Takes the pop lock so the front cannot move while the copy is in flight.
bool peek(T& data, u32 start_pos, const std::function<bool()>& test_exit)
{
ensure(start_pos < sq_size);
u32 pos = 0;

while (u32 res = m_sync.atomic_op([&pos, start_pos](squeue_sync_var_t& sync) -> u32
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);

if (sync.count <= start_pos)
{
return SQSVR_FAILED; // not enough queued elements to peek that far
}
if (sync.pop_lock)
{
return SQSVR_LOCKED;
}

sync.pop_lock = 1;
pos = sync.position + start_pos; // may exceed sq_size; wrapped below
return SQSVR_OK;
}))
{
if (res == SQSVR_FAILED && (test_exit() || squeue_test_exit()))
{
return false;
}

std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

data = m_data[pos >= sq_size ? pos - sq_size : pos];

// Release the pop lock without consuming the element.
m_sync.atomic_op([](squeue_sync_var_t& sync)
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);
ensure(!!sync.pop_lock);
sync.pop_lock = 0;
});

m_rcv.notify_one();
return true;
}

bool peek(T& data, u32 start_pos, const volatile bool* do_exit)
{
return peek(data, start_pos, [do_exit]() { return do_exit && *do_exit; });
}

bool peek(T& data, u32 start_pos = 0)
{
return peek(data, start_pos, SQUEUE_NEVER_EXIT);
}

bool try_peek(T& data, u32 start_pos = 0)
{
return peek(data, start_pos, SQUEUE_ALWAYS_EXIT);
}

// Read-only-position window over the queue contents handed to process();
// operator[] translates a logical index to the wrapped physical slot.
class squeue_data_t
{
T* const m_data;
const u32 m_pos;
const u32 m_count;

squeue_data_t(T* data, u32 pos, u32 count)
: m_data(data)
, m_pos(pos)
, m_count(count)
{
}

public:
T& operator [] (u32 index)
{
ensure(index < m_count);
index += m_pos;
index = index < sq_size ? index : index - sq_size;
return m_data[index];
}
};

// Run proc over a consistent snapshot of the queue: both locks are held for
// the duration, so no push/pop can interleave with proc.
void process(void(*proc)(squeue_data_t data))
{
u32 pos, count;

while (m_sync.atomic_op([&pos, &count](squeue_sync_var_t& sync) -> u32
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);

if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}

pos = sync.position;
count = sync.count;
sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

proc(squeue_data_t(m_data, pos, count));

// Release both locks; positions/count are unchanged by process().
m_sync.atomic_op([](squeue_sync_var_t& sync)
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);
ensure(!!sync.pop_lock);
ensure(!!sync.push_lock);
sync.pop_lock = 0;
sync.push_lock = 0;
});

m_wcv.notify_one();
m_rcv.notify_one();
}

// Discard all elements: take both locks, then reset the control word.
// NOTE(review): stored T objects are not destroyed here, only the control
// word is reset — slots are overwritten by future pushes.
void clear()
{
while (m_sync.atomic_op([](squeue_sync_var_t& sync) -> u32
{
ensure(sync.count <= sq_size);
ensure(sync.position < sq_size);

if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}

sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

// Drops both locks and zeroes position/count in one store.
m_sync.exchange({});
m_wcv.notify_one();
m_rcv.notify_one();
}
};

View File

@ -63,7 +63,7 @@ extern const std::map<std::string_view, int> g_prx_list
{ "libcelpenc.sprx", 0 },
{ "libddpdec.sprx", 0 },
{ "libdivxdec.sprx", 0 },
{ "libdmux.sprx", 0 },
{ "libdmux.sprx", 1 },
{ "libdmuxpamf.sprx", 1 },
{ "libdtslbrdec.sprx", 0 },
{ "libfiber.sprx", 0 },

View File

@ -1731,24 +1731,34 @@ namespace rsx
}
case deferred_request_command::cubemap_unwrap:
{
rsx::simple_array<copy_region_descriptor> sections(6);
for (u16 n = 0; n < 6; ++n)
rsx::simple_array<copy_region_descriptor> sections(6 * desc.mipmaps);
for (u16 n = 0, section_id = 0; n < 6; ++n)
{
sections[n] =
u16 mip_w = desc.width, mip_h = desc.height;
u16 y_offset = static_cast<u16>(desc.slice_h * n);
for (u8 mip = 0; mip < desc.mipmaps; ++mip)
{
.src = desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = 0,
.src_x = 0,
.src_y = static_cast<u16>(desc.slice_h * n),
.dst_x = 0,
.dst_y = 0,
.dst_z = n,
.src_w = desc.width,
.src_h = desc.height,
.dst_w = desc.width,
.dst_h = desc.height
};
sections[section_id++] =
{
.src = desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = mip,
.src_x = 0,
.src_y = y_offset,
.dst_x = 0,
.dst_y = 0,
.dst_z = n,
.src_w = mip_w,
.src_h = mip_h,
.dst_w = mip_w,
.dst_h = mip_h
};
y_offset += mip_h;
mip_w = std::max<u16>(mip_w / 2, 1);
mip_h = std::max<u16>(mip_h / 2, 1);
}
}
result = generate_cubemap_from_images(cmd, desc.gcm_format, desc.width, sections, desc.remap);

View File

@ -384,7 +384,21 @@ void GLGSRender::load_texture_env()
}
}
m_fs_sampler_states[i].apply(tex, fs_sampler_state[i].get());
u32 actual_mipcount = 1;
if (sampler_state->upload_context == rsx::texture_upload_context::shader_read)
{
actual_mipcount = tex.get_exact_mipmap_count();
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::mipmap_gather)
{
actual_mipcount = sampler_state->external_subresource_desc.sections_to_copy.size();
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::cubemap_unwrap)
{
actual_mipcount = sampler_state->external_subresource_desc.mipmaps;
}
m_fs_sampler_states[i].apply(tex, fs_sampler_state[i].get(), actual_mipcount > 1);
const auto texture_format = sampler_state->format_ex.format();
// Depth format redirected to BGRA8 resample stage. Do not filter to avoid bits leaking.

View File

@ -586,7 +586,8 @@ namespace gl
gl::texture_view* generate_cubemap_from_images(gl::command_context& cmd, u32 gcm_format, u16 size, const rsx::simple_array<copy_region_descriptor>& sources, const rsx::texture_channel_remap_t& remap_vector) override
{
auto _template = get_template_from_collection_impl(sources);
auto result = create_temporary_subresource_impl(cmd, _template, GL_NONE, GL_TEXTURE_CUBE_MAP, gcm_format, 0, 0, size, size, 1, 1, remap_vector, false);
const u8 mip_count = 1 + sources.reduce(0, FN(std::max<u8>(x, y.level)));
auto result = create_temporary_subresource_impl(cmd, _template, GL_NONE, GL_TEXTURE_CUBE_MAP, gcm_format, 0, 0, size, size, 1, mip_count, remap_vector, false);
copy_transfer_regions_impl(cmd, result->image(), sources);
return result;

View File

@ -72,7 +72,7 @@ namespace gl
}
// Apply sampler state settings
void sampler_state::apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image)
void sampler_state::apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image, bool allow_mipmaps)
{
set_parameteri(GL_TEXTURE_WRAP_S, wrap_mode(tex.wrap_s()));
set_parameteri(GL_TEXTURE_WRAP_T, wrap_mode(tex.wrap_t()));
@ -114,8 +114,7 @@ namespace gl
}
}
if (sampled_image->upload_context != rsx::texture_upload_context::shader_read ||
tex.get_exact_mipmap_count() == 1)
if (!allow_mipmaps || tex.get_exact_mipmap_count() == 1)
{
GLint min_filter = tex_min_filter(tex.min_filter());

View File

@ -75,7 +75,7 @@ namespace gl
return (prop == m_propertiesf.end()) ? 0 : prop->second;
}
void apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image);
void apply(const rsx::fragment_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image, bool allow_mipmaps = true);
void apply(const rsx::vertex_texture& tex, const rsx::sampled_image_descriptor_base* sampled_image);
void apply_defaults(GLenum default_filter = GL_NEAREST);

View File

@ -14,7 +14,7 @@ namespace rsx
namespace nv0039
{
// Transfer with stride
inline void block2d_copy_with_stride(u8* dst, const u8* src, u32 width, u32 height, u32 src_pitch, u32 dst_pitch, u8 src_stride, u8 dst_stride)
inline void block2d_copy_with_stride(u8* dst, const u8* src, u32 width, u32 height, s32 src_pitch, s32 dst_pitch, u8 src_stride, u8 dst_stride)
{
for (u32 row = 0; row < height; ++row)
{
@ -33,7 +33,7 @@ namespace rsx
}
}
inline void block2d_copy(u8* dst, const u8* src, u32 width, u32 height, u32 src_pitch, u32 dst_pitch)
inline void block2d_copy(u8* dst, const u8* src, u32 width, u32 height, s32 src_pitch, s32 dst_pitch)
{
for (u32 i = 0; i < height; ++i)
{

View File

@ -471,6 +471,10 @@ void VKGSRender::load_texture_env()
// Clamp min and max lod
actual_mipmaps = static_cast<f32>(sampler_state->external_subresource_desc.sections_to_copy.size());
}
else if (sampler_state->external_subresource_desc.op == rsx::deferred_request_command::cubemap_unwrap)
{
actual_mipmaps = static_cast<f32>(sampler_state->external_subresource_desc.mipmaps);
}
else
{
actual_mipmaps = 1.f;

View File

@ -761,8 +761,9 @@ namespace vk
const rsx::simple_array<copy_region_descriptor>& sections_to_copy, const rsx::texture_channel_remap_t& remap_vector)
{
auto _template = get_template_from_collection_impl(sections_to_copy);
const u8 mip_count = 1 + sections_to_copy.reduce(0, FN(std::max<u8>(x, y.level)));
auto result = create_temporary_subresource_view_impl(cmd, _template, VK_IMAGE_TYPE_2D,
VK_IMAGE_VIEW_TYPE_CUBE, gcm_format, 0, 0, size, size, 1, 1, remap_vector, false);
VK_IMAGE_VIEW_TYPE_CUBE, gcm_format, 0, 0, size, size, 1, mip_count, remap_vector, false);
if (!result)
{
@ -772,7 +773,7 @@ namespace vk
const auto image = result->image();
VkImageAspectFlags dst_aspect = vk::get_aspect_flags(result->info.format);
VkImageSubresourceRange dst_range = { dst_aspect, 0, 1, 0, 6 };
VkImageSubresourceRange dst_range = { dst_aspect, 0, mip_count, 0, 6 };
vk::change_image_layout(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, dst_range);
if (!(dst_aspect & VK_IMAGE_ASPECT_DEPTH_BIT))

View File

@ -253,7 +253,11 @@ namespace utils
#ifdef __APPLE__
#ifdef ARCH_ARM64
auto ptr = ::mmap(use_addr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE | MAP_JIT | c_map_noreserve, -1, 0);
// Memory mapping regions will be replaced by file-backed MAP_FIXED mappings
// (via shm::map), which is incompatible with MAP_JIT. Only use MAP_JIT for
// non-mapping regions that need JIT executable support.
const int jit_flag = is_memory_mapping ? 0 : MAP_JIT;
auto ptr = ::mmap(use_addr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE | jit_flag | c_map_noreserve, -1, 0);
#else
auto ptr = ::mmap(use_addr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_JIT | c_map_noreserve, -1, 0);
#endif