mirror of
https://github.com/RPCS3/rpcs3.git
synced 2026-04-03 11:28:07 -06:00
cellDmuxPamf: fix freezing FMVs in some games
- Delays demuxing a bit to let the game finish initializing FMV playback. - Also includes the missing half of #18450 which I forgot to commit ^^" - Sets cellDmuxPamf to HLE by default again
This commit is contained in:
parent
bb3e2689d4
commit
72b872df66
@ -296,7 +296,7 @@ void LpcmDecContext::exec(ppu_thread& ppu)
|
||||
|
||||
cmd_queue.pop(cmd);
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, queue_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
|
||||
cellAdec.trace("Command type: %d", static_cast<u32>(cmd.type.get()));
|
||||
|
||||
@ -307,7 +307,7 @@ void LpcmDecContext::exec(ppu_thread& ppu)
|
||||
{
|
||||
case LpcmDecCmdType::start_seq:
|
||||
// LLE sends a command to the SPU thread. The SPU thread consumes the command without doing anything, however
|
||||
error_occurred |= static_cast<u32>(sys_mutex_unlock(ppu, output_mutex) != CELL_OK);
|
||||
error_occurred |= static_cast<u32>(lv2_syscall<sys_mutex_unlock>(ppu, output_mutex) != CELL_OK);
|
||||
break;
|
||||
|
||||
case LpcmDecCmdType::end_seq:
|
||||
@ -324,11 +324,11 @@ void LpcmDecContext::exec(ppu_thread& ppu)
|
||||
// Doesn't do anything else
|
||||
notify_seq_done.cbFunc(ppu, notify_seq_done.cbArg);
|
||||
|
||||
error_occurred |= static_cast<u32>(sys_mutex_unlock(ppu, output_mutex) != CELL_OK);
|
||||
error_occurred |= static_cast<u32>(lv2_syscall<sys_mutex_unlock>(ppu, output_mutex) != CELL_OK);
|
||||
break;
|
||||
}
|
||||
case LpcmDecCmdType::close:
|
||||
ensure(sys_mutex_unlock(ppu, output_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, output_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return;
|
||||
|
||||
case LpcmDecCmdType::decode_au:
|
||||
@ -685,7 +685,7 @@ void LpcmDecContext::exec(ppu_thread& ppu)
|
||||
notify_au_done.cbFunc(ppu, cmd.pcm_handle, notify_au_done.cbArg);
|
||||
|
||||
output_locked = true;
|
||||
error_occurred |= static_cast<u32>(sys_mutex_unlock(ppu, output_mutex) != CELL_OK);
|
||||
error_occurred |= static_cast<u32>(lv2_syscall<sys_mutex_unlock>(ppu, output_mutex) != CELL_OK);
|
||||
|
||||
const vm::var<CellAdecLpcmInfo> bsi_info{{ lpcm_param->channelNumber, lpcm_param->sampleRate, static_cast<u32>(output_size) }};
|
||||
|
||||
@ -710,7 +710,7 @@ error_code LpcmDecContext::send_command(ppu_thread& ppu, auto&&... args)
|
||||
|
||||
if (cmd_queue.full())
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_ADEC_ERROR_BUSY;
|
||||
}
|
||||
|
||||
@ -722,20 +722,20 @@ error_code LpcmDecContext::send_command(ppu_thread& ppu, auto&&... args)
|
||||
|
||||
if (error_code ret = lv2_syscall<sys_mutex_lock>(ppu, queue_mutex, 0); ret != CELL_OK)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return ret;
|
||||
}
|
||||
|
||||
cmd_queue.emplace(type, std::forward<decltype(args)>(args)...);
|
||||
|
||||
if (error_code ret = sys_mutex_unlock(ppu, queue_mutex); ret != CELL_OK
|
||||
if (error_code ret = lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex); ret != CELL_OK
|
||||
|| (ret = cmd_available.release(ppu)) != CELL_OK)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return ret;
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, queue_size_mutex);
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, queue_size_mutex);
|
||||
}
|
||||
|
||||
inline error_code LpcmDecContext::release_output(ppu_thread& ppu)
|
||||
@ -752,7 +752,7 @@ inline error_code LpcmDecContext::release_output(ppu_thread& ppu)
|
||||
return ret; // LLE doesn't unlock the mutex
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, output_mutex);
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, output_mutex);
|
||||
}
|
||||
|
||||
void lpcmDecEntry(ppu_thread& ppu, vm::ptr<LpcmDecContext> lpcm_dec)
|
||||
@ -820,13 +820,13 @@ error_code _CellAdecCoreOpOpenExt_lpcm(ppu_thread& ppu, vm::ptr<LpcmDecContext>
|
||||
const vm::var<sys_mutex_attribute_t> queue_mutex_attr{{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_adem06"_u64 } }};
|
||||
const vm::var<sys_cond_attribute_t> cond_attr{{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_adec03"_u64 } }};
|
||||
|
||||
error_code ret = sys_mutex_create(ppu, handle.ptr(&LpcmDecContext::queue_size_mutex), mutex_attr);
|
||||
ret = ret ? ret : sys_cond_create(ppu, handle.ptr(&LpcmDecContext::queue_size_cond), handle->queue_size_mutex, cond_attr);
|
||||
ret = ret ? ret : sys_mutex_create(ppu, handle.ptr(&LpcmDecContext::unk_mutex), mutex_attr);
|
||||
ret = ret ? ret : sys_cond_create(ppu, handle.ptr(&LpcmDecContext::unk_cond), handle->unk_mutex, cond_attr);
|
||||
ret = ret ? ret : sys_mutex_create(ppu, handle.ptr(&LpcmDecContext::output_mutex), output_mutex_attr);
|
||||
ret = ret ? ret : sys_cond_create(ppu, handle.ptr(&LpcmDecContext::output_consumed), handle->output_mutex, cond_attr);
|
||||
ret = ret ? ret : sys_mutex_create(ppu, handle.ptr(&LpcmDecContext::queue_mutex), queue_mutex_attr);
|
||||
error_code ret = lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&LpcmDecContext::queue_size_mutex), mutex_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_create>(ppu, handle.ptr(&LpcmDecContext::queue_size_cond), handle->queue_size_mutex, cond_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&LpcmDecContext::unk_mutex), mutex_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_create>(ppu, handle.ptr(&LpcmDecContext::unk_cond), handle->unk_mutex, cond_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&LpcmDecContext::output_mutex), output_mutex_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_create>(ppu, handle.ptr(&LpcmDecContext::output_consumed), handle->output_mutex, cond_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&LpcmDecContext::queue_mutex), queue_mutex_attr);
|
||||
ret = ret ? ret : handle->release_output(ppu);
|
||||
ret = ret ? ret : handle->cmd_available.init(ppu, handle.ptr(&LpcmDecContext::cmd_available), 0);
|
||||
ret = ret ? ret : handle->reserved2.init(ppu, handle.ptr(&LpcmDecContext::reserved2), 0);
|
||||
@ -844,8 +844,8 @@ error_code _CellAdecCoreOpOpenExt_lpcm(ppu_thread& ppu, vm::ptr<LpcmDecContext>
|
||||
const auto entry = g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(lpcmDecEntry));
|
||||
|
||||
ret = ppu_execute<&sys_ppu_thread_create>(ppu, handle.ptr(&LpcmDecContext::thread_id), entry, handle.addr(), +res->ppuThreadPriority, +res->ppuThreadStackSize, SYS_PPU_THREAD_CREATE_JOINABLE, +_name);
|
||||
ret = ret ? ret : sys_mutex_create(ppu, handle.ptr(&LpcmDecContext::spurs_queue_pop_mutex), mutex_attr);
|
||||
ret = ret ? ret : sys_mutex_create(ppu, handle.ptr(&LpcmDecContext::spurs_queue_push_mutex), mutex_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&LpcmDecContext::spurs_queue_pop_mutex), mutex_attr);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&LpcmDecContext::spurs_queue_push_mutex), mutex_attr);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -875,14 +875,14 @@ error_code _CellAdecCoreOpClose_lpcm(ppu_thread& ppu, vm::ptr<LpcmDecContext> ha
|
||||
{
|
||||
handle->cmd_queue.emplace(LpcmDecCmdType::close);
|
||||
|
||||
if (error_code ret = sys_mutex_unlock(ppu, handle->queue_mutex); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_mutex_unlock>(ppu, handle->queue_mutex); ret != CELL_OK)
|
||||
{
|
||||
return ret; // LLE doesn't unlock the queue size mutex
|
||||
}
|
||||
|
||||
if (error_code ret = handle->cmd_available.release(ppu); ret != CELL_OK)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, handle->queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, handle->queue_size_mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -893,29 +893,29 @@ error_code _CellAdecCoreOpClose_lpcm(ppu_thread& ppu, vm::ptr<LpcmDecContext> ha
|
||||
cmd.type = LpcmDecCmdType::close;
|
||||
}
|
||||
|
||||
if (error_code ret = sys_mutex_unlock(ppu, handle->queue_mutex); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_mutex_unlock>(ppu, handle->queue_mutex); ret != CELL_OK)
|
||||
{
|
||||
return ret; // LLE doesn't unlock the queue size mutex
|
||||
}
|
||||
}
|
||||
|
||||
error_code ret = sys_mutex_unlock(ppu, handle->queue_size_mutex);
|
||||
error_code ret = lv2_syscall<sys_mutex_unlock>(ppu, handle->queue_size_mutex);
|
||||
ret = ret ? ret : handle->release_output(ppu);
|
||||
|
||||
vm::var<u64> thread_ret;
|
||||
ret = ret ? ret : sys_ppu_thread_join(ppu, static_cast<u32>(handle->thread_id), +thread_ret);
|
||||
ret = ret ? ret : lv2_syscall<sys_ppu_thread_join>(ppu, static_cast<u32>(handle->thread_id), +thread_ret);
|
||||
|
||||
ret = ret ? ret : sys_cond_destroy(ppu, handle->queue_size_cond);
|
||||
ret = ret ? ret : sys_cond_destroy(ppu, handle->unk_cond);
|
||||
ret = ret ? ret : sys_cond_destroy(ppu, handle->output_consumed);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->queue_mutex);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->queue_size_mutex);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->unk_mutex);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->output_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_destroy>(ppu, handle->queue_size_cond);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_destroy>(ppu, handle->unk_cond);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_destroy>(ppu, handle->output_consumed);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->queue_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->queue_size_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->unk_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->output_mutex);
|
||||
ret = ret ? ret : handle->cmd_available.finalize(ppu);
|
||||
ret = ret ? ret : handle->reserved2.finalize(ppu);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->spurs_queue_pop_mutex);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->spurs_queue_push_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->spurs_queue_pop_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->spurs_queue_push_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1095,7 +1095,7 @@ error_code AdecContext::link_frame(ppu_thread& ppu, s32 pcm_handle)
|
||||
|
||||
if (verify_pcm_handle(pcm_handle) == static_cast<s32>(CELL_ADEC_ERROR_FATAL))
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_ADEC_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -1115,11 +1115,11 @@ error_code AdecContext::link_frame(ppu_thread& ppu, s32 pcm_handle)
|
||||
}
|
||||
else
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_ADEC_ERROR_FATAL;
|
||||
}
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -1129,13 +1129,13 @@ error_code AdecContext::unlink_frame(ppu_thread& ppu, s32 pcm_handle)
|
||||
|
||||
if (verify_pcm_handle(pcm_handle) == static_cast<s32>(CELL_ADEC_ERROR_FATAL))
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_ADEC_ERROR_FATAL;
|
||||
}
|
||||
|
||||
if (frames_head == -1 || frames_tail == -1)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_ADEC_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -1146,7 +1146,7 @@ error_code AdecContext::unlink_frame(ppu_thread& ppu, s32 pcm_handle)
|
||||
{
|
||||
if (pcm_handle != frames_tail)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_ADEC_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -1171,7 +1171,7 @@ error_code AdecContext::unlink_frame(ppu_thread& ppu, s32 pcm_handle)
|
||||
frames[prev].next = next;
|
||||
}
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -1557,7 +1557,7 @@ error_code adecOpen(ppu_thread& ppu, vm::ptr<CellAdecType> type, vm::cptr<CellAd
|
||||
frames[i].prev = 0;
|
||||
}
|
||||
|
||||
ensure(sys_mutex_create(ppu, _this.ptr(&AdecContext::mutex), _this.ptr(&AdecContext::mutex_attribute)) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_create>(ppu, _this.ptr(&AdecContext::mutex), _this.ptr(&AdecContext::mutex_attribute)) == CELL_OK); // Error code isn't checked on LLE
|
||||
|
||||
*handle = _this;
|
||||
|
||||
@ -1626,7 +1626,7 @@ error_code cellAdecClose(ppu_thread& ppu, vm::ptr<AdecContext> handle)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (error_code ret = sys_mutex_destroy(ppu, handle->mutex); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_mutex_destroy>(ppu, handle->mutex); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -524,10 +524,10 @@ public:
|
||||
this->size = 0;
|
||||
|
||||
const vm::var<sys_mutex_attribute_t> mutex_attr = {{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_adem07"_u64 } }};
|
||||
ensure(sys_mutex_create(ppu, _this.ptr(&AdecOutputQueue::mutex), mutex_attr) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_create>(ppu, _this.ptr(&AdecOutputQueue::mutex), mutex_attr) == CELL_OK); // Error code isn't checked on LLE
|
||||
|
||||
const vm::var<sys_cond_attribute_t> cond_attr = {{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_adec05"_u64 } }};
|
||||
ensure(sys_cond_create(ppu, _this.ptr(&AdecOutputQueue::cond), mutex, cond_attr) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_cond_create>(ppu, _this.ptr(&AdecOutputQueue::cond), mutex, cond_attr) == CELL_OK); // Error code isn't checked on LLE
|
||||
|
||||
for (s32 i = 0; i < 4; i++)
|
||||
{
|
||||
@ -537,12 +537,12 @@ public:
|
||||
|
||||
error_code finalize(ppu_thread& ppu) const
|
||||
{
|
||||
if (error_code ret = sys_cond_destroy(ppu, cond); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_cond_destroy>(ppu, cond); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (error_code ret = sys_mutex_destroy(ppu, mutex); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_mutex_destroy>(ppu, mutex); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
@ -556,7 +556,7 @@ public:
|
||||
|
||||
if (entries[back].state != 0xff)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return true; // LLE returns the result of the comparison above
|
||||
}
|
||||
|
||||
@ -567,7 +567,7 @@ public:
|
||||
back = (back + 1) & 3;
|
||||
size++;
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -582,7 +582,7 @@ public:
|
||||
|
||||
if (entries[front].state == 0xff)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@ -594,7 +594,7 @@ public:
|
||||
front = (front + 1) & 3;
|
||||
size--;
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -602,7 +602,7 @@ public:
|
||||
{
|
||||
ensure(lv2_syscall<sys_mutex_lock>(ppu, mutex, 0) == CELL_OK); // Error code isn't checked on LLE
|
||||
const entry& ret = entries[front];
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
@ -721,22 +721,22 @@ public:
|
||||
const vm::var<sys_mutex_attribute_t> mutex_attr{{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_adem01"_u64 } }};
|
||||
const vm::var<sys_cond_attribute_t> cond_attr{{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_adec01"_u64 } }};
|
||||
|
||||
if (error_code ret = sys_mutex_create(ppu, _this.ptr(&LpcmDecSemaphore::mutex), mutex_attr); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_mutex_create>(ppu, _this.ptr(&LpcmDecSemaphore::mutex), mutex_attr); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
return sys_cond_create(ppu, _this.ptr(&LpcmDecSemaphore::cond), mutex, cond_attr);
|
||||
return lv2_syscall<sys_cond_create>(ppu, _this.ptr(&LpcmDecSemaphore::cond), mutex, cond_attr);
|
||||
}
|
||||
|
||||
error_code finalize(ppu_thread& ppu) const
|
||||
{
|
||||
if (error_code ret = sys_cond_destroy(ppu, cond); ret != CELL_OK)
|
||||
if (error_code ret = lv2_syscall<sys_cond_destroy>(ppu, cond); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
return sys_mutex_destroy(ppu, mutex);
|
||||
return lv2_syscall<sys_mutex_destroy>(ppu, mutex);
|
||||
}
|
||||
|
||||
error_code release(ppu_thread& ppu)
|
||||
@ -753,7 +753,7 @@ public:
|
||||
return ret; // LLE doesn't unlock the mutex
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex);
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex);
|
||||
}
|
||||
|
||||
error_code acquire(ppu_thread& ppu, lpcm_dec_state& savestate)
|
||||
@ -793,7 +793,7 @@ public:
|
||||
|
||||
value--;
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex);
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@ -317,12 +317,12 @@ void AtracXdecContext::exec(ppu_thread& ppu)
|
||||
return;
|
||||
}
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, queue_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex) == CELL_OK);
|
||||
}
|
||||
|
||||
cmd_queue.pop(cmd);
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, queue_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex) == CELL_OK);
|
||||
|
||||
savestate = atracxdec_state::checking_run_thread_1;
|
||||
label2_check_run_thread_1_state:
|
||||
@ -336,11 +336,11 @@ void AtracXdecContext::exec(ppu_thread& ppu)
|
||||
|
||||
if (!run_thread)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, run_thread_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, run_thread_mutex) == CELL_OK);
|
||||
return;
|
||||
}
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, run_thread_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, run_thread_mutex) == CELL_OK);
|
||||
|
||||
savestate = atracxdec_state::executing_cmd;
|
||||
label3_execute_cmd_state:
|
||||
@ -426,12 +426,12 @@ void AtracXdecContext::exec(ppu_thread& ppu)
|
||||
|
||||
if (!run_thread)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, run_thread_mutex) == CELL_OK);
|
||||
ensure(sys_mutex_unlock(ppu, output_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, run_thread_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, output_mutex) == CELL_OK);
|
||||
return;
|
||||
}
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, run_thread_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, run_thread_mutex) == CELL_OK);
|
||||
|
||||
savestate = atracxdec_state::decoding;
|
||||
label6_decode_state:
|
||||
@ -645,7 +645,7 @@ void AtracXdecContext::exec(ppu_thread& ppu)
|
||||
notify_au_done.cbFunc(ppu, cmd.pcm_handle, notify_au_done.cbArg);
|
||||
|
||||
output_locked = true;
|
||||
ensure(sys_mutex_unlock(ppu, output_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, output_mutex) == CELL_OK);
|
||||
|
||||
const u32 output_size = decoded_samples_num * (decoder.bw_pcm & 0x7fu) * decoder.nch_out;
|
||||
|
||||
@ -692,20 +692,20 @@ error_code AtracXdecContext::send_command(ppu_thread& ppu, auto&&... args)
|
||||
// Close command is only sent if the queue is empty on LLE
|
||||
if (!cmd_queue.empty())
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, queue_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex) == CELL_OK);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
if (cmd_queue.full())
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, queue_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex) == CELL_OK);
|
||||
return CELL_ADEC_ERROR_ATX_BUSY;
|
||||
}
|
||||
|
||||
cmd_queue.emplace(std::forward<AtracXdecCmdType>(type), std::forward<decltype(args)>(args)...);
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, queue_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, queue_mutex) == CELL_OK);
|
||||
}
|
||||
|
||||
ensure(lv2_syscall<sys_cond_signal>(ppu, queue_not_empty) == CELL_OK);
|
||||
@ -779,25 +779,25 @@ error_code _CellAdecCoreOpOpenExt_atracx(ppu_thread& ppu, vm::ptr<AtracXdecConte
|
||||
const vm::var<sys_mutex_attribute_t> mutex_attr{{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_atd001"_u64 } }};
|
||||
const vm::var<sys_cond_attribute_t> cond_attr{{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_atd002"_u64 } }};
|
||||
|
||||
ensure(sys_mutex_create(ppu, handle.ptr(&AtracXdecContext::queue_mutex), mutex_attr) == CELL_OK);
|
||||
ensure(sys_cond_create(ppu, handle.ptr(&AtracXdecContext::queue_not_empty), handle->queue_mutex, cond_attr) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&AtracXdecContext::queue_mutex), mutex_attr) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_cond_create>(ppu, handle.ptr(&AtracXdecContext::queue_not_empty), handle->queue_mutex, cond_attr) == CELL_OK);
|
||||
|
||||
mutex_attr->name_u64 = "_atd003"_u64;
|
||||
cond_attr->name_u64 = "_atd004"_u64;
|
||||
|
||||
ensure(sys_mutex_create(ppu, handle.ptr(&AtracXdecContext::run_thread_mutex), mutex_attr) == CELL_OK);
|
||||
ensure(sys_cond_create(ppu, handle.ptr(&AtracXdecContext::run_thread_cond), handle->run_thread_mutex, cond_attr) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&AtracXdecContext::run_thread_mutex), mutex_attr) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_cond_create>(ppu, handle.ptr(&AtracXdecContext::run_thread_cond), handle->run_thread_mutex, cond_attr) == CELL_OK);
|
||||
|
||||
mutex_attr->name_u64 = "_atd005"_u64;
|
||||
cond_attr->name_u64 = "_atd006"_u64;
|
||||
|
||||
ensure(sys_mutex_create(ppu, handle.ptr(&AtracXdecContext::output_mutex), mutex_attr) == CELL_OK);
|
||||
ensure(sys_cond_create(ppu, handle.ptr(&AtracXdecContext::output_consumed), handle->output_mutex, cond_attr) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_create>(ppu, handle.ptr(&AtracXdecContext::output_mutex), mutex_attr) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_cond_create>(ppu, handle.ptr(&AtracXdecContext::output_consumed), handle->output_mutex, cond_attr) == CELL_OK);
|
||||
|
||||
ensure(lv2_syscall<sys_mutex_lock>(ppu, handle->output_mutex, 0) == CELL_OK);
|
||||
handle->output_locked = false;
|
||||
ensure(lv2_syscall<sys_cond_signal>(ppu, handle->output_consumed) == CELL_OK);
|
||||
ensure(sys_mutex_unlock(ppu, handle->output_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, handle->output_mutex) == CELL_OK);
|
||||
|
||||
const vm::var<char[]> _name = vm::make_str("HLE ATRAC3plus decoder");
|
||||
const auto entry = g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(atracXdecEntry));
|
||||
@ -831,24 +831,24 @@ error_code _CellAdecCoreOpClose_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext
|
||||
|
||||
ensure(lv2_syscall<sys_mutex_lock>(ppu, handle->run_thread_mutex, 0) == CELL_OK);
|
||||
handle->run_thread = false;
|
||||
ensure(sys_mutex_unlock(ppu, handle->run_thread_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, handle->run_thread_mutex) == CELL_OK);
|
||||
|
||||
handle->send_command<AtracXdecCmdType::close>(ppu);
|
||||
|
||||
ensure(lv2_syscall<sys_mutex_lock>(ppu, handle->output_mutex, 0) == CELL_OK);
|
||||
handle->output_locked = false;
|
||||
ensure(sys_mutex_unlock(ppu, handle->output_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, handle->output_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_cond_signal>(ppu, handle->output_consumed) == CELL_OK);
|
||||
|
||||
vm::var<u64> thread_ret;
|
||||
ensure(lv2_syscall<sys_ppu_thread_join>(ppu, static_cast<u32>(handle->thread_id), +thread_ret) == CELL_OK);
|
||||
|
||||
error_code ret = sys_cond_destroy(ppu, handle->queue_not_empty);
|
||||
ret = ret ? ret : sys_cond_destroy(ppu, handle->run_thread_cond);
|
||||
ret = ret ? ret : sys_cond_destroy(ppu, handle->output_consumed);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->queue_mutex);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->run_thread_mutex);
|
||||
ret = ret ? ret : sys_mutex_destroy(ppu, handle->output_mutex);
|
||||
error_code ret = lv2_syscall<sys_cond_destroy>(ppu, handle->queue_not_empty);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_destroy>(ppu, handle->run_thread_cond);
|
||||
ret = ret ? ret : lv2_syscall<sys_cond_destroy>(ppu, handle->output_consumed);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->queue_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->run_thread_mutex);
|
||||
ret = ret ? ret : lv2_syscall<sys_mutex_destroy>(ppu, handle->output_mutex);
|
||||
|
||||
return ret != CELL_OK ? static_cast<error_code>(CELL_ADEC_ERROR_FATAL) : CELL_OK;
|
||||
}
|
||||
@ -939,7 +939,7 @@ error_code _CellAdecCoreOpReleasePcm_atracx(ppu_thread& ppu, vm::ptr<AtracXdecCo
|
||||
return {};
|
||||
}
|
||||
|
||||
ensure(sys_mutex_unlock(ppu, handle->output_mutex) == CELL_OK);
|
||||
ensure(lv2_syscall<sys_mutex_unlock>(ppu, handle->output_mutex) == CELL_OK);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
@ -999,6 +999,10 @@ void dmux_pamf_spu_context::operator()() // cellSpursMain()
|
||||
ensure(stream_info_queue->pop(stream_info));
|
||||
|
||||
set_stream({ stream_info.stream_addr.get_ptr(), stream_info.stream_size }, stream_info.continuity);
|
||||
|
||||
// Delay demuxing a bit
|
||||
// Prevents White Knight Chronicles II FMVs from freezing, since events are otherwise fired before the game has finished initializing FMV playback
|
||||
thread_ctrl::wait_for(1'500);
|
||||
}
|
||||
|
||||
process_next_pack();
|
||||
@ -1229,7 +1233,7 @@ error_code DmuxPamfContext::wait_au_released_or_stream_reset(ppu_thread& ppu, u6
|
||||
|
||||
if (lv2_syscall<sys_cond_wait>(ppu, cond, 0) != CELL_OK)
|
||||
{
|
||||
sys_mutex_unlock(ppu, mutex);
|
||||
lv2_syscall<sys_mutex_unlock>(ppu, mutex);
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -1247,7 +1251,7 @@ error_code DmuxPamfContext::wait_au_released_or_stream_reset(ppu_thread& ppu, u6
|
||||
|
||||
au_released_bitset = 0;
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex) != CELL_OK ? static_cast<error_code>(CELL_DMUX_PAMF_ERROR_FATAL) : CELL_OK;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK ? static_cast<error_code>(CELL_DMUX_PAMF_ERROR_FATAL) : CELL_OK;
|
||||
}
|
||||
|
||||
template <bool reset>
|
||||
@ -1265,7 +1269,7 @@ error_code DmuxPamfContext::set_au_reset(ppu_thread& ppu)
|
||||
|
||||
std::ranges::for_each(elementary_streams | std::views::filter([](auto es){ return !!es; }), [](auto& reset_next_au) { reset_next_au = reset; }, &DmuxPamfElementaryStream::reset_next_au);
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
@ -1383,7 +1387,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
|
||||
sequence_state = DmuxPamfSequenceState::resetting;
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::starting_demux_done_mutex_unlock_error;
|
||||
label6_starting_demux_done_mutex_unlock_error_state:
|
||||
@ -1451,7 +1455,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
// If the elementary stream of the found access unit is not enabled, don't do anything
|
||||
if (!es || es->_this.get_ptr() != es || es->es_id != event.au_found.user_data)
|
||||
{
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1479,7 +1483,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
es->reset_next_au = false;
|
||||
}
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1505,7 +1509,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
es->au_specific_info[2] = read_from_ptr<bf_t<u8, 6, 2>>(event.au_found.stream_header_buf, 1);
|
||||
}
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1567,7 +1571,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
|
||||
if (lv2_syscall<sys_cond_signal_all>(ppu, cond) != CELL_OK)
|
||||
{
|
||||
sys_mutex_unlock(ppu, mutex);
|
||||
lv2_syscall<sys_mutex_unlock>(ppu, mutex);
|
||||
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1576,7 +1580,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
RETURN_ON_CPU_FLAG_AGAIN();
|
||||
}
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1602,7 +1606,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
DmuxPamfElementaryStream* const es = find_es(event.flush_done.stream_id, event.flush_done.private_stream_id);
|
||||
const bool valid = es && es->_this.get_ptr() == es && es->es_id == event.flush_done.user_data;
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1672,7 +1676,7 @@ void DmuxPamfContext::exec(ppu_thread& ppu)
|
||||
ensure(cmd_result_queue.pop());
|
||||
}
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
savestate = dmux_pamf_state::sending_fatal_err;
|
||||
continue;
|
||||
@ -1978,8 +1982,8 @@ error_code DmuxPamfContext::open(ppu_thread& ppu, const CellDmuxPamfResource& re
|
||||
const vm::var<sys_mutex_attribute_t> mutex_attr = {{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_dxpmtx"_u64 } }};
|
||||
const vm::var<sys_cond_attribute_t> cond_attr = {{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_dxpcnd"_u64 } }};
|
||||
|
||||
if (sys_mutex_create(ppu, _this.ptr(&DmuxPamfContext::mutex), mutex_attr) != CELL_OK
|
||||
|| sys_cond_create(ppu, _this.ptr(&DmuxPamfContext::cond), _this->mutex, cond_attr) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_create>(ppu, _this.ptr(&DmuxPamfContext::mutex), mutex_attr) != CELL_OK
|
||||
|| lv2_syscall<sys_cond_create>(ppu, _this.ptr(&DmuxPamfContext::cond), _this->mutex, cond_attr) != CELL_OK)
|
||||
{
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
@ -2092,8 +2096,8 @@ error_code DmuxPamfContext::close(ppu_thread& ppu)
|
||||
g_fxo->get<lv2_memory_container>().free(0x40000);
|
||||
}
|
||||
|
||||
if (sys_cond_destroy(ppu, cond) != CELL_OK
|
||||
|| sys_mutex_destroy(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_cond_destroy>(ppu, cond) != CELL_OK
|
||||
|| lv2_syscall<sys_mutex_destroy>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
@ -2144,7 +2148,7 @@ error_code DmuxPamfContext::reset_stream(ppu_thread& ppu)
|
||||
|
||||
if (sequence_state != DmuxPamfSequenceState::running)
|
||||
{
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
[[fallthrough]];
|
||||
@ -2164,7 +2168,7 @@ error_code DmuxPamfContext::reset_stream(ppu_thread& ppu)
|
||||
case 2:
|
||||
if (const error_code ret = lv2_syscall<sys_cond_signal_to>(ppu, cond, static_cast<u32>(thread_id)); ret != CELL_OK && ret != static_cast<s32>(CELL_EPERM))
|
||||
{
|
||||
sys_mutex_unlock(ppu, mutex);
|
||||
lv2_syscall<sys_mutex_unlock>(ppu, mutex);
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -2174,7 +2178,7 @@ error_code DmuxPamfContext::reset_stream(ppu_thread& ppu)
|
||||
return {};
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
|
||||
default:
|
||||
fmt::throw_exception("Unexpected savestate value: 0x%x", savestate);
|
||||
@ -2240,7 +2244,7 @@ error_code DmuxPamfContext::join_thread(ppu_thread& ppu)
|
||||
|
||||
send_spu_command_and_wait<DmuxPamfCommandType::close>(ppu, false);
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
@ -2293,7 +2297,7 @@ error_code DmuxPamfContext::set_stream(ppu_thread& ppu, vm::cptr<u8> stream_addr
|
||||
|
||||
if (!stream_info_queue.emplace(stream_address, stream_size, user_data, !discontinuity, raw_es))
|
||||
{
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_BUSY : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_BUSY : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2307,7 +2311,7 @@ error_code DmuxPamfContext::set_stream(ppu_thread& ppu, vm::cptr<u8> stream_addr
|
||||
|
||||
sequence_state = DmuxPamfSequenceState::running;
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
template <bool raw_es>
|
||||
@ -2362,7 +2366,7 @@ error_code DmuxPamfElementaryStream::release_au(ppu_thread& ppu, vm::ptr<u8> au_
|
||||
case 2:
|
||||
if (const error_code ret = lv2_syscall<sys_cond_signal_to>(ppu, demuxer->cond, static_cast<u32>(demuxer->thread_id)); ret != CELL_OK && ret != static_cast<s32>(CELL_EPERM))
|
||||
{
|
||||
sys_mutex_unlock(ppu, demuxer->mutex);
|
||||
lv2_syscall<sys_mutex_unlock>(ppu, demuxer->mutex);
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -2372,7 +2376,7 @@ error_code DmuxPamfElementaryStream::release_au(ppu_thread& ppu, vm::ptr<u8> au_
|
||||
return {};
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
|
||||
default:
|
||||
fmt::throw_exception("Unexpected savestate value: 0x%x", savestate);
|
||||
@ -2490,13 +2494,13 @@ error_code DmuxPamfContext::enable_es(ppu_thread& ppu, u16 stream_id, u16 privat
|
||||
|
||||
if (enabled_es_num == max_enabled_es_num)
|
||||
{
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_NO_MEMORY : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_NO_MEMORY : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
if (find_es(stream_id, private_stream_id))
|
||||
{
|
||||
// Elementary stream is already enabled
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_ARG : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_ARG : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2574,7 +2578,7 @@ error_code DmuxPamfContext::enable_es(ppu_thread& ppu, u16 stream_id, u16 privat
|
||||
|
||||
enabled_es_num++;
|
||||
|
||||
if (sys_mutex_unlock(ppu, mutex) != CELL_OK)
|
||||
if (lv2_syscall<sys_mutex_unlock>(ppu, mutex) != CELL_OK)
|
||||
{
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
@ -2642,7 +2646,7 @@ error_code DmuxPamfElementaryStream::disable_es(ppu_thread& ppu)
|
||||
if (!dmux->find_es(stream_id, private_stream_id))
|
||||
{
|
||||
// Elementary stream is already disabled
|
||||
return sys_mutex_unlock(ppu, dmux->mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_ARG : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, dmux->mutex) == CELL_OK ? CELL_DMUX_PAMF_ERROR_ARG : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
[[fallthrough]];
|
||||
@ -2675,7 +2679,7 @@ error_code DmuxPamfElementaryStream::disable_es(ppu_thread& ppu)
|
||||
case 2:
|
||||
if (const error_code ret = lv2_syscall<sys_cond_signal_to>(ppu, dmux->cond, static_cast<u32>(dmux->thread_id)); ret != CELL_OK && ret != static_cast<s32>(CELL_EPERM))
|
||||
{
|
||||
sys_mutex_unlock(ppu, dmux->mutex);
|
||||
lv2_syscall<sys_mutex_unlock>(ppu, dmux->mutex);
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -2685,7 +2689,7 @@ error_code DmuxPamfElementaryStream::disable_es(ppu_thread& ppu)
|
||||
return {};
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, dmux->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, dmux->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
|
||||
default:
|
||||
fmt::throw_exception("Unexpected savestate value: 0x%x", savestate);
|
||||
@ -2732,7 +2736,7 @@ error_code DmuxPamfElementaryStream::flush_es(ppu_thread& ppu) const
|
||||
return {};
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
error_code _CellDmuxCoreOpFlushEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfEsHandle> esHandle)
|
||||
@ -2775,7 +2779,7 @@ error_code DmuxPamfElementaryStream::reset_es(ppu_thread& ppu) const
|
||||
return {};
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, demuxer->mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
error_code _CellDmuxCoreOpResetEs(ppu_thread& ppu, vm::ptr<CellDmuxPamfEsHandle> esHandle)
|
||||
@ -2825,7 +2829,7 @@ error_code DmuxPamfContext::reset_stream_and_wait_done(ppu_thread& ppu)
|
||||
{
|
||||
if (lv2_syscall<sys_cond_wait>(ppu, cond, 0) != CELL_OK)
|
||||
{
|
||||
sys_mutex_unlock(ppu, mutex);
|
||||
lv2_syscall<sys_mutex_unlock>(ppu, mutex);
|
||||
return CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
@ -2835,7 +2839,7 @@ error_code DmuxPamfContext::reset_stream_and_wait_done(ppu_thread& ppu)
|
||||
}
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
return lv2_syscall<sys_mutex_unlock>(ppu, mutex) == CELL_OK ? static_cast<error_code>(CELL_OK) : CELL_DMUX_PAMF_ERROR_FATAL;
|
||||
}
|
||||
|
||||
error_code _CellDmuxCoreOpResetStreamAndWaitDone(ppu_thread& ppu, vm::ptr<CellDmuxPamfHandle> handle)
|
||||
|
||||
@ -64,7 +64,7 @@ extern const std::map<std::string_view, int> g_prx_list
|
||||
{ "libddpdec.sprx", 0 },
|
||||
{ "libdivxdec.sprx", 0 },
|
||||
{ "libdmux.sprx", 0 },
|
||||
{ "libdmuxpamf.sprx", 0 },
|
||||
{ "libdmuxpamf.sprx", 1 },
|
||||
{ "libdtslbrdec.sprx", 0 },
|
||||
{ "libfiber.sprx", 0 },
|
||||
{ "libfont.sprx", 0 },
|
||||
|
||||
Loading…
Reference in New Issue
Block a user