Common: Add AtomicMutex and SpinMutex classes as faster alternatives to std::mutex.

This commit is contained in:
Jordan Woyak 2025-10-31 19:42:33 -05:00
parent fc9f25a418
commit 46369ef1b1
6 changed files with 157 additions and 0 deletions

View File

@ -111,6 +111,7 @@ add_library(common
MinizipUtil.h
MsgHandler.cpp
MsgHandler.h
Mutex.h
NandPaths.cpp
NandPaths.h
Network.cpp

View File

@ -0,0 +1,51 @@
// Copyright 2025 Dolphin Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
namespace Common
{
namespace detail
{
// Lightweight mutex built on a single std::atomic_bool.
// When UseAtomicWait is true, a contended lock() blocks in the C++20
// std::atomic wait/notify mechanism; when false it busy-spins.
// Satisfies the Lockable requirements, so it works with std::lock_guard,
// std::unique_lock, and std::scoped_lock.
// Non-recursive: a second lock() from the owning thread deadlocks.
template <bool UseAtomicWait>
class AtomicMutexBase
{
public:
  // Blocks until the lock is acquired (acquire ordering on success).
  void lock()
  {
    while (m_lock.exchange(true, std::memory_order_acquire))
    {
      if constexpr (UseAtomicWait)
      {
        // Sleep until unlock() notifies instead of burning CPU.
        // (this-> keeps the call dependent, so translation units that only
        // instantiate the spin variant don't require the C++20 overloads.)
        this->m_lock.wait(true, std::memory_order_relaxed);
      }
      else
      {
        // Test-and-test-and-set: spin on a read-only relaxed load so
        // contending cores share the cache line instead of ping-ponging it
        // with repeated exchanges. The exchange above re-establishes the
        // acquire ordering once the lock looks free.
        while (m_lock.load(std::memory_order_relaxed))
        {
        }
      }
    }
  }

  // Attempts to acquire the lock without blocking; returns true on success.
  // May fail spuriously (compare_exchange_weak), which the standard's
  // Lockable requirements explicitly permit for try_lock().
  bool try_lock()
  {
    bool expected = false;
    return m_lock.compare_exchange_weak(expected, true, std::memory_order_acquire,
                                        std::memory_order_relaxed);
  }

  // Releases the lock (release ordering), waking one waiter if applicable.
  // Unlike with std::mutex, this call may come from any thread.
  void unlock()
  {
    m_lock.store(false, std::memory_order_release);
    if constexpr (UseAtomicWait)
      this->m_lock.notify_one();
  }

private:
  std::atomic_bool m_lock{};
};
} // namespace detail
// Sometimes faster than std::mutex.
using AtomicMutex = detail::AtomicMutexBase<true>;
// Very fast to lock and unlock when uncontested (~3x faster than std::mutex).
using SpinMutex = detail::AtomicMutexBase<false>;
} // namespace Common

View File

@ -145,6 +145,7 @@
<ClInclude Include="Common\MemoryUtil.h" />
<ClInclude Include="Common\MinizipUtil.h" />
<ClInclude Include="Common\MsgHandler.h" />
<ClInclude Include="Common\Mutex.h" />
<ClInclude Include="Common\NandPaths.h" />
<ClInclude Include="Common\Network.h" />
<ClInclude Include="Common\PcapFile.h" />

View File

@ -14,6 +14,7 @@ add_dolphin_test(FixedSizeQueueTest FixedSizeQueueTest.cpp)
add_dolphin_test(FlagTest FlagTest.cpp)
add_dolphin_test(FloatUtilsTest FloatUtilsTest.cpp)
add_dolphin_test(MathUtilTest MathUtilTest.cpp)
add_dolphin_test(MutexTest MutexTest.cpp)
add_dolphin_test(NandPathsTest NandPathsTest.cpp)
add_dolphin_test(SettingsHandlerTest SettingsHandlerTest.cpp)
add_dolphin_test(SPSCQueueTest SPSCQueueTest.cpp)

View File

@ -0,0 +1,102 @@
// Copyright 2025 Dolphin Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <gtest/gtest.h>
#include <algorithm>
#include <chrono>
#include <mutex>
#include <thread>
#include "Common/Mutex.h"
// Exercises lock(), try_lock(), and unlock() of the given mutex type:
// cross-thread lock hand-off, blocking on a held lock, and contended
// lock()/try_lock() from several worker threads.
// `mutex_name` is used only for the failure-rate log line.
template <typename MutexType>
static void DoAtomicMutexTests(const char mutex_name[])
{
  MutexType work_mutex;
  bool worker_done = false;
  static constexpr auto SLEEP_TIME = std::chrono::microseconds{1};
  // lock() on main thread, unlock() on worker thread.
  // The init-capture locks work_mutex here (before the thread starts); the
  // closure's unique_lock member releases it on the worker when the lambda
  // returns. std::mutex forbids unlocking from another thread; these types
  // explicitly allow it.
  std::thread thread{[&, lk = std::unique_lock{work_mutex}] {
    std::this_thread::sleep_for(SLEEP_TIME);
    worker_done = true;
  }};
  // lock() waits for the thread to unlock().
  // The worker writes worker_done before releasing the lock, so acquiring it
  // here synchronizes-with that write.
  {
    std::lock_guard lk{work_mutex};
    EXPECT_TRUE(worker_done);
  }
  thread.join();
  // Prevent below workers from incrementing `done_count`.
  MutexType done_mutex;
  std::unique_lock done_lk{done_mutex};
  // try_lock() fails when holding a lock.
  EXPECT_FALSE(done_mutex.try_lock());
  static constexpr int THREAD_COUNT = 4;
  static constexpr int REPEAT_COUNT = 100;
  static constexpr int TOTAL_ITERATIONS = THREAD_COUNT * REPEAT_COUNT;
  int done_count = 0;   // guarded by done_mutex
  int work_count = 0;   // guarded by work_mutex
  std::atomic<int> try_lock_fail_count{};
  std::vector<std::thread> threads(THREAD_COUNT);
  for (auto& t : threads)
  {
    t = std::thread{[&] {
      // lock() blocks until main thread unlock()s.
      {
        std::lock_guard lk{done_mutex};
        ++done_count;
      }
      // Contesting lock() and try_lock() doesn't explode.
      for (int i = 0; i != REPEAT_COUNT; ++i)
      {
        {
          std::lock_guard lk{work_mutex};
          ++work_count;
        }
        // Try lock in a loop.
        while (!work_mutex.try_lock())
        {
          try_lock_fail_count.fetch_add(1, std::memory_order_relaxed);
        }
        // Hand the manually acquired lock to a guard for scoped release.
        std::lock_guard lk{work_mutex, std::adopt_lock};
        ++work_count;
      }
    }};
  }
  std::this_thread::sleep_for(SLEEP_TIME);
  // The threads are still blocking on done_mutex.
  // (Safe to read without the lock: no worker can have written done_count yet,
  // since all of them are still waiting to acquire done_mutex.)
  EXPECT_EQ(done_count, 0);
  done_lk.unlock();
  std::ranges::for_each(threads, &std::thread::join);
  // The threads finished.
  EXPECT_EQ(done_count, THREAD_COUNT);
  // Each iteration increments work_count twice: once via lock(), once via try_lock().
  EXPECT_EQ(work_count, TOTAL_ITERATIONS * 2);
  GTEST_LOG_(INFO) << mutex_name << "::try_lock() failure %: "
                   << (try_lock_fail_count * 100.0 / (TOTAL_ITERATIONS + try_lock_fail_count));
  // Things are still sane after contesting in worker threads.
  std::lock_guard lk{work_mutex};
}
// Runs the shared test routine against both Common mutex flavors.
TEST(Mutex, AtomicMutex)
{
  DoAtomicMutexTests<Common::AtomicMutex>("AtomicMutex");
  DoAtomicMutexTests<Common::SpinMutex>("SpinMutex");
}

View File

@ -53,6 +53,7 @@
<ClCompile Include="Common\FlagTest.cpp" />
<ClCompile Include="Common\FloatUtilsTest.cpp" />
<ClCompile Include="Common\MathUtilTest.cpp" />
<ClCompile Include="Common\MutexTest.cpp" />
<ClCompile Include="Common\NandPathsTest.cpp" />
<ClCompile Include="Common\SettingsHandlerTest.cpp" />
<ClCompile Include="Common\SPSCQueueTest.cpp" />