author	Markus Wick	2021-04-07 13:57:49 +0200
committer	Markus Wick	2021-04-07 22:38:52 +0200
commit	e6fb49fa4bb2864702abcefc14f6bb62eaba7a7e (patch)
tree	6cad1f7c35b6f9c539fbb9ad0fa2a1359a543a97
parent	video_core/gpu_thread: Implement a ShutDown method. (diff)
video_core/gpu_thread: Keep the write lock for allocating the fence.
Otherwise the fence might get submitted out-of-order into the queue, which makes testing it pointless. The overhead should be tiny, as the mutex is just moved from the queue to the writing code.
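
To make the ordering problem concrete, here is a minimal, hypothetical sketch (not yuzu code; FakeState, PushRacy and PushOrdered are made-up names) of why the fence must be allocated and pushed under the same lock:

// Hypothetical illustration of the race the patch closes.
#include <atomic>
#include <cstdint>
#include <deque>
#include <mutex>

struct FakeState {
    std::mutex write_lock;
    std::atomic<std::uint64_t> last_fence{};
    std::deque<std::uint64_t> queue; // stand-in for the real command queue
};

// Broken ordering: the fence is allocated before the lock is taken. Thread A can
// allocate fence N, get preempted, and thread B can allocate and push fence N+1
// first, so the queue holds N+1 ahead of N.
std::uint64_t PushRacy(FakeState& state) {
    const std::uint64_t fence = ++state.last_fence;
    std::scoped_lock lk{state.write_lock};
    state.queue.push_back(fence);
    return fence;
}

// What the patch does: keep the lock for both the allocation and the push, so the
// fences in the queue are strictly increasing.
std::uint64_t PushOrdered(FakeState& state) {
    std::scoped_lock lk{state.write_lock};
    const std::uint64_t fence = ++state.last_fence;
    state.queue.push_back(fence);
    return fence;
}

With every push serialized behind write_lock there is effectively a single producer left, which appears to be why the queue alias in the header below also changes from Common::MPSCQueue to Common::SPSCQueue.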
-rw-r--r--	src/video_core/gpu_thread.cpp	| 2 ++
-rw-r--r--	src/video_core/gpu_thread.h	| 3 ++-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 6b8f06f78..9488bf544 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -151,11 +151,13 @@ void ThreadManager::OnCommandListEnd() {
 }
 
 u64 ThreadManager::PushCommand(CommandData&& command_data) {
+    std::unique_lock lk(state.write_lock);
     const u64 fence{++state.last_fence};
     state.queue.Push(CommandDataContainer(std::move(command_data), fence));
 
     if (!is_async) {
         // In synchronous GPU mode, block the caller until the command has executed
+        lk.unlock();
         WaitIdle();
     }
 
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index d384164de..cb901c22a 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -101,7 +101,8 @@ struct CommandDataContainer {
 struct SynchState final {
     std::atomic_bool is_running{true};
 
-    using CommandQueue = Common::MPSCQueue<CommandDataContainer>;
+    using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
+    std::mutex write_lock;
     CommandQueue queue;
     u64 last_fence{};
     std::atomic<u64> signaled_fence{};