author     ReinUsesLisp  2021-05-07 06:26:12 -0300
committer  ameerj        2021-07-22 21:51:30 -0400
commit     36f158626726f940d9dba22a2b03ebbb5aa41c5e (patch)
tree       8323e280f214fdf8c0b4d47303b47bd3d0a3fb0a /src
parent     vk_query_cache: Wait before reading queries (diff)
vk_scheduler: Use locks instead of an SPSC queue
This tries to fix a data race where we'd wait forever for the GPU.
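For context, the bug class here is a lost wakeup: with the lock-free SPSC queue, a notify could fire between the other thread's emptiness check and its sleep, so both sides could end up waiting on each other forever. The patch moves to the standard mutex-plus-condition-variable handoff, where the wait predicate is always evaluated under the lock. A minimal sketch of that pattern (the names Worker, Push, Loop are illustrative, not yuzu's actual API):

```cpp
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

class Worker {
public:
    Worker() : thread{[this] { Loop(); }} {}

    ~Worker() {
        {
            std::lock_guard lock{mutex}; // flip quit under the lock...
            quit = true;
        }
        cv.notify_all(); // ...so this wakeup cannot be lost
        thread.join();
    }

    void Push(int job) {
        {
            std::lock_guard lock{mutex};
            queue.push(job);
        }
        cv.notify_one();
    }

private:
    void Loop() {
        for (;;) {
            int job{};
            {
                std::unique_lock lock{mutex};
                // The predicate is re-checked while holding the lock, which
                // closes the window the queue-plus-bare-notify version had.
                cv.wait(lock, [this] { return !queue.empty() || quit; });
                if (quit) {
                    return;
                }
                job = queue.front();
                queue.pop();
            }
            Execute(job); // run the work outside the lock
        }
    }

    static void Execute(int) {}

    std::queue<int> queue;
    std::mutex mutex;
    std::condition_variable cv;
    bool quit = false;
    std::thread thread; // declared last so the members above outlive it
};
```

Running the job outside the lock keeps the critical section short; the patch does the same by calling ExecuteAll() on the chunk only after releasing work_mutex.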
Diffstat (limited to 'src')
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 58
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h   | 16
2 files changed, 42 insertions(+), 32 deletions(-)
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index fcb6a5911..4840962de 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -47,8 +47,11 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
 }
 
 VKScheduler::~VKScheduler() {
-    quit = true;
-    cv.notify_all();
+    {
+        std::lock_guard lock{work_mutex};
+        quit = true;
+    }
+    work_cv.notify_all();
     worker_thread.join();
 }
 
@@ -69,20 +72,19 @@ void VKScheduler::WaitWorker() {
     MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
     DispatchWork();
 
-    bool finished = false;
-    do {
-        cv.notify_all();
-        std::unique_lock lock{mutex};
-        finished = chunk_queue.Empty();
-    } while (!finished);
+    std::unique_lock lock{work_mutex};
+    wait_cv.wait(lock, [this] { return work_queue.empty(); });
 }
 
 void VKScheduler::DispatchWork() {
     if (chunk->Empty()) {
         return;
     }
-    chunk_queue.Push(std::move(chunk));
-    cv.notify_all();
+    {
+        std::lock_guard lock{work_mutex};
+        work_queue.push(std::move(chunk));
+    }
+    work_cv.notify_one();
     AcquireNewChunk();
 }
 
@@ -135,22 +137,27 @@ bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
 
 void VKScheduler::WorkerThread() {
     Common::SetCurrentThreadName("yuzu:VulkanWorker");
-    std::unique_lock lock{mutex};
     do {
-        cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; });
-        if (quit) {
-            continue;
+        if (work_queue.empty()) {
+            wait_cv.notify_all();
         }
-        while (!chunk_queue.Empty()) {
-            auto extracted_chunk = std::move(chunk_queue.Front());
-            chunk_queue.Pop();
-            const bool has_submit = extracted_chunk->HasSubmit();
-            extracted_chunk->ExecuteAll(current_cmdbuf);
-            if (has_submit) {
-                AllocateWorkerCommandBuffer();
+        std::unique_ptr<CommandChunk> work;
+        {
+            std::unique_lock lock{work_mutex};
+            work_cv.wait(lock, [this] { return !work_queue.empty() || quit; });
+            if (quit) {
+                continue;
             }
-            chunk_reserve.Push(std::move(extracted_chunk));
+            work = std::move(work_queue.front());
+            work_queue.pop();
+        }
+        const bool has_submit = work->HasSubmit();
+        work->ExecuteAll(current_cmdbuf);
+        if (has_submit) {
+            AllocateWorkerCommandBuffer();
         }
+        std::lock_guard reserve_lock{reserve_mutex};
+        chunk_reserve.push_back(std::move(work));
     } while (!quit);
 }
 
@@ -269,12 +276,13 @@ void VKScheduler::EndRenderPass() {
 }
 
 void VKScheduler::AcquireNewChunk() {
-    if (chunk_reserve.Empty()) {
+    std::lock_guard lock{reserve_mutex};
+    if (chunk_reserve.empty()) {
         chunk = std::make_unique<CommandChunk>();
         return;
     }
-    chunk = std::move(chunk_reserve.Front());
-    chunk_reserve.Pop();
+    chunk = std::move(chunk_reserve.back());
+    chunk_reserve.pop_back();
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 40215c4c5..6600fb142 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -6,14 +6,14 @@
 
 #include <atomic>
 #include <condition_variable>
+#include <queue>
 #include <cstddef>
 #include <memory>
-#include <stack>
 #include <thread>
 #include <utility>
+
 #include "common/alignment.h"
 #include "common/common_types.h"
-#include "common/threadsafe_queue.h"
 #include "video_core/renderer_vulkan/vk_master_semaphore.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
@@ -220,11 +220,13 @@ private:
     std::array<VkImage, 9> renderpass_images{};
     std::array<VkImageSubresourceRange, 9> renderpass_image_ranges{};
 
-    Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
-    Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
-    std::mutex mutex;
-    std::condition_variable cv;
-    bool quit = false;
+    std::queue<std::unique_ptr<CommandChunk>> work_queue;
+    std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
+    std::mutex reserve_mutex;
+    std::mutex work_mutex;
+    std::condition_variable work_cv;
+    std::condition_variable wait_cv;
+    std::atomic_bool quit{};
 };
 
 } // namespace Vulkan