path: root/src/video_core/fence_manager.h
author     Fernando Sahmkow   2022-02-06 01:16:11 +0100
committer  Fernando Sahmkow   2022-10-06 21:00:52 +0200
commit     bc8b3d225eda388f0603830cbff8357893abb0f9 (patch)
tree       479b41b73913feceeeb0c9c6f3147d6491c0fa04 /src/video_core/fence_manager.h
parent     MemoryManager: initial multi paging system implementation. (diff)
VideoCore: Refactor fencing system.
Diffstat (limited to 'src/video_core/fence_manager.h')
-rw-r--r--  src/video_core/fence_manager.h | 96
1 file changed, 39 insertions(+), 57 deletions(-)
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 03a70e5e0..c390ac91b 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -5,6 +5,8 @@
 
 #include <algorithm>
 #include <cstring>
+#include <deque>
+#include <functional>
 #include <memory>
 #include <queue>
 
@@ -19,28 +21,7 @@ namespace VideoCommon {
 
 class FenceBase {
 public:
-    explicit FenceBase(u32 payload_, bool is_stubbed_)
-        : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
-
-    explicit FenceBase(u8* address_, u32 payload_, bool is_stubbed_)
-        : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
-
-    u8* GetAddress() const {
-        return address;
-    }
-
-    u32 GetPayload() const {
-        return payload;
-    }
-
-    bool IsSemaphore() const {
-        return is_semaphore;
-    }
-
-private:
-    u8* address;
-    u32 payload;
-    bool is_semaphore;
+    explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
 
 protected:
     bool is_stubbed;
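With the address, payload and is_semaphore members removed, any data a fence used to carry now has to travel inside the deferred operation itself. A minimal sketch of how the removed semaphore write could be rebuilt as a callable (the helper name is hypothetical and not part of this commit):

#include <cstdint>
#include <cstring>
#include <functional>

// Packages the old FenceBase state (address + payload) into a callable that
// performs the same std::memcpy the manager previously did when popping a
// semaphore fence.
std::function<void()> MakeSemaphoreWrite(std::uint8_t* address, std::uint32_t payload) {
    return [address, payload] { std::memcpy(address, &payload, sizeof(payload)); };
}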
@@ -60,31 +41,28 @@ public:
         buffer_cache.AccumulateFlushes();
     }
 
-    void SignalSemaphore(u8* addr, u32 value) {
+    void SyncOperation(std::function<void()>&& func) {
+        uncommitted_operations.emplace_back(std::move(func));
+    }
+
+    void SignalFence(std::function<void()>&& func) {
         TryReleasePendingFences();
         const bool should_flush = ShouldFlush();
         CommitAsyncFlushes();
-        TFence new_fence = CreateFence(addr, value, !should_flush);
+        uncommitted_operations.emplace_back(std::move(func));
+        CommitOperations();
+        TFence new_fence = CreateFence(!should_flush);
         fences.push(new_fence);
         QueueFence(new_fence);
         if (should_flush) {
             rasterizer.FlushCommands();
         }
-        rasterizer.SyncGuestHost();
     }
 
     void SignalSyncPoint(u32 value) {
         syncpoint_manager.IncrementGuest(value);
-        TryReleasePendingFences();
-        const bool should_flush = ShouldFlush();
-        CommitAsyncFlushes();
-        TFence new_fence = CreateFence(value, !should_flush);
-        fences.push(new_fence);
-        QueueFence(new_fence);
-        if (should_flush) {
-            rasterizer.FlushCommands();
-        }
-        rasterizer.SyncGuestHost();
+        std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
+        SignalFence(std::move(func));
     }
 
     void WaitPendingFences() {
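SignalSyncPoint above still increments the guest counter immediately and now wraps only the host increment in a deferred operation that runs once the fence is reached. A self-contained toy model of that split (MiniSyncpointManager and the bare deque are illustrative stand-ins, not yuzu types):

#include <cstdint>
#include <deque>
#include <functional>
#include <iostream>

// Toy stand-in for the syncpoint manager: one guest and one host counter.
struct MiniSyncpointManager {
    std::uint32_t guest = 0;
    std::uint32_t host = 0;
    void IncrementGuest() { ++guest; }
    void IncrementHost() { ++host; }
};

int main() {
    MiniSyncpointManager syncpoint_manager;
    std::deque<std::function<void()>> deferred;  // stands in for the fence's operation list

    // Equivalent of SignalSyncPoint(value): the guest view advances right away...
    syncpoint_manager.IncrementGuest();
    // ...while the host increment is queued to run together with the fence.
    deferred.emplace_back([&] { syncpoint_manager.IncrementHost(); });

    std::cout << "guest=" << syncpoint_manager.guest
              << " host=" << syncpoint_manager.host << '\n';  // guest=1 host=0

    for (auto& operation : deferred) {
        operation();  // fence reached: the deferred host increment fires
    }

    std::cout << "guest=" << syncpoint_manager.guest
              << " host=" << syncpoint_manager.host << '\n';  // guest=1 host=1
}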
@@ -94,12 +72,10 @@ public:
             WaitFence(current_fence);
         }
         PopAsyncFlushes();
-        if (current_fence->IsSemaphore()) {
-            char* address = reinterpret_cast<char*>(current_fence->GetAddress());
-            auto payload = current_fence->GetPayload();
-            std::memcpy(address, &payload, sizeof(payload));
-        } else {
-            syncpoint_manager.IncrementHost(current_fence->GetPayload());
+        auto operations = std::move(pending_operations.front());
+        pending_operations.pop_front();
+        for (auto& operation : operations) {
+            operation();
         }
         PopFence();
     }
@@ -114,11 +90,9 @@ protected:
 
     virtual ~FenceManager() = default;
 
-    /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
+    /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is
     /// true
-    virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
-    /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
-    virtual TFence CreateFence(u8* addr, u32 value, bool is_stubbed) = 0;
+    virtual TFence CreateFence(bool is_stubbed) = 0;
     /// Queues a fence into the backend if the fence isn't stubbed.
     virtual void QueueFence(TFence& fence) = 0;
     /// Notifies that the backend fence has been signaled/reached in host GPU.
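After this hunk a backend only has to provide the payload-free CreateFence(bool) alongside its queue and wait hooks. A hypothetical do-nothing backend showing the reduced surface (NullFence, Fence and NullFenceBackend are invented names for illustration, not yuzu classes):

#include <memory>

struct NullFence {
    explicit NullFence(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
    bool is_stubbed;
};
using Fence = std::shared_ptr<NullFence>;

struct NullFenceBackend {
    // The single remaining factory: no u8* address or u32 payload overloads.
    Fence CreateFence(bool is_stubbed) { return std::make_shared<NullFence>(is_stubbed); }
    // Would submit the fence to the host GPU unless it is stubbed.
    void QueueFence(Fence&) {}
    // Would block until the host GPU has reached the fence.
    void WaitFence(Fence&) {}
};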
@@ -141,12 +115,10 @@ private:
             return;
         }
         PopAsyncFlushes();
-        if (current_fence->IsSemaphore()) {
-            char* address = reinterpret_cast<char*>(current_fence->GetAddress());
-            const auto payload = current_fence->GetPayload();
-            std::memcpy(address, &payload, sizeof(payload));
-        } else {
-            syncpoint_manager.IncrementHost(current_fence->GetPayload());
+        auto operations = std::move(pending_operations.front());
+        pending_operations.pop_front();
+        for (auto& operation : operations) {
+            operation();
         }
         PopFence();
     }
@@ -165,16 +137,20 @@ private:
     }
 
     void PopAsyncFlushes() {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-        texture_cache.PopAsyncFlushes();
-        buffer_cache.PopAsyncFlushes();
+        {
+            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+            texture_cache.PopAsyncFlushes();
+            buffer_cache.PopAsyncFlushes();
+        }
         query_cache.PopAsyncFlushes();
     }
 
     void CommitAsyncFlushes() {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-        texture_cache.CommitAsyncFlushes();
-        buffer_cache.CommitAsyncFlushes();
+        {
+            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+            texture_cache.CommitAsyncFlushes();
+            buffer_cache.CommitAsyncFlushes();
+        }
         query_cache.CommitAsyncFlushes();
     }
 
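The two hunks above narrow the std::scoped_lock to an inner block, so the query cache calls now run after the buffer and texture cache mutexes have been released. A standalone sketch of the pattern, with the cache calls reduced to comments and the mutexes as plain locals:

#include <mutex>

std::mutex buffer_mutex;
std::mutex texture_mutex;

void PopAsyncFlushesPattern() {
    {
        // Both cache mutexes are held only for the buffer/texture flush calls...
        std::scoped_lock lock{buffer_mutex, texture_mutex};
        // texture_cache.PopAsyncFlushes();
        // buffer_cache.PopAsyncFlushes();
    }
    // ...and are already released by the time the query cache is processed.
    // query_cache.PopAsyncFlushes();
}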
@@ -183,7 +159,13 @@ private:
         fences.pop();
     }
 
+    void CommitOperations() {
+        pending_operations.emplace_back(std::move(uncommitted_operations));
+    }
+
     std::queue<TFence> fences;
+    std::deque<std::function<void()>> uncommitted_operations;
+    std::deque<std::deque<std::function<void()>>> pending_operations;
 
     DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
 };
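Pulling the pieces together, the sketch below is a compilable toy version of the new bookkeeping, not yuzu code: SyncOperation collects operations, SignalFence commits the current batch together with a fence, and releasing a fence runs exactly the batch that was committed with it, in FIFO order. A plain signaled flag stands in for the backend fence.

#include <deque>
#include <functional>
#include <iostream>
#include <memory>
#include <queue>

struct ToyFence {
    bool signaled = false;  // stands in for the backend fence state
};

class ToyFenceManager {
public:
    void SyncOperation(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
    }

    std::shared_ptr<ToyFence> SignalFence(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
        // CommitOperations(): the whole uncommitted batch is paired with one fence.
        pending_operations.emplace_back(std::move(uncommitted_operations));
        auto fence = std::make_shared<ToyFence>();
        fences.push(fence);
        return fence;
    }

    void TryReleasePendingFences() {
        while (!fences.empty() && fences.front()->signaled) {
            // Run the batch that was committed with the fence at the head of the queue.
            auto operations = std::move(pending_operations.front());
            pending_operations.pop_front();
            for (auto& operation : operations) {
                operation();
            }
            fences.pop();
        }
    }

private:
    std::queue<std::shared_ptr<ToyFence>> fences;
    std::deque<std::function<void()>> uncommitted_operations;
    std::deque<std::deque<std::function<void()>>> pending_operations;
};

int main() {
    ToyFenceManager manager;
    manager.SyncOperation([] { std::cout << "operation A\n"; });
    auto fence = manager.SignalFence([] { std::cout << "operation B\n"; });

    manager.TryReleasePendingFences();  // prints nothing: the fence is not signaled yet
    fence->signaled = true;             // pretend the host GPU reached the fence
    manager.TryReleasePendingFences();  // prints "operation A" then "operation B"
}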