diff options
| author | 2020-04-16 12:29:53 -0400 | |
|---|---|---|
| committer | 2020-04-22 11:36:24 -0400 | |
| commit | f616dc0b591b783b3fb75ca89633f1c26cce05a9 (patch) | |
| tree | 43a9c2052c5ceaad8cf6a69173b0817e54cc8f42 /src | |
| parent | Fix GCC error. (diff) | |
| download | yuzu-f616dc0b591b783b3fb75ca89633f1c26cce05a9.tar.gz yuzu-f616dc0b591b783b3fb75ca89633f1c26cce05a9.tar.xz yuzu-f616dc0b591b783b3fb75ca89633f1c26cce05a9.zip | |
Address Feedback.
Diffstat (limited to 'src')
| -rw-r--r-- | src/video_core/buffer_cache/buffer_cache.h | 56 | ||||
| -rw-r--r-- | src/video_core/fence_manager.h | 72 | ||||
| -rw-r--r-- | src/video_core/gpu.cpp | 4 | ||||
| -rw-r--r-- | src/video_core/gpu.h | 12 | ||||
| -rw-r--r-- | src/video_core/query_cache.h | 39 | ||||
| -rw-r--r-- | src/video_core/rasterizer_interface.h | 1 | ||||
| -rw-r--r-- | src/video_core/renderer_opengl/gl_fence_manager.cpp | 2 | ||||
| -rw-r--r-- | src/video_core/renderer_opengl/gl_fence_manager.h | 2 | ||||
| -rw-r--r-- | src/video_core/renderer_opengl/gl_rasterizer.cpp | 5 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_fence_manager.cpp | 2 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_fence_manager.h | 2 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_rasterizer.cpp | 2 | ||||
| -rw-r--r-- | src/video_core/texture_cache/texture_cache.h | 50 |
13 files changed, 117 insertions, 132 deletions
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 372545080..f3aa35295 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h | |||
| @@ -154,12 +154,9 @@ public: | |||
| 154 | std::lock_guard lock{mutex}; | 154 | std::lock_guard lock{mutex}; |
| 155 | 155 | ||
| 156 | std::vector<MapInterval> objects = GetMapsInRange(addr, size); | 156 | std::vector<MapInterval> objects = GetMapsInRange(addr, size); |
| 157 | for (auto& object : objects) { | 157 | return std::any_of(objects.begin(), objects.end(), [](const MapInterval& map) { |
| 158 | if (object->IsModified() && object->IsRegistered()) { | 158 | return map->IsModified() && map->IsRegistered(); |
| 159 | return true; | 159 | }); |
| 160 | } | ||
| 161 | } | ||
| 162 | return false; | ||
| 163 | } | 160 | } |
| 164 | 161 | ||
| 165 | /// Mark the specified region as being invalidated | 162 | /// Mark the specified region as being invalidated |
| @@ -199,9 +196,9 @@ public: | |||
| 199 | } | 196 | } |
| 200 | 197 | ||
| 201 | void CommitAsyncFlushes() { | 198 | void CommitAsyncFlushes() { |
| 202 | if (uncommited_flushes) { | 199 | if (uncommitted_flushes) { |
| 203 | auto commit_list = std::make_shared<std::list<MapInterval>>(); | 200 | auto commit_list = std::make_shared<std::list<MapInterval>>(); |
| 204 | for (auto& map : *uncommited_flushes) { | 201 | for (auto& map : *uncommitted_flushes) { |
| 205 | if (map->IsRegistered() && map->IsModified()) { | 202 | if (map->IsRegistered() && map->IsModified()) { |
| 206 | // TODO(Blinkhawk): Implement backend asynchronous flushing | 203 | // TODO(Blinkhawk): Implement backend asynchronous flushing |
| 207 | // AsyncFlushMap(map) | 204 | // AsyncFlushMap(map) |
| @@ -209,41 +206,34 @@ public: | |||
| 209 | } | 206 | } |
| 210 | } | 207 | } |
| 211 | if (!commit_list->empty()) { | 208 | if (!commit_list->empty()) { |
| 212 | commited_flushes.push_back(commit_list); | 209 | committed_flushes.push_back(commit_list); |
| 213 | } else { | 210 | } else { |
| 214 | commited_flushes.emplace_back(); | 211 | committed_flushes.emplace_back(); |
| 215 | } | 212 | } |
| 216 | } else { | 213 | } else { |
| 217 | commited_flushes.emplace_back(); | 214 | committed_flushes.emplace_back(); |
| 218 | } | 215 | } |
| 219 | uncommited_flushes.reset(); | 216 | uncommitted_flushes.reset(); |
| 220 | } | 217 | } |
| 221 | 218 | ||
| 222 | bool ShouldWaitAsyncFlushes() { | 219 | bool ShouldWaitAsyncFlushes() const { |
| 223 | if (commited_flushes.empty()) { | 220 | if (committed_flushes.empty()) { |
| 224 | return false; | ||
| 225 | } | ||
| 226 | auto& flush_list = commited_flushes.front(); | ||
| 227 | if (!flush_list) { | ||
| 228 | return false; | 221 | return false; |
| 229 | } | 222 | } |
| 230 | return true; | 223 | return committed_flushes.front() != nullptr; |
| 231 | } | 224 | } |
| 232 | 225 | ||
| 233 | bool HasUncommitedFlushes() { | 226 | bool HasUncommittedFlushes() const { |
| 234 | if (uncommited_flushes) { | 227 | return uncommitted_flushes != nullptr; |
| 235 | return true; | ||
| 236 | } | ||
| 237 | return false; | ||
| 238 | } | 228 | } |
| 239 | 229 | ||
| 240 | void PopAsyncFlushes() { | 230 | void PopAsyncFlushes() { |
| 241 | if (commited_flushes.empty()) { | 231 | if (committed_flushes.empty()) { |
| 242 | return; | 232 | return; |
| 243 | } | 233 | } |
| 244 | auto& flush_list = commited_flushes.front(); | 234 | auto& flush_list = committed_flushes.front(); |
| 245 | if (!flush_list) { | 235 | if (!flush_list) { |
| 246 | commited_flushes.pop_front(); | 236 | committed_flushes.pop_front(); |
| 247 | return; | 237 | return; |
| 248 | } | 238 | } |
| 249 | for (MapInterval& map : *flush_list) { | 239 | for (MapInterval& map : *flush_list) { |
| @@ -252,7 +242,7 @@ public: | |||
| 252 | FlushMap(map); | 242 | FlushMap(map); |
| 253 | } | 243 | } |
| 254 | } | 244 | } |
| 255 | commited_flushes.pop_front(); | 245 | committed_flushes.pop_front(); |
| 256 | } | 246 | } |
| 257 | 247 | ||
| 258 | virtual BufferType GetEmptyBuffer(std::size_t size) = 0; | 248 | virtual BufferType GetEmptyBuffer(std::size_t size) = 0; |
| @@ -568,10 +558,10 @@ private: | |||
| 568 | } | 558 | } |
| 569 | 559 | ||
| 570 | void MarkForAsyncFlush(MapInterval& map) { | 560 | void MarkForAsyncFlush(MapInterval& map) { |
| 571 | if (!uncommited_flushes) { | 561 | if (!uncommitted_flushes) { |
| 572 | uncommited_flushes = std::make_shared<std::unordered_set<MapInterval>>(); | 562 | uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval>>(); |
| 573 | } | 563 | } |
| 574 | uncommited_flushes->insert(map); | 564 | uncommitted_flushes->insert(map); |
| 575 | } | 565 | } |
| 576 | 566 | ||
| 577 | VideoCore::RasterizerInterface& rasterizer; | 567 | VideoCore::RasterizerInterface& rasterizer; |
| @@ -605,8 +595,8 @@ private: | |||
| 605 | std::vector<u8> staging_buffer; | 595 | std::vector<u8> staging_buffer; |
| 606 | std::list<MapInterval> marked_for_unregister; | 596 | std::list<MapInterval> marked_for_unregister; |
| 607 | 597 | ||
| 608 | std::shared_ptr<std::unordered_set<MapInterval>> uncommited_flushes{}; | 598 | std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{}; |
| 609 | std::list<std::shared_ptr<std::list<MapInterval>>> commited_flushes; | 599 | std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes; |
| 610 | 600 | ||
| 611 | std::recursive_mutex mutex; | 601 | std::recursive_mutex mutex; |
| 612 | }; | 602 | }; |
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 99a138b5b..9fe9c1bf2 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h | |||
| @@ -28,15 +28,15 @@ public: | |||
| 28 | FenceBase(GPUVAddr address, u32 payload, bool is_stubbed) | 28 | FenceBase(GPUVAddr address, u32 payload, bool is_stubbed) |
| 29 | : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {} | 29 | : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {} |
| 30 | 30 | ||
| 31 | constexpr GPUVAddr GetAddress() const { | 31 | GPUVAddr GetAddress() const { |
| 32 | return address; | 32 | return address; |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | constexpr u32 GetPayload() const { | 35 | u32 GetPayload() const { |
| 36 | return payload; | 36 | return payload; |
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | constexpr bool IsSemaphore() const { | 39 | bool IsSemaphore() const { |
| 40 | return is_semaphore; | 40 | return is_semaphore; |
| 41 | } | 41 | } |
| 42 | 42 | ||
| @@ -54,12 +54,8 @@ class FenceManager { | |||
| 54 | public: | 54 | public: |
| 55 | void SignalSemaphore(GPUVAddr addr, u32 value) { | 55 | void SignalSemaphore(GPUVAddr addr, u32 value) { |
| 56 | TryReleasePendingFences(); | 56 | TryReleasePendingFences(); |
| 57 | bool should_flush = texture_cache.HasUncommitedFlushes(); | 57 | bool should_flush = ShouldFlush(); |
| 58 | should_flush |= buffer_cache.HasUncommitedFlushes(); | 58 | CommitAsyncFlushes(); |
| 59 | should_flush |= query_cache.HasUncommitedFlushes(); | ||
| 60 | texture_cache.CommitAsyncFlushes(); | ||
| 61 | buffer_cache.CommitAsyncFlushes(); | ||
| 62 | query_cache.CommitAsyncFlushes(); | ||
| 63 | TFence new_fence = CreateFence(addr, value, !should_flush); | 59 | TFence new_fence = CreateFence(addr, value, !should_flush); |
| 64 | fences.push(new_fence); | 60 | fences.push(new_fence); |
| 65 | QueueFence(new_fence); | 61 | QueueFence(new_fence); |
| @@ -71,12 +67,8 @@ public: | |||
| 71 | 67 | ||
| 72 | void SignalSyncPoint(u32 value) { | 68 | void SignalSyncPoint(u32 value) { |
| 73 | TryReleasePendingFences(); | 69 | TryReleasePendingFences(); |
| 74 | bool should_flush = texture_cache.HasUncommitedFlushes(); | 70 | bool should_flush = ShouldFlush(); |
| 75 | should_flush |= buffer_cache.HasUncommitedFlushes(); | 71 | CommitAsyncFlushes(); |
| 76 | should_flush |= query_cache.HasUncommitedFlushes(); | ||
| 77 | texture_cache.CommitAsyncFlushes(); | ||
| 78 | buffer_cache.CommitAsyncFlushes(); | ||
| 79 | query_cache.CommitAsyncFlushes(); | ||
| 80 | TFence new_fence = CreateFence(value, !should_flush); | 72 | TFence new_fence = CreateFence(value, !should_flush); |
| 81 | fences.push(new_fence); | 73 | fences.push(new_fence); |
| 82 | QueueFence(new_fence); | 74 | QueueFence(new_fence); |
| @@ -89,15 +81,10 @@ public: | |||
| 89 | void WaitPendingFences() { | 81 | void WaitPendingFences() { |
| 90 | while (!fences.empty()) { | 82 | while (!fences.empty()) { |
| 91 | TFence& current_fence = fences.front(); | 83 | TFence& current_fence = fences.front(); |
| 92 | bool should_wait = texture_cache.ShouldWaitAsyncFlushes(); | 84 | if (ShouldWait()) { |
| 93 | should_wait |= buffer_cache.ShouldWaitAsyncFlushes(); | ||
| 94 | should_wait |= query_cache.ShouldWaitAsyncFlushes(); | ||
| 95 | if (should_wait) { | ||
| 96 | WaitFence(current_fence); | 85 | WaitFence(current_fence); |
| 97 | } | 86 | } |
| 98 | texture_cache.PopAsyncFlushes(); | 87 | PopAsyncFlushes(); |
| 99 | buffer_cache.PopAsyncFlushes(); | ||
| 100 | query_cache.PopAsyncFlushes(); | ||
| 101 | auto& gpu{system.GPU()}; | 88 | auto& gpu{system.GPU()}; |
| 102 | if (current_fence->IsSemaphore()) { | 89 | if (current_fence->IsSemaphore()) { |
| 103 | auto& memory_manager{gpu.MemoryManager()}; | 90 | auto& memory_manager{gpu.MemoryManager()}; |
| @@ -116,10 +103,18 @@ protected: | |||
| 116 | : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache}, | 103 | : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache}, |
| 117 | buffer_cache{buffer_cache}, query_cache{query_cache} {} | 104 | buffer_cache{buffer_cache}, query_cache{query_cache} {} |
| 118 | 105 | ||
| 106 | virtual ~FenceManager() {} | ||
| 107 | |||
| 108 | /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is | ||
| 109 | /// true | ||
| 119 | virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; | 110 | virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; |
| 111 | /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true | ||
| 120 | virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0; | 112 | virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0; |
| 113 | /// Queues a fence into the backend if the fence isn't stubbed. | ||
| 121 | virtual void QueueFence(TFence& fence) = 0; | 114 | virtual void QueueFence(TFence& fence) = 0; |
| 122 | virtual bool IsFenceSignaled(TFence& fence) = 0; | 115 | /// Notifies that the backend fence has been signaled/reached in host GPU. |
| 116 | virtual bool IsFenceSignaled(TFence& fence) const = 0; | ||
| 117 | /// Waits until a fence has been signalled by the host GPU. | ||
| 123 | virtual void WaitFence(TFence& fence) = 0; | 118 | virtual void WaitFence(TFence& fence) = 0; |
| 124 | 119 | ||
| 125 | Core::System& system; | 120 | Core::System& system; |
| @@ -132,15 +127,10 @@ private: | |||
| 132 | void TryReleasePendingFences() { | 127 | void TryReleasePendingFences() { |
| 133 | while (!fences.empty()) { | 128 | while (!fences.empty()) { |
| 134 | TFence& current_fence = fences.front(); | 129 | TFence& current_fence = fences.front(); |
| 135 | bool should_wait = texture_cache.ShouldWaitAsyncFlushes(); | 130 | if (ShouldWait() && !IsFenceSignaled(current_fence)) { |
| 136 | should_wait |= buffer_cache.ShouldWaitAsyncFlushes(); | ||
| 137 | should_wait |= query_cache.ShouldWaitAsyncFlushes(); | ||
| 138 | if (should_wait && !IsFenceSignaled(current_fence)) { | ||
| 139 | return; | 131 | return; |
| 140 | } | 132 | } |
| 141 | texture_cache.PopAsyncFlushes(); | 133 | PopAsyncFlushes(); |
| 142 | buffer_cache.PopAsyncFlushes(); | ||
| 143 | query_cache.PopAsyncFlushes(); | ||
| 144 | auto& gpu{system.GPU()}; | 134 | auto& gpu{system.GPU()}; |
| 145 | if (current_fence->IsSemaphore()) { | 135 | if (current_fence->IsSemaphore()) { |
| 146 | auto& memory_manager{gpu.MemoryManager()}; | 136 | auto& memory_manager{gpu.MemoryManager()}; |
| @@ -152,6 +142,28 @@ private: | |||
| 152 | } | 142 | } |
| 153 | } | 143 | } |
| 154 | 144 | ||
| 145 | bool ShouldWait() const { | ||
| 146 | return texture_cache.ShouldWaitAsyncFlushes() || buffer_cache.ShouldWaitAsyncFlushes() || | ||
| 147 | query_cache.ShouldWaitAsyncFlushes(); | ||
| 148 | } | ||
| 149 | |||
| 150 | bool ShouldFlush() const { | ||
| 151 | return texture_cache.HasUncommittedFlushes() || buffer_cache.HasUncommittedFlushes() || | ||
| 152 | query_cache.HasUncommittedFlushes(); | ||
| 153 | } | ||
| 154 | |||
| 155 | void PopAsyncFlushes() { | ||
| 156 | texture_cache.PopAsyncFlushes(); | ||
| 157 | buffer_cache.PopAsyncFlushes(); | ||
| 158 | query_cache.PopAsyncFlushes(); | ||
| 159 | } | ||
| 160 | |||
| 161 | void CommitAsyncFlushes() { | ||
| 162 | texture_cache.CommitAsyncFlushes(); | ||
| 163 | buffer_cache.CommitAsyncFlushes(); | ||
| 164 | query_cache.CommitAsyncFlushes(); | ||
| 165 | } | ||
| 166 | |||
| 155 | std::queue<TFence> fences; | 167 | std::queue<TFence> fences; |
| 156 | }; | 168 | }; |
| 157 | 169 | ||
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 85a6c7bb5..3b7572d61 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp | |||
| @@ -125,7 +125,7 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) { | |||
| 125 | return true; | 125 | return true; |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | u64 GPU::RequestFlush(CacheAddr addr, std::size_t size) { | 128 | u64 GPU::RequestFlush(VAddr addr, std::size_t size) { |
| 129 | std::unique_lock lck{flush_request_mutex}; | 129 | std::unique_lock lck{flush_request_mutex}; |
| 130 | const u64 fence = ++last_flush_fence; | 130 | const u64 fence = ++last_flush_fence; |
| 131 | flush_requests.emplace_back(fence, addr, size); | 131 | flush_requests.emplace_back(fence, addr, size); |
| @@ -137,7 +137,7 @@ void GPU::TickWork() { | |||
| 137 | while (!flush_requests.empty()) { | 137 | while (!flush_requests.empty()) { |
| 138 | auto& request = flush_requests.front(); | 138 | auto& request = flush_requests.front(); |
| 139 | const u64 fence = request.fence; | 139 | const u64 fence = request.fence; |
| 140 | const CacheAddr addr = request.addr; | 140 | const VAddr addr = request.addr; |
| 141 | const std::size_t size = request.size; | 141 | const std::size_t size = request.size; |
| 142 | flush_requests.pop_front(); | 142 | flush_requests.pop_front(); |
| 143 | flush_request_mutex.unlock(); | 143 | flush_request_mutex.unlock(); |
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 943a5b110..5e3eb94e9 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h | |||
| @@ -155,16 +155,22 @@ public: | |||
| 155 | /// Calls a GPU method. | 155 | /// Calls a GPU method. |
| 156 | void CallMethod(const MethodCall& method_call); | 156 | void CallMethod(const MethodCall& method_call); |
| 157 | 157 | ||
| 158 | /// Flush all current written commands into the host GPU for execution. | ||
| 158 | void FlushCommands(); | 159 | void FlushCommands(); |
| 160 | /// Synchronizes CPU writes with Host GPU memory. | ||
| 159 | void SyncGuestHost(); | 161 | void SyncGuestHost(); |
| 162 | /// Signal the ending of command list. | ||
| 160 | virtual void OnCommandListEnd(); | 163 | virtual void OnCommandListEnd(); |
| 161 | 164 | ||
| 162 | u64 RequestFlush(CacheAddr addr, std::size_t size); | 165 | /// Request a host GPU memory flush from the CPU. |
| 166 | u64 RequestFlush(VAddr addr, std::size_t size); | ||
| 163 | 167 | ||
| 168 | /// Obtains current flush request fence id. | ||
| 164 | u64 CurrentFlushRequestFence() const { | 169 | u64 CurrentFlushRequestFence() const { |
| 165 | return current_flush_fence.load(std::memory_order_relaxed); | 170 | return current_flush_fence.load(std::memory_order_relaxed); |
| 166 | } | 171 | } |
| 167 | 172 | ||
| 173 | /// Tick pending requests within the GPU. | ||
| 168 | void TickWork(); | 174 | void TickWork(); |
| 169 | 175 | ||
| 170 | /// Returns a reference to the Maxwell3D GPU engine. | 176 | /// Returns a reference to the Maxwell3D GPU engine. |
| @@ -336,10 +342,10 @@ private: | |||
| 336 | std::condition_variable sync_cv; | 342 | std::condition_variable sync_cv; |
| 337 | 343 | ||
| 338 | struct FlushRequest { | 344 | struct FlushRequest { |
| 339 | FlushRequest(u64 fence, CacheAddr addr, std::size_t size) | 345 | FlushRequest(u64 fence, VAddr addr, std::size_t size) |
| 340 | : fence{fence}, addr{addr}, size{size} {} | 346 | : fence{fence}, addr{addr}, size{size} {} |
| 341 | u64 fence; | 347 | u64 fence; |
| 342 | CacheAddr addr; | 348 | VAddr addr; |
| 343 | std::size_t size; | 349 | std::size_t size; |
| 344 | }; | 350 | }; |
| 345 | 351 | ||
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 98d956b68..2f75f8801 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h | |||
| @@ -176,41 +176,34 @@ public: | |||
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | void CommitAsyncFlushes() { | 178 | void CommitAsyncFlushes() { |
| 179 | commited_flushes.push_back(uncommited_flushes); | 179 | committed_flushes.push_back(uncommitted_flushes); |
| 180 | uncommited_flushes.reset(); | 180 | uncommitted_flushes.reset(); |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | bool HasUncommitedFlushes() { | 183 | bool HasUncommittedFlushes() const { |
| 184 | if (uncommited_flushes) { | 184 | return uncommitted_flushes != nullptr; |
| 185 | return true; | ||
| 186 | } | ||
| 187 | return false; | ||
| 188 | } | 185 | } |
| 189 | 186 | ||
| 190 | bool ShouldWaitAsyncFlushes() { | 187 | bool ShouldWaitAsyncFlushes() const { |
| 191 | if (commited_flushes.empty()) { | 188 | if (committed_flushes.empty()) { |
| 192 | return false; | ||
| 193 | } | ||
| 194 | auto& flush_list = commited_flushes.front(); | ||
| 195 | if (!flush_list) { | ||
| 196 | return false; | 189 | return false; |
| 197 | } | 190 | } |
| 198 | return true; | 191 | return committed_flushes.front() != nullptr; |
| 199 | } | 192 | } |
| 200 | 193 | ||
| 201 | void PopAsyncFlushes() { | 194 | void PopAsyncFlushes() { |
| 202 | if (commited_flushes.empty()) { | 195 | if (committed_flushes.empty()) { |
| 203 | return; | 196 | return; |
| 204 | } | 197 | } |
| 205 | auto& flush_list = commited_flushes.front(); | 198 | auto& flush_list = committed_flushes.front(); |
| 206 | if (!flush_list) { | 199 | if (!flush_list) { |
| 207 | commited_flushes.pop_front(); | 200 | committed_flushes.pop_front(); |
| 208 | return; | 201 | return; |
| 209 | } | 202 | } |
| 210 | for (VAddr query_address : *flush_list) { | 203 | for (VAddr query_address : *flush_list) { |
| 211 | FlushAndRemoveRegion(query_address, 4); | 204 | FlushAndRemoveRegion(query_address, 4); |
| 212 | } | 205 | } |
| 213 | commited_flushes.pop_front(); | 206 | committed_flushes.pop_front(); |
| 214 | } | 207 | } |
| 215 | 208 | ||
| 216 | protected: | 209 | protected: |
| @@ -268,10 +261,10 @@ private: | |||
| 268 | } | 261 | } |
| 269 | 262 | ||
| 270 | void AsyncFlushQuery(VAddr addr) { | 263 | void AsyncFlushQuery(VAddr addr) { |
| 271 | if (!uncommited_flushes) { | 264 | if (!uncommitted_flushes) { |
| 272 | uncommited_flushes = std::make_shared<std::unordered_set<VAddr>>(); | 265 | uncommitted_flushes = std::make_shared<std::unordered_set<VAddr>>(); |
| 273 | } | 266 | } |
| 274 | uncommited_flushes->insert(addr); | 267 | uncommitted_flushes->insert(addr); |
| 275 | } | 268 | } |
| 276 | 269 | ||
| 277 | static constexpr std::uintptr_t PAGE_SIZE = 4096; | 270 | static constexpr std::uintptr_t PAGE_SIZE = 4096; |
| @@ -286,8 +279,8 @@ private: | |||
| 286 | 279 | ||
| 287 | std::array<CounterStream, VideoCore::NumQueryTypes> streams; | 280 | std::array<CounterStream, VideoCore::NumQueryTypes> streams; |
| 288 | 281 | ||
| 289 | std::shared_ptr<std::unordered_set<VAddr>> uncommited_flushes{}; | 282 | std::shared_ptr<std::unordered_set<VAddr>> uncommitted_flushes{}; |
| 290 | std::list<std::shared_ptr<std::unordered_set<VAddr>>> commited_flushes; | 283 | std::list<std::shared_ptr<std::unordered_set<VAddr>>> committed_flushes; |
| 291 | }; | 284 | }; |
| 292 | 285 | ||
| 293 | template <class QueryCache, class HostCounter> | 286 | template <class QueryCache, class HostCounter> |
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 4e9c8fb59..603f61952 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h | |||
| @@ -64,6 +64,7 @@ public: | |||
| 64 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory | 64 | /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory |
| 65 | virtual void FlushRegion(VAddr addr, u64 size) = 0; | 65 | virtual void FlushRegion(VAddr addr, u64 size) = 0; |
| 66 | 66 | ||
| 67 | /// Check if the specified memory area requires flushing to CPU Memory. | ||
| 67 | virtual bool MustFlushRegion(VAddr addr, u64 size) = 0; | 68 | virtual bool MustFlushRegion(VAddr addr, u64 size) = 0; |
| 68 | 69 | ||
| 69 | /// Notify rasterizer that any caches of the specified region should be invalidated | 70 | /// Notify rasterizer that any caches of the specified region should be invalidated |
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp index aa57a0ae0..476c89940 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.cpp +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp | |||
| @@ -62,7 +62,7 @@ void FenceManagerOpenGL::QueueFence(Fence& fence) { | |||
| 62 | fence->Queue(); | 62 | fence->Queue(); |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) { | 65 | bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) const { |
| 66 | return fence->IsSignaled(); | 66 | return fence->IsSignaled(); |
| 67 | } | 67 | } |
| 68 | 68 | ||
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h index c76e69cb8..c917b3343 100644 --- a/src/video_core/renderer_opengl/gl_fence_manager.h +++ b/src/video_core/renderer_opengl/gl_fence_manager.h | |||
| @@ -46,7 +46,7 @@ protected: | |||
| 46 | Fence CreateFence(u32 value, bool is_stubbed) override; | 46 | Fence CreateFence(u32 value, bool is_stubbed) override; |
| 47 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; | 47 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; |
| 48 | void QueueFence(Fence& fence) override; | 48 | void QueueFence(Fence& fence) override; |
| 49 | bool IsFenceSignaled(Fence& fence) override; | 49 | bool IsFenceSignaled(Fence& fence) const override; |
| 50 | void WaitFence(Fence& fence) override; | 50 | void WaitFence(Fence& fence) override; |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 847d67159..d662657cf 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -653,9 +653,6 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { | |||
| 653 | } | 653 | } |
| 654 | 654 | ||
| 655 | bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) { | 655 | bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) { |
| 656 | if (!Settings::IsGPULevelExtreme()) { | ||
| 657 | return buffer_cache.MustFlushRegion(addr, size); | ||
| 658 | } | ||
| 659 | return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size); | 656 | return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size); |
| 660 | } | 657 | } |
| 661 | 658 | ||
| @@ -672,7 +669,7 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { | |||
| 672 | 669 | ||
| 673 | void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { | 670 | void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { |
| 674 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); | 671 | MICROPROFILE_SCOPE(OpenGL_CacheManagement); |
| 675 | if (!addr || !size) { | 672 | if (addr == 0 || size == 0) { |
| 676 | return; | 673 | return; |
| 677 | } | 674 | } |
| 678 | texture_cache.OnCPUWrite(addr, size); | 675 | texture_cache.OnCPUWrite(addr, size); |
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index a2b2bc408..a02be5487 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp | |||
| @@ -90,7 +90,7 @@ void VKFenceManager::QueueFence(Fence& fence) { | |||
| 90 | fence->Queue(); | 90 | fence->Queue(); |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | bool VKFenceManager::IsFenceSignaled(Fence& fence) { | 93 | bool VKFenceManager::IsFenceSignaled(Fence& fence) const { |
| 94 | return fence->IsSignaled(); | 94 | return fence->IsSignaled(); |
| 95 | } | 95 | } |
| 96 | 96 | ||
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 30651e9c7..04d07fe6a 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h | |||
| @@ -63,7 +63,7 @@ protected: | |||
| 63 | Fence CreateFence(u32 value, bool is_stubbed) override; | 63 | Fence CreateFence(u32 value, bool is_stubbed) override; |
| 64 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; | 64 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; |
| 65 | void QueueFence(Fence& fence) override; | 65 | void QueueFence(Fence& fence) override; |
| 66 | bool IsFenceSignaled(Fence& fence) override; | 66 | bool IsFenceSignaled(Fence& fence) const override; |
| 67 | void WaitFence(Fence& fence) override; | 67 | void WaitFence(Fence& fence) override; |
| 68 | 68 | ||
| 69 | private: | 69 | private: |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 4dc7555aa..2350cd5f4 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -533,7 +533,7 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) { | |||
| 533 | } | 533 | } |
| 534 | 534 | ||
| 535 | void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) { | 535 | void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) { |
| 536 | if (!addr || !size) { | 536 | if (addr == 0 || size == 0) { |
| 537 | return; | 537 | return; |
| 538 | } | 538 | } |
| 539 | texture_cache.OnCPUWrite(addr, size); | 539 | texture_cache.OnCPUWrite(addr, size); |
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index f3ca1ffd1..1148c3a34 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h | |||
| @@ -120,15 +120,8 @@ public: | |||
| 120 | std::lock_guard lock{mutex}; | 120 | std::lock_guard lock{mutex}; |
| 121 | 121 | ||
| 122 | auto surfaces = GetSurfacesInRegion(addr, size); | 122 | auto surfaces = GetSurfacesInRegion(addr, size); |
| 123 | if (surfaces.empty()) { | 123 | return std::any_of(surfaces.begin(), surfaces.end(), |
| 124 | return false; | 124 | [](const TSurface& surface) { return surface->IsModified(); }); |
| 125 | } | ||
| 126 | for (const auto& surface : surfaces) { | ||
| 127 | if (surface->IsModified()) { | ||
| 128 | return true; | ||
| 129 | } | ||
| 130 | } | ||
| 131 | return false; | ||
| 132 | } | 125 | } |
| 133 | 126 | ||
| 134 | TView GetTextureSurface(const Tegra::Texture::TICEntry& tic, | 127 | TView GetTextureSurface(const Tegra::Texture::TICEntry& tic, |
| @@ -333,41 +326,34 @@ public: | |||
| 333 | } | 326 | } |
| 334 | 327 | ||
| 335 | void CommitAsyncFlushes() { | 328 | void CommitAsyncFlushes() { |
| 336 | commited_flushes.push_back(uncommited_flushes); | 329 | committed_flushes.push_back(uncommitted_flushes); |
| 337 | uncommited_flushes.reset(); | 330 | uncommitted_flushes.reset(); |
| 338 | } | 331 | } |
| 339 | 332 | ||
| 340 | bool HasUncommitedFlushes() { | 333 | bool HasUncommittedFlushes() const { |
| 341 | if (uncommited_flushes) { | 334 | return uncommitted_flushes != nullptr; |
| 342 | return true; | ||
| 343 | } | ||
| 344 | return false; | ||
| 345 | } | 335 | } |
| 346 | 336 | ||
| 347 | bool ShouldWaitAsyncFlushes() { | 337 | bool ShouldWaitAsyncFlushes() const { |
| 348 | if (commited_flushes.empty()) { | 338 | if (committed_flushes.empty()) { |
| 349 | return false; | ||
| 350 | } | ||
| 351 | auto& flush_list = commited_flushes.front(); | ||
| 352 | if (!flush_list) { | ||
| 353 | return false; | 339 | return false; |
| 354 | } | 340 | } |
| 355 | return true; | 341 | return committed_flushes.front() != nullptr; |
| 356 | } | 342 | } |
| 357 | 343 | ||
| 358 | void PopAsyncFlushes() { | 344 | void PopAsyncFlushes() { |
| 359 | if (commited_flushes.empty()) { | 345 | if (committed_flushes.empty()) { |
| 360 | return; | 346 | return; |
| 361 | } | 347 | } |
| 362 | auto& flush_list = commited_flushes.front(); | 348 | auto& flush_list = committed_flushes.front(); |
| 363 | if (!flush_list) { | 349 | if (!flush_list) { |
| 364 | commited_flushes.pop_front(); | 350 | committed_flushes.pop_front(); |
| 365 | return; | 351 | return; |
| 366 | } | 352 | } |
| 367 | for (TSurface& surface : *flush_list) { | 353 | for (TSurface& surface : *flush_list) { |
| 368 | FlushSurface(surface); | 354 | FlushSurface(surface); |
| 369 | } | 355 | } |
| 370 | commited_flushes.pop_front(); | 356 | committed_flushes.pop_front(); |
| 371 | } | 357 | } |
| 372 | 358 | ||
| 373 | protected: | 359 | protected: |
| @@ -1206,10 +1192,10 @@ private: | |||
| 1206 | }; | 1192 | }; |
| 1207 | 1193 | ||
| 1208 | void AsyncFlushSurface(TSurface& surface) { | 1194 | void AsyncFlushSurface(TSurface& surface) { |
| 1209 | if (!uncommited_flushes) { | 1195 | if (!uncommitted_flushes) { |
| 1210 | uncommited_flushes = std::make_shared<std::list<TSurface>>(); | 1196 | uncommitted_flushes = std::make_shared<std::list<TSurface>>(); |
| 1211 | } | 1197 | } |
| 1212 | uncommited_flushes->push_back(surface); | 1198 | uncommitted_flushes->push_back(surface); |
| 1213 | } | 1199 | } |
| 1214 | 1200 | ||
| 1215 | VideoCore::RasterizerInterface& rasterizer; | 1201 | VideoCore::RasterizerInterface& rasterizer; |
| @@ -1258,8 +1244,8 @@ private: | |||
| 1258 | 1244 | ||
| 1259 | std::list<TSurface> marked_for_unregister; | 1245 | std::list<TSurface> marked_for_unregister; |
| 1260 | 1246 | ||
| 1261 | std::shared_ptr<std::list<TSurface>> uncommited_flushes{}; | 1247 | std::shared_ptr<std::list<TSurface>> uncommitted_flushes{}; |
| 1262 | std::list<std::shared_ptr<std::list<TSurface>>> commited_flushes; | 1248 | std::list<std::shared_ptr<std::list<TSurface>>> committed_flushes; |
| 1263 | 1249 | ||
| 1264 | StagingCache staging_cache; | 1250 | StagingCache staging_cache; |
| 1265 | std::recursive_mutex mutex; | 1251 | std::recursive_mutex mutex; |