-rw-r--r--  src/video_core/query_cache.h                        | 122
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.cpp   |  12
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.h     |   6
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp    |   2
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp   |  14
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h     |   5
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp    |   2
7 files changed, 118 insertions, 45 deletions
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index cd339b99d..2a14cc36a 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -17,13 +17,19 @@
 
 #include "common/assert.h"
 #include "common/settings.h"
+#include "core/memory.h"
 #include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
+#include "video_core/texture_cache/slot_vector.h"
 
 namespace VideoCommon {
 
+using AsyncJobId = SlotId;
+
+static constexpr AsyncJobId NULL_ASYNC_JOB_ID{0};
+
 template <class QueryCache, class HostCounter>
 class CounterStreamBase {
 public:
@@ -93,9 +99,13 @@ private:
 template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
 class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
-    explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_)
-        : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
-                                            VideoCore::QueryType::SamplesPassed}}} {}
+    explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
+                            Core::Memory::Memory& cpu_memory_)
+        : rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, streams{
+              {CounterStream{static_cast<QueryCache&>(*this),
+                             VideoCore::QueryType::SamplesPassed}}} {
+        (void) slot_async_jobs.insert(); // Null value
+    }
 
     void InvalidateRegion(VAddr addr, std::size_t size) {
         std::unique_lock lock{mutex};
@@ -126,10 +136,15 @@ public:
             query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
         }
 
-        query->BindCounter(Stream(type).Current(), timestamp);
-        if (Settings::values.use_asynchronous_gpu_emulation.GetValue()) {
-            AsyncFlushQuery(*cpu_addr);
+        auto result = query->BindCounter(Stream(type).Current());
+        if (result) {
+            auto async_job_id = query->GetAsyncJob();
+            auto& async_job = slot_async_jobs[async_job_id];
+            async_job.collected = true;
+            async_job.value = *result;
+            query->SetAsyncJob(NULL_ASYNC_JOB_ID);
         }
+        AsyncFlushQuery(query, timestamp, lock);
     }
 
     /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
@@ -201,15 +216,25 @@ public:
             committed_flushes.pop_front();
             return;
         }
-        for (VAddr query_address : *flush_list) {
-            FlushAndRemoveRegion(query_address, 4);
+        for (AsyncJobId async_job_id : *flush_list) {
+            AsyncJob& async_job = slot_async_jobs[async_job_id];
+            if (!async_job.collected) {
+                FlushAndRemoveRegion(async_job.query_location, 2, true);
+            }
         }
         committed_flushes.pop_front();
     }
 
 private:
+    struct AsyncJob {
+        bool collected = false;
+        u64 value = 0;
+        VAddr query_location = 0;
+        std::optional<u64> timestamp{};
+    };
+
     /// Flushes a memory range to guest memory and removes it from the cache.
-    void FlushAndRemoveRegion(VAddr addr, std::size_t size) {
+    void FlushAndRemoveRegion(VAddr addr, std::size_t size, bool async = false) {
         const u64 addr_begin = addr;
         const u64 addr_end = addr_begin + size;
         const auto in_range = [addr_begin, addr_end](const CachedQuery& query) {
@@ -230,7 +255,16 @@ private:
                     continue;
                 }
                 rasterizer.UpdatePagesCachedCount(query.GetCpuAddr(), query.SizeInBytes(), -1);
-                query.Flush();
+                AsyncJobId async_job_id = query.GetAsyncJob();
+                auto flush_result = query.Flush(async);
+                if (async_job_id == NULL_ASYNC_JOB_ID) {
+                    ASSERT_MSG(false, "This should not be reachable at all");
+                    continue;
+                }
+                AsyncJob& async_job = slot_async_jobs[async_job_id];
+                async_job.collected = true;
+                async_job.value = flush_result;
+                query.SetAsyncJob(NULL_ASYNC_JOB_ID);
             }
             std::erase_if(contents, in_range);
         }
@@ -257,17 +291,43 @@ private:
         return found != std::end(contents) ? &*found : nullptr;
     }
 
-    void AsyncFlushQuery(VAddr addr) {
+    void AsyncFlushQuery(CachedQuery* query, std::optional<u64> timestamp,
+                         std::unique_lock<std::recursive_mutex>& lock) {
+        const AsyncJobId new_async_job_id = slot_async_jobs.insert();
+        AsyncJob& async_job = slot_async_jobs[new_async_job_id];
+        query->SetAsyncJob(new_async_job_id);
+        async_job.query_location = query->GetCpuAddr();
+        async_job.collected = false;
+
         if (!uncommitted_flushes) {
-            uncommitted_flushes = std::make_shared<std::vector<VAddr>>();
+            uncommitted_flushes = std::make_shared<std::vector<AsyncJobId>>();
         }
-        uncommitted_flushes->push_back(addr);
+        uncommitted_flushes->push_back(new_async_job_id);
+        lock.unlock();
+        std::function<void()> operation([this, new_async_job_id, timestamp] {
+            std::unique_lock local_lock{mutex};
+            AsyncJob& async_job = slot_async_jobs[new_async_job_id];
+            if (timestamp) {
+                u64 timestamp_value = *timestamp;
+                cpu_memory.WriteBlockUnsafe(async_job.query_location + sizeof(u64),
+                                            &timestamp_value, sizeof(8));
+                cpu_memory.WriteBlockUnsafe(async_job.query_location, &async_job.value, sizeof(8));
+            } else {
+                u32 small_value = static_cast<u32>(async_job.value);
+                cpu_memory.WriteBlockUnsafe(async_job.query_location, &small_value, sizeof(u32));
+            }
+            slot_async_jobs.erase(new_async_job_id);
+        });
+        rasterizer.SyncOperation(std::move(operation));
     }
 
     static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
     static constexpr unsigned YUZU_PAGEBITS = 12;
 
+    SlotVector<AsyncJob> slot_async_jobs;
+
     VideoCore::RasterizerInterface& rasterizer;
+    Core::Memory::Memory& cpu_memory;
 
     mutable std::recursive_mutex mutex;
 
@@ -275,8 +335,8 @@ private:
 
     std::array<CounterStream, VideoCore::NumQueryTypes> streams;
 
-    std::shared_ptr<std::vector<VAddr>> uncommitted_flushes{};
-    std::list<std::shared_ptr<std::vector<VAddr>>> committed_flushes;
+    std::shared_ptr<std::vector<AsyncJobId>> uncommitted_flushes{};
+    std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed_flushes;
 };
 
 template <class QueryCache, class HostCounter>
@@ -295,12 +355,12 @@ public:
     virtual ~HostCounterBase() = default;
 
     /// Returns the current value of the query.
-    u64 Query() {
+    u64 Query(bool async = false) {
         if (result) {
             return *result;
         }
 
-        u64 value = BlockingQuery() + base_result;
+        u64 value = BlockingQuery(async) + base_result;
         if (dependency) {
             value += dependency->Query();
             dependency = nullptr;
@@ -321,7 +381,7 @@ public:
 
 protected:
     /// Returns the value of query from the backend API blocking as needed.
-    virtual u64 BlockingQuery() const = 0;
+    virtual u64 BlockingQuery(bool async = false) const = 0;
 
 private:
     std::shared_ptr<HostCounter> dependency; ///< Counter to add to this value.
@@ -344,26 +404,23 @@ public:
     CachedQueryBase& operator=(const CachedQueryBase&) = delete;
 
     /// Flushes the query to guest memory.
-    virtual void Flush() {
+    virtual u64 Flush(bool async = false) {
         // When counter is nullptr it means that it's just been reset. We are supposed to write a
         // zero in these cases.
-        const u64 value = counter ? counter->Query() : 0;
-        std::memcpy(host_ptr, &value, sizeof(u64));
-
-        if (timestamp) {
-            std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
-        }
+        const u64 value = counter ? counter->Query(async) : 0;
+        return value;
     }
 
     /// Binds a counter to this query.
-    void BindCounter(std::shared_ptr<HostCounter> counter_, std::optional<u64> timestamp_) {
+    std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_) {
+        std::optional<u64> result{};
         if (counter) {
             // If there's an old counter set it means the query is being rewritten by the game.
             // To avoid losing the data forever, flush here.
-            Flush();
+            result = std::make_optional(Flush());
         }
         counter = std::move(counter_);
-        timestamp = timestamp_;
+        return result;
     }
 
     VAddr GetCpuAddr() const noexcept {
@@ -378,6 +435,14 @@ public:
         return with_timestamp ? LARGE_QUERY_SIZE : SMALL_QUERY_SIZE;
     }
 
+    void SetAsyncJob(AsyncJobId assigned_async_job_) {
+        assigned_async_job = assigned_async_job_;
+    }
+
+    AsyncJobId GetAsyncJob() const {
+        return assigned_async_job;
+    }
+
 protected:
     /// Returns true when querying the counter may potentially block.
     bool WaitPending() const noexcept {
@@ -393,6 +458,7 @@ private:
     u8* host_ptr;                         ///< Writable host pointer.
     std::shared_ptr<HostCounter> counter; ///< Host counter to query, owns the dependency tree.
     std::optional<u64> timestamp;         ///< Timestamp to flush to guest memory.
+    AsyncJobId assigned_async_job;
 };
 
 } // namespace VideoCommon
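
Note on the pattern above: the patch replaces per-address blocking flushes with slot-tracked async jobs whose results are written back to guest memory by a deferred sync operation. Below is a minimal, self-contained sketch of that bookkeeping, assuming a plain std::vector in place of SlotVector and a std::deque of callbacks in place of rasterizer.SyncOperation; guest_memory, the fixed address 8, and the value 1234 are illustrative only, not yuzu code.

// Minimal sketch (not yuzu code): slot-tracked async query jobs with a deferred
// guest-memory write-back. std::vector and std::deque stand in for SlotVector
// and rasterizer.SyncOperation; addresses and values are made up.
#include <cstdint>
#include <cstring>
#include <deque>
#include <functional>
#include <vector>

using AsyncJobId = std::size_t;
constexpr AsyncJobId NULL_ASYNC_JOB_ID = 0;

struct AsyncJob {
    bool collected = false;         // set once the host counter value is known
    std::uint64_t value = 0;        // counter value to publish to the guest
    std::size_t query_location = 0; // where the guest expects the result
};

int main() {
    std::vector<std::uint8_t> guest_memory(64, 0);
    std::vector<AsyncJob> slot_async_jobs(1); // slot 0 is the reserved null job
    std::deque<std::function<void()>> sync_operations;

    // Query() path: reserve a job slot and remember the guest-side location.
    slot_async_jobs.push_back(AsyncJob{.collected = false, .value = 0, .query_location = 8});
    const AsyncJobId job = slot_async_jobs.size() - 1;

    // Flush/BindCounter path: record the value in the job instead of writing
    // guest memory immediately.
    slot_async_jobs[job].value = 1234;
    slot_async_jobs[job].collected = true;

    // The actual write-back runs later as a queued operation, mirroring
    // rasterizer.SyncOperation(std::move(operation)) in the patch.
    sync_operations.emplace_back([&, job] {
        const AsyncJob& async_job = slot_async_jobs[job];
        const auto small_value = static_cast<std::uint32_t>(async_job.value);
        std::memcpy(guest_memory.data() + async_job.query_location, &small_value,
                    sizeof(small_value));
    });

    // Drained at a safe synchronization point on the CPU side.
    for (auto& op : sync_operations) {
        op();
    }

    std::uint32_t read_back = 0;
    std::memcpy(&read_back, guest_memory.data() + 8, sizeof(read_back));
    return read_back == 1234 ? 0 : 1;
}

Reserving slot 0 as a null job, as NULL_ASYNC_JOB_ID does in the patch, lets "this query has no pending async job" be detected with a single comparison.
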
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index 5070db441..99d7347f5 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -26,8 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
 
 } // Anonymous namespace
 
-QueryCache::QueryCache(RasterizerOpenGL& rasterizer_)
-    : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {}
+QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
+    : QueryCacheBase(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}
 
 QueryCache::~QueryCache() = default;
 
@@ -74,7 +74,7 @@ void HostCounter::EndQuery() {
     glEndQuery(GetTarget(type));
 }
 
-u64 HostCounter::BlockingQuery() const {
+u64 HostCounter::BlockingQuery([[maybe_unused]] bool async) const {
     GLint64 value;
     glGetQueryObjecti64v(query.handle, GL_QUERY_RESULT, &value);
     return static_cast<u64>(value);
@@ -96,7 +96,7 @@ CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept {
     return *this;
 }
 
-void CachedQuery::Flush() {
+u64 CachedQuery::Flush([[maybe_unused]] bool async) {
     // Waiting for a query while another query of the same target is enabled locks Nvidia's driver.
     // To avoid this disable and re-enable keeping the dependency stream.
     // But we only have to do this if we have pending waits to be done.
@@ -106,11 +106,13 @@ void CachedQuery::Flush() {
         stream.Update(false);
     }
 
-    VideoCommon::CachedQueryBase<HostCounter>::Flush();
+    auto result = VideoCommon::CachedQueryBase<HostCounter>::Flush();
 
     if (slice_counter) {
         stream.Update(true);
     }
+
+    return result;
 }
 
 } // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 14ce59990..872513f22 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -28,7 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
 class QueryCache final
     : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
 public:
-    explicit QueryCache(RasterizerOpenGL& rasterizer_);
+    explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_);
     ~QueryCache();
 
     OGLQuery AllocateQuery(VideoCore::QueryType type);
@@ -51,7 +51,7 @@ public:
     void EndQuery();
 
 private:
-    u64 BlockingQuery() const override;
+    u64 BlockingQuery(bool async = false) const override;
 
     QueryCache& cache;
     const VideoCore::QueryType type;
@@ -70,7 +70,7 @@ public:
     CachedQuery(const CachedQuery&) = delete;
     CachedQuery& operator=(const CachedQuery&) = delete;
 
-    void Flush() override;
+    u64 Flush(bool async = false) override;
 
 private:
     QueryCache* cache;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 90e35e307..967aa4306 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -63,7 +63,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
       shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
                    state_tracker, gpu.ShaderNotify()),
-      query_cache(*this), accelerate_dma(buffer_cache, texture_cache),
+      query_cache(*this, cpu_memory_), accelerate_dma(buffer_cache, texture_cache),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
       blit_image(program_manager_) {}
 
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 0701e572b..d67490449 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -66,9 +66,10 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
     }
 }
 
-QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_,
+                       Core::Memory::Memory& cpu_memory_, const Device& device_,
                        Scheduler& scheduler_)
-    : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_},
+    : QueryCacheBase{rasterizer_, cpu_memory_}, device{device_}, scheduler{scheduler_},
       query_pools{
           QueryPool{device_, scheduler_, QueryType::SamplesPassed},
       } {}
@@ -100,7 +101,8 @@ HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> depend
     cache.GetScheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
         const bool use_precise = Settings::IsGPULevelHigh();
         logical->ResetQueryPool(query.first, query.second, 1);
-        cmdbuf.BeginQuery(query.first, query.second, use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0);
+        cmdbuf.BeginQuery(query.first, query.second,
+                          use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0);
     });
 }
 
@@ -113,8 +115,10 @@ void HostCounter::EndQuery() {
         [query = query](vk::CommandBuffer cmdbuf) { cmdbuf.EndQuery(query.first, query.second); });
 }
 
-u64 HostCounter::BlockingQuery() const {
-    cache.GetScheduler().Wait(tick);
+u64 HostCounter::BlockingQuery(bool async) const {
+    if (!async) {
+        cache.GetScheduler().Wait(tick);
+    }
     u64 data;
     const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
         query.first, query.second, 1, sizeof(data), &data, sizeof(data),
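
For reference, the new BlockingQuery(bool async) contract visible above: the synchronous path still waits on the scheduler tick before reading the result, while the async path (used by the deferred flush) skips that CPU-side wait. A hedged, self-contained illustration of that split follows; Scheduler, Backend, and tick here are stand-ins, not the yuzu classes.

// Sketch of the blocking-vs-async split in BlockingQuery (stand-in types).
#include <cstdint>

struct Scheduler {
    std::uint64_t completed_tick = 0;
    void Wait(std::uint64_t tick) {
        // Stand-in for flushing and waiting on the command stream up to `tick`.
        if (completed_tick < tick) {
            completed_tick = tick;
        }
    }
};

struct Backend {
    // Stand-in for reading the query result back from the backend API.
    std::uint64_t GetQueryResult() const { return 42; }
};

std::uint64_t BlockingQuery(Scheduler& scheduler, const Backend& backend,
                            std::uint64_t tick, bool async) {
    if (!async) {
        // Synchronous path: make sure the GPU work that wrote the query has run.
        scheduler.Wait(tick);
    }
    // Async path: the caller is on a background flush, so the CPU-side wait is
    // skipped and the backend read is issued directly.
    return backend.GetQueryResult();
}

int main() {
    Scheduler scheduler{};
    const Backend backend{};
    return BlockingQuery(scheduler, backend, 1, /*async=*/true) == 42 ? 0 : 1;
}
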
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index 26762ee09..c1b9552eb 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -52,7 +52,8 @@ private:
 class QueryCache final
     : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
 public:
-    explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+    explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_,
+                        Core::Memory::Memory& cpu_memory_, const Device& device_,
                         Scheduler& scheduler_);
     ~QueryCache();
 
@@ -83,7 +84,7 @@ public:
     void EndQuery();
 
 private:
-    u64 BlockingQuery() const override;
+    u64 BlockingQuery(bool async = false) const override;
 
     QueryCache& cache;
     const VideoCore::QueryType type;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index f366fdd2a..2d865729a 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -172,7 +172,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
       pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
                      render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
-      query_cache{*this, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
+      query_cache{*this, cpu_memory_, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
       wfi_event(device.GetLogical().CreateEvent()) {
     scheduler.SetQueryCache(query_cache);