path: root/src/video_core/renderer_vulkan
author     Rodrigo Locatti  2020-09-10 02:28:54 +0000
committer  GitHub           2020-09-10 02:28:54 +0000
commit     663ea382dae4b0a8d958753fb370416b1602c55e (patch)
tree       74db3117e649cb5ae1a265c69f68d2bb03b9d542 /src/video_core/renderer_vulkan
parent     Merge pull request #4635 from lioncash/gc-adap (diff)
parent     video_core: Remove all Core::System references in renderer (diff)
Merge pull request #4633 from ReinUsesLisp/gpu-init
video_core: Remove all Core::System references in renderer
Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp    23
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.h      16
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp     20
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h        9
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp    17
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h       7
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp   12
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h      8
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp  92
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h    28
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp      5
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h        3
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp     128
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h        13
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp   14
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h     12
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp    4
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp   34
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h     16
19 files changed, 221 insertions, 240 deletions
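
Every file in this series follows the same pattern: a component that previously stored a Core::System& and reached through it for its collaborators now receives Tegra::GPU, Tegra::MemoryManager, Core::Memory::Memory, or Core::TelemetrySession references directly in its constructor. As a hypothetical caller-side sketch only (not part of this commit), wiring the new RendererVulkan constructor from a Core::System would look roughly like this, using the System accessors the removed code used to call internally:

    // Hypothetical helper, illustration only: the creator now passes each
    // dependency explicitly instead of handing the renderer a Core::System&.
    std::unique_ptr<VideoCore::RendererBase> CreateVulkanRenderer(
        Core::System& system, Core::Frontend::EmuWindow& emu_window,
        std::unique_ptr<Core::Frontend::GraphicsContext> context) {
        return std::make_unique<Vulkan::RendererVulkan>(system.TelemetrySession(), emu_window,
                                                        system.Memory(), system.GPU(),
                                                        std::move(context));
    }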
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index ae46e0444..0e4583986 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -86,7 +86,7 @@ Common::DynamicLibrary OpenVulkanLibrary() {
     if (!library.Open(filename.c_str())) {
         // Android devices may not have libvulkan.so.1, only libvulkan.so.
         filename = Common::DynamicLibrary::GetVersionedFilename("vulkan");
-        library.Open(filename.c_str());
+        (void)library.Open(filename.c_str());
     }
 #endif
     return library;
@@ -237,10 +237,12 @@ std::string BuildCommaSeparatedExtensions(std::vector<std::string> available_ext
 
 } // Anonymous namespace
 
-RendererVulkan::RendererVulkan(Core::System& system_, Core::Frontend::EmuWindow& emu_window,
-                               Tegra::GPU& gpu_,
+RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
+                               Core::Frontend::EmuWindow& emu_window,
+                               Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
                                std::unique_ptr<Core::Frontend::GraphicsContext> context)
-    : RendererBase{emu_window, std::move(context)}, system{system_}, gpu{gpu_} {}
+    : RendererBase{emu_window, std::move(context)}, telemetry_session{telemetry_session_},
+      cpu_memory{cpu_memory_}, gpu{gpu_} {}
 
 RendererVulkan::~RendererVulkan() {
     ShutDown();
@@ -304,15 +306,15 @@ bool RendererVulkan::Init() {
     swapchain = std::make_unique<VKSwapchain>(*surface, *device);
     swapchain->Create(framebuffer.width, framebuffer.height, false);
 
-    state_tracker = std::make_unique<StateTracker>(system);
+    state_tracker = std::make_unique<StateTracker>(gpu);
 
     scheduler = std::make_unique<VKScheduler>(*device, *resource_manager, *state_tracker);
 
-    rasterizer = std::make_unique<RasterizerVulkan>(system, render_window, screen_info, *device,
-                                                    *resource_manager, *memory_manager,
-                                                    *state_tracker, *scheduler);
+    rasterizer = std::make_unique<RasterizerVulkan>(
+        render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, *device,
+        *resource_manager, *memory_manager, *state_tracker, *scheduler);
 
-    blit_screen = std::make_unique<VKBlitScreen>(system, render_window, *rasterizer, *device,
+    blit_screen = std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device,
                                                  *resource_manager, *memory_manager, *swapchain,
                                                  *scheduler, screen_info);
 
@@ -440,8 +442,7 @@ void RendererVulkan::Report() const {
     LOG_INFO(Render_Vulkan, "Device: {}", model_name);
     LOG_INFO(Render_Vulkan, "Vulkan: {}", api_version);
 
-    auto& telemetry_session = system.TelemetrySession();
-    constexpr auto field = Common::Telemetry::FieldType::UserSystem;
+    static constexpr auto field = Common::Telemetry::FieldType::UserSystem;
     telemetry_session.AddField(field, "GPU_Vendor", vendor_name);
     telemetry_session.AddField(field, "GPU_Model", model_name);
     telemetry_session.AddField(field, "GPU_Vulkan_Driver", driver_name);
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 13debbbc0..ddff77942 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -14,7 +14,15 @@
 #include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Core {
-class System;
+class TelemetrySession;
+}
+
+namespace Core::Memory {
+class Memory;
+}
+
+namespace Tegra {
+class GPU;
 }
 
 namespace Vulkan {
@@ -38,7 +46,8 @@ struct VKScreenInfo {
 
 class RendererVulkan final : public VideoCore::RendererBase {
 public:
-    explicit RendererVulkan(Core::System& system, Core::Frontend::EmuWindow& emu_window,
+    explicit RendererVulkan(Core::TelemetrySession& telemtry_session,
+                            Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory,
                             Tegra::GPU& gpu,
                             std::unique_ptr<Core::Frontend::GraphicsContext> context);
     ~RendererVulkan() override;
@@ -59,7 +68,8 @@ private:
 
     void Report() const;
 
-    Core::System& system;
+    Core::TelemetrySession& telemetry_session;
+    Core::Memory::Memory& cpu_memory;
     Tegra::GPU& gpu;
 
     Common::DynamicLibrary library;
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index a551e3de8..2bea7b24d 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -210,14 +210,16 @@ struct VKBlitScreen::BufferData {
     // Unaligned image data goes here
 };
 
-VKBlitScreen::VKBlitScreen(Core::System& system, Core::Frontend::EmuWindow& render_window,
-                           VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
-                           VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
-                           VKSwapchain& swapchain, VKScheduler& scheduler,
-                           const VKScreenInfo& screen_info)
-    : system{system}, render_window{render_window}, rasterizer{rasterizer}, device{device},
-      resource_manager{resource_manager}, memory_manager{memory_manager}, swapchain{swapchain},
-      scheduler{scheduler}, image_count{swapchain.GetImageCount()}, screen_info{screen_info} {
+VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_,
+                           Core::Frontend::EmuWindow& render_window_,
+                           VideoCore::RasterizerInterface& rasterizer_, const VKDevice& device_,
+                           VKResourceManager& resource_manager_, VKMemoryManager& memory_manager_,
+                           VKSwapchain& swapchain_, VKScheduler& scheduler_,
+                           const VKScreenInfo& screen_info_)
+    : cpu_memory{cpu_memory_}, render_window{render_window_},
+      rasterizer{rasterizer_}, device{device_}, resource_manager{resource_manager_},
+      memory_manager{memory_manager_}, swapchain{swapchain_}, scheduler{scheduler_},
+      image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
     watches.resize(image_count);
     std::generate(watches.begin(), watches.end(),
                   []() { return std::make_unique<VKFenceWatch>(); });
@@ -259,7 +261,7 @@ std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferCon
     const auto pixel_format =
         VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format);
     const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset;
-    const auto host_ptr = system.Memory().GetPointer(framebuffer_addr);
+    const auto host_ptr = cpu_memory.GetPointer(framebuffer_addr);
     rasterizer.FlushRegion(ToCacheAddr(host_ptr), GetSizeInBytes(framebuffer));
 
     // TODO(Rodrigo): Read this from HLE
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 243640fab..838d38f69 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -15,6 +15,10 @@ namespace Core {
 class System;
 }
 
+namespace Core::Memory {
+class Memory;
+}
+
 namespace Core::Frontend {
 class EmuWindow;
 }
@@ -39,7 +43,8 @@ class VKSwapchain;
 
 class VKBlitScreen final {
 public:
-    explicit VKBlitScreen(Core::System& system, Core::Frontend::EmuWindow& render_window,
+    explicit VKBlitScreen(Core::Memory::Memory& cpu_memory,
+                          Core::Frontend::EmuWindow& render_window,
                           VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
                           VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
                           VKSwapchain& swapchain, VKScheduler& scheduler,
@@ -81,7 +86,7 @@ private:
     u64 GetRawImageOffset(const Tegra::FramebufferConfig& framebuffer,
                           std::size_t image_index) const;
 
-    Core::System& system;
+    Core::Memory::Memory& cpu_memory;
     Core::Frontend::EmuWindow& render_window;
     VideoCore::RasterizerInterface& rasterizer;
     const VKDevice& device;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 1d2f8b557..d9d3da9ea 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -145,14 +145,15 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst
     });
 }
 
-VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
-                             const VKDevice& device, VKMemoryManager& memory_manager,
-                             VKScheduler& scheduler, VKStagingBufferPool& staging_pool)
-    : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, system,
-                                                                 CreateStreamBuffer(device,
-                                                                                    scheduler)},
-      device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{
-                                                                                staging_pool} {}
+VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer,
+                             Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
+                             const VKDevice& device_, VKMemoryManager& memory_manager_,
+                             VKScheduler& scheduler_, VKStagingBufferPool& staging_pool_)
+    : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, gpu_memory, cpu_memory,
+                                                                 CreateStreamBuffer(device_,
+                                                                                    scheduler_)},
+      device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{
+                                                                                   staging_pool_} {}
 
 VKBufferCache::~VKBufferCache() = default;
 
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 991ee451c..7fb5ceedf 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -13,10 +13,6 @@
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
 #include "video_core/renderer_vulkan/wrapper.h"
 
-namespace Core {
-class System;
-}
-
 namespace Vulkan {
 
 class VKDevice;
@@ -53,7 +49,8 @@ private:
 
 class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
 public:
-    explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
+    explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer,
+                           Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
                            const VKDevice& device, VKMemoryManager& memory_manager,
                            VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
     ~VKBufferCache();
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index d7f65d435..55a8348fc 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -71,12 +71,12 @@ bool InnerFence::IsEventSignalled() const {
     }
 }
 
-VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
-                               const VKDevice& device, VKScheduler& scheduler,
-                               VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
-                               VKQueryCache& query_cache)
-    : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache),
-      device{device}, scheduler{scheduler} {}
+VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
+                               Tegra::MemoryManager& memory_manager, VKTextureCache& texture_cache,
+                               VKBufferCache& buffer_cache, VKQueryCache& query_cache,
+                               const VKDevice& device_, VKScheduler& scheduler_)
+    : GenericFenceManager(rasterizer, gpu, texture_cache, buffer_cache, query_cache),
+      device{device_}, scheduler{scheduler_} {}
 
 Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
     return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 043fe7947..1547d6d30 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -55,10 +55,10 @@ using GenericFenceManager =
 
 class VKFenceManager final : public GenericFenceManager {
 public:
-    explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
-                            const VKDevice& device, VKScheduler& scheduler,
-                            VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
-                            VKQueryCache& query_cache);
+    explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
+                            Tegra::MemoryManager& memory_manager, VKTextureCache& texture_cache,
+                            VKBufferCache& buffer_cache, VKQueryCache& query_cache,
+                            const VKDevice& device, VKScheduler& scheduler);
 
 protected:
     Fence CreateFence(u32 value, bool is_stubbed) override;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index cfdcdd6ab..5c038f4bc 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -135,64 +135,56 @@ bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) con
     return std::memcmp(&rhs, this, sizeof *this) == 0;
 }
 
-Shader::Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
-               VideoCommon::Shader::ProgramCode program_code, u32 main_offset)
-    : gpu_addr{gpu_addr}, program_code{std::move(program_code)},
-      registry{stage, GetEngine(system, stage)}, shader_ir{this->program_code, main_offset,
-                                                           compiler_settings, registry},
-      entries{GenerateShaderEntries(shader_ir)} {}
+Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine, Tegra::Engines::ShaderType stage,
+               GPUVAddr gpu_addr_, VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code_,
+               u32 main_offset)
+    : gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage, engine),
+      shader_ir(program_code, main_offset, compiler_settings, registry),
+      entries(GenerateShaderEntries(shader_ir)) {}
 
 Shader::~Shader() = default;
 
-Tegra::Engines::ConstBufferEngineInterface& Shader::GetEngine(Core::System& system,
-                                                              Tegra::Engines::ShaderType stage) {
-    if (stage == ShaderType::Compute) {
-        return system.GPU().KeplerCompute();
-    } else {
-        return system.GPU().Maxwell3D();
-    }
-}
-
-VKPipelineCache::VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer,
-                                 const VKDevice& device, VKScheduler& scheduler,
-                                 VKDescriptorPool& descriptor_pool,
-                                 VKUpdateDescriptorQueue& update_descriptor_queue,
-                                 VKRenderPassCache& renderpass_cache)
-    : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system}, device{device},
-      scheduler{scheduler}, descriptor_pool{descriptor_pool},
-      update_descriptor_queue{update_descriptor_queue}, renderpass_cache{renderpass_cache} {}
+VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu_,
+                                 Tegra::Engines::Maxwell3D& maxwell3d_,
+                                 Tegra::Engines::KeplerCompute& kepler_compute_,
+                                 Tegra::MemoryManager& gpu_memory_, const VKDevice& device_,
+                                 VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
+                                 VKUpdateDescriptorQueue& update_descriptor_queue_,
+                                 VKRenderPassCache& renderpass_cache_)
+    : VideoCommon::ShaderCache<Shader>{rasterizer}, gpu{gpu_}, maxwell3d{maxwell3d_},
+      kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_},
+      scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
+      update_descriptor_queue{update_descriptor_queue_}, renderpass_cache{renderpass_cache_} {}
 
 VKPipelineCache::~VKPipelineCache() = default;
 
 std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
-    const auto& gpu = system.GPU().Maxwell3D();
-
     std::array<Shader*, Maxwell::MaxShaderProgram> shaders{};
+
     for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
         const auto program{static_cast<Maxwell::ShaderProgram>(index)};
 
         // Skip stages that are not enabled
-        if (!gpu.regs.IsShaderConfigEnabled(index)) {
+        if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
             continue;
         }
 
-        auto& memory_manager{system.GPU().MemoryManager()};
-        const GPUVAddr program_addr{GetShaderAddress(system, program)};
-        const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
+        const GPUVAddr gpu_addr{GetShaderAddress(maxwell3d, program)};
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         ASSERT(cpu_addr);
 
         Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
         if (!result) {
-            const auto host_ptr{memory_manager.GetPointer(program_addr)};
+            const u8* const host_ptr{gpu_memory.GetPointer(gpu_addr)};
 
             // No shader found - create a new one
-            constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
+            static constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
             const auto stage = static_cast<ShaderType>(index == 0 ? 0 : index - 1);
-            ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
+            ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, false);
             const std::size_t size_in_bytes = code.size() * sizeof(u64);
 
-            auto shader = std::make_unique<Shader>(system, stage, program_addr, std::move(code),
-                                                   stage_offset);
+            auto shader = std::make_unique<Shader>(maxwell3d, stage, gpu_addr, *cpu_addr,
+                                                   std::move(code), stage_offset);
             result = shader.get();
 
             if (cpu_addr) {
@@ -215,11 +207,11 @@ VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
     }
     last_graphics_key = key;
 
-    if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(system.GPU())) {
+    if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(gpu)) {
         std::unique_lock lock{pipeline_cache};
         const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
         if (is_cache_miss) {
-            system.GPU().ShaderNotify().MarkSharderBuilding();
+            gpu.ShaderNotify().MarkSharderBuilding();
             LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
             const auto [program, bindings] = DecompileShaders(key.fixed_state);
             async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool,
@@ -233,13 +225,13 @@ VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
     const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
     auto& entry = pair->second;
     if (is_cache_miss) {
-        system.GPU().ShaderNotify().MarkSharderBuilding();
+        gpu.ShaderNotify().MarkSharderBuilding();
         LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
         const auto [program, bindings] = DecompileShaders(key.fixed_state);
         entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool,
                                                      update_descriptor_queue, renderpass_cache, key,
                                                      bindings, program);
-        system.GPU().ShaderNotify().MarkShaderComplete();
+        gpu.ShaderNotify().MarkShaderComplete();
     }
     last_graphics_pipeline = entry.get();
     return last_graphics_pipeline;
@@ -255,22 +247,21 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
     }
     LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
 
-    auto& memory_manager = system.GPU().MemoryManager();
-    const auto program_addr = key.shader;
+    const GPUVAddr gpu_addr = key.shader;
 
-    const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
+    const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
     ASSERT(cpu_addr);
 
     Shader* shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get();
     if (!shader) {
         // No shader found - create a new one
-        const auto host_ptr = memory_manager.GetPointer(program_addr);
+        const auto host_ptr = gpu_memory.GetPointer(gpu_addr);
 
-        ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
+        ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, true);
         const std::size_t size_in_bytes = code.size() * sizeof(u64);
 
-        auto shader_info = std::make_unique<Shader>(system, ShaderType::Compute, program_addr,
-                                                    std::move(code), KERNEL_MAIN_OFFSET);
+        auto shader_info = std::make_unique<Shader>(kepler_compute, ShaderType::Compute, gpu_addr,
+                                                    *cpu_addr, std::move(code), KERNEL_MAIN_OFFSET);
         shader = shader_info.get();
 
         if (cpu_addr) {
@@ -298,7 +289,7 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
 }
 
 void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) {
-    system.GPU().ShaderNotify().MarkShaderComplete();
+    gpu.ShaderNotify().MarkShaderComplete();
     std::unique_lock lock{pipeline_cache};
     graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline);
 }
@@ -339,9 +330,6 @@ void VKPipelineCache::OnShaderRemoval(Shader* shader) {
 
 std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
 VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
-    auto& memory_manager = system.GPU().MemoryManager();
-    const auto& gpu = system.GPU().Maxwell3D();
-
     Specialization specialization;
     if (fixed_state.dynamic_state.Topology() == Maxwell::PrimitiveTopology::Points ||
         device.IsExtExtendedDynamicStateSupported()) {
@@ -364,12 +352,12 @@ VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
         const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
 
         // Skip stages that are not enabled
-        if (!gpu.regs.IsShaderConfigEnabled(index)) {
+        if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
             continue;
         }
 
-        const GPUVAddr gpu_addr = GetShaderAddress(system, program_enum);
-        const std::optional<VAddr> cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
+        const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum);
+        const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
 
         const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
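
With Shader::GetEngine() gone, choosing between the graphics and compute engines now happens at the call site: GetShaders() constructs shaders against maxwell3d, while GetComputePipeline() constructs kernels against kepler_compute. A condensed, illustrative-only sketch of that selection (both engines implement Tegra::Engines::ConstBufferEngineInterface, which is what the Shader constructor takes):

    // Illustration only: the engine is picked where the shader is created,
    // instead of being looked up through Core::System inside Shader.
    Tegra::Engines::ConstBufferEngineInterface& engine =
        (stage == Tegra::Engines::ShaderType::Compute)
            ? static_cast<Tegra::Engines::ConstBufferEngineInterface&>(kepler_compute)
            : static_cast<Tegra::Engines::ConstBufferEngineInterface&>(maxwell3d);
    auto shader = std::make_unique<Shader>(engine, stage, gpu_addr, *cpu_addr,
                                           std::move(code), main_offset);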
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index c04829e77..1a31fd9f6 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -85,7 +85,8 @@ namespace Vulkan {
 
 class Shader {
 public:
-    explicit Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
+    explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine,
+                    Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, VAddr cpu_addr,
                     VideoCommon::Shader::ProgramCode program_code, u32 main_offset);
     ~Shader();
 
@@ -97,22 +98,19 @@ public:
         return shader_ir;
     }
 
-    const VideoCommon::Shader::Registry& GetRegistry() const {
-        return registry;
-    }
-
     const VideoCommon::Shader::ShaderIR& GetIR() const {
         return shader_ir;
     }
 
+    const VideoCommon::Shader::Registry& GetRegistry() const {
+        return registry;
+    }
+
     const ShaderEntries& GetEntries() const {
         return entries;
     }
 
 private:
-    static Tegra::Engines::ConstBufferEngineInterface& GetEngine(Core::System& system,
-                                                                  Tegra::Engines::ShaderType stage);
-
     GPUVAddr gpu_addr{};
     VideoCommon::Shader::ProgramCode program_code;
     VideoCommon::Shader::Registry registry;
@@ -122,9 +120,11 @@ private:
 
 class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> {
 public:
-    explicit VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer,
-                             const VKDevice& device, VKScheduler& scheduler,
-                             VKDescriptorPool& descriptor_pool,
+    explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu,
+                             Tegra::Engines::Maxwell3D& maxwell3d,
+                             Tegra::Engines::KeplerCompute& kepler_compute,
+                             Tegra::MemoryManager& gpu_memory, const VKDevice& device,
+                             VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
                              VKUpdateDescriptorQueue& update_descriptor_queue,
                              VKRenderPassCache& renderpass_cache);
     ~VKPipelineCache() override;
@@ -145,7 +145,11 @@ private:
     std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
         const FixedPipelineState& fixed_state);
 
-    Core::System& system;
+    Tegra::GPU& gpu;
+    Tegra::Engines::Maxwell3D& maxwell3d;
+    Tegra::Engines::KeplerCompute& kepler_compute;
+    Tegra::MemoryManager& gpu_memory;
+
     const VKDevice& device;
     VKScheduler& scheduler;
     VKDescriptorPool& descriptor_pool;
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 6cd63d090..5a97c959d 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -68,10 +68,11 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
     usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
 }
 
-VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer,
+                           Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
                            const VKDevice& device, VKScheduler& scheduler)
     : VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter,
-                                  QueryPool>{system, rasterizer},
+                                  QueryPool>{rasterizer, maxwell3d, gpu_memory},
       device{device}, scheduler{scheduler} {
     for (std::size_t i = 0; i < static_cast<std::size_t>(VideoCore::NumQueryTypes); ++i) {
         query_pools[i].Initialize(device, static_cast<VideoCore::QueryType>(i));
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index 40119e6d3..9be996e55 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -56,7 +56,8 @@ class VKQueryCache final
     : public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter,
                                          QueryPool> {
 public:
-    explicit VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+    explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer,
+                          Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
                           const VKDevice& device, VKScheduler& scheduler);
     ~VKQueryCache();
 
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index ff1b52eab..bafebe294 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -381,28 +381,30 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
     }
 }
 
-RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& renderer,
-                                   VKScreenInfo& screen_info, const VKDevice& device,
-                                   VKResourceManager& resource_manager,
-                                   VKMemoryManager& memory_manager, StateTracker& state_tracker,
-                                   VKScheduler& scheduler)
-    : RasterizerAccelerated{system.Memory()}, system{system}, render_window{renderer},
-      screen_info{screen_info}, device{device}, resource_manager{resource_manager},
-      memory_manager{memory_manager}, state_tracker{state_tracker}, scheduler{scheduler},
+RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_,
+                                   Tegra::MemoryManager& gpu_memory_,
+                                   Core::Memory::Memory& cpu_memory, VKScreenInfo& screen_info_,
+                                   const VKDevice& device_, VKResourceManager& resource_manager_,
+                                   VKMemoryManager& memory_manager_, StateTracker& state_tracker_,
+                                   VKScheduler& scheduler_)
+    : RasterizerAccelerated(cpu_memory), gpu(gpu_), gpu_memory(gpu_memory_),
+      maxwell3d(gpu.Maxwell3D()), kepler_compute(gpu.KeplerCompute()), screen_info(screen_info_),
+      device(device_), resource_manager(resource_manager_), memory_manager(memory_manager_),
+      state_tracker(state_tracker_), scheduler(scheduler_),
      staging_pool(device, memory_manager, scheduler), descriptor_pool(device),
      update_descriptor_queue(device, scheduler), renderpass_cache(device),
      quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
      quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
      uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
-      texture_cache(system, *this, device, resource_manager, memory_manager, scheduler,
-                    staging_pool),
-      pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue,
-                     renderpass_cache),
-      buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool),
-      sampler_cache(device),
-      fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache),
-      query_cache(system, *this, device, scheduler),
-      wfi_event{device.GetLogical().CreateNewEvent()}, async_shaders{renderer} {
+      texture_cache(*this, maxwell3d, gpu_memory, device, resource_manager, memory_manager,
+                    scheduler, staging_pool),
+      pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
+                     descriptor_pool, update_descriptor_queue, renderpass_cache),
+      buffer_cache(*this, gpu_memory, cpu_memory, device, memory_manager, scheduler, staging_pool),
+      sampler_cache(device), query_cache(*this, maxwell3d, gpu_memory, device, scheduler),
+      fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device,
+                    scheduler),
+      wfi_event(device.GetLogical().CreateNewEvent()), async_shaders(emu_window) {
     scheduler.SetQueryCache(query_cache);
     if (device.UseAsynchronousShaders()) {
         async_shaders.AllocateWorkers();
@@ -414,15 +416,13 @@ RasterizerVulkan::~RasterizerVulkan() = default;
 void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
     MICROPROFILE_SCOPE(Vulkan_Drawing);
 
+    SCOPE_EXIT({ gpu.TickWork(); });
     FlushWork();
 
     query_cache.UpdateCounters();
 
-    SCOPE_EXIT({ system.GPU().TickWork(); });
-
-    const auto& gpu = system.GPU().Maxwell3D();
     GraphicsPipelineCacheKey key;
-    key.fixed_state.Fill(gpu.regs, device.IsExtExtendedDynamicStateSupported());
+    key.fixed_state.Fill(maxwell3d.regs, device.IsExtExtendedDynamicStateSupported());
 
     buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed));
 
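
The SCOPE_EXIT above now ticks the cached gpu member instead of going through system.GPU(), and registering it at the top of Draw() keeps gpu.TickWork() running on every path out of the function. Conceptually it is just an RAII destructor; a minimal sketch of the mechanism (yuzu's actual macro lives in common/scope_exit.h, this is an illustration only):

    // Illustration only: roughly what SCOPE_EXIT({ gpu.TickWork(); }); amounts to.
    struct TickWorkOnExit {
        Tegra::GPU& gpu;
        ~TickWorkOnExit() { gpu.TickWork(); } // runs when Draw() returns, on any path
    };
    // ... inside Draw():
    TickWorkOnExit tick_guard{gpu};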
@@ -480,8 +480,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
 void RasterizerVulkan::Clear() {
     MICROPROFILE_SCOPE(Vulkan_Clearing);
 
-    const auto& gpu = system.GPU().Maxwell3D();
-    if (!system.GPU().Maxwell3D().ShouldExecute()) {
+    if (!maxwell3d.ShouldExecute()) {
         return;
     }
 
@@ -490,7 +489,7 @@ void RasterizerVulkan::Clear() {
 
     query_cache.UpdateCounters();
 
-    const auto& regs = gpu.regs;
+    const auto& regs = maxwell3d.regs;
     const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
                            regs.clear_buffers.A;
     const bool use_depth = regs.clear_buffers.Z;
@@ -559,7 +558,7 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
 
     query_cache.UpdateCounters();
 
-    const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+    const auto& launch_desc = kepler_compute.launch_description;
     auto& pipeline = pipeline_cache.GetComputePipeline({
         .shader = code_addr,
         .shared_memory_size = launch_desc.shared_alloc,
@@ -655,16 +654,14 @@ void RasterizerVulkan::SyncGuestHost() {
 }
 
 void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
-    auto& gpu{system.GPU()};
     if (!gpu.IsAsync()) {
-        gpu.MemoryManager().Write<u32>(addr, value);
+        gpu_memory.Write<u32>(addr, value);
         return;
     }
     fence_manager.SignalSemaphore(addr, value);
 }
 
 void RasterizerVulkan::SignalSyncPoint(u32 value) {
-    auto& gpu{system.GPU()};
     if (!gpu.IsAsync()) {
         gpu.IncrementSyncPoint(value);
         return;
@@ -673,7 +670,6 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) {
 }
 
 void RasterizerVulkan::ReleaseFences() {
-    auto& gpu{system.GPU()};
     if (!gpu.IsAsync()) {
         return;
     }
@@ -751,10 +747,6 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
     return true;
 }
 
-void RasterizerVulkan::SetupDirtyFlags() {
-    state_tracker.Initialize();
-}
-
 void RasterizerVulkan::FlushWork() {
     static constexpr u32 DRAWS_TO_DISPATCH = 4096;
 
@@ -778,10 +770,9 @@ void RasterizerVulkan::FlushWork() {
 
 RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments(bool is_clear) {
     MICROPROFILE_SCOPE(Vulkan_RenderTargets);
-    auto& maxwell3d = system.GPU().Maxwell3D();
-    auto& dirty = maxwell3d.dirty.flags;
-    auto& regs = maxwell3d.regs;
 
+    const auto& regs = maxwell3d.regs;
+    auto& dirty = maxwell3d.dirty.flags;
     const bool update_rendertargets = dirty[VideoCommon::Dirty::RenderTargets];
     dirty[VideoCommon::Dirty::RenderTargets] = false;
 
@@ -844,7 +835,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
         return true;
     };
 
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
     const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
     for (std::size_t index = 0; index < num_attachments; ++index) {
         if (try_push(color_attachments[index])) {
@@ -880,13 +871,12 @@ RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineSt
                                                                   bool is_instanced) {
     MICROPROFILE_SCOPE(Vulkan_Geometry);
 
-    const auto& gpu = system.GPU().Maxwell3D();
-    const auto& regs = gpu.regs;
+    const auto& regs = maxwell3d.regs;
 
     SetupVertexArrays(buffer_bindings);
 
     const u32 base_instance = regs.vb_base_instance;
-    const u32 num_instances = is_instanced ? gpu.mme_draw.instance_count : 1;
+    const u32 num_instances = is_instanced ? maxwell3d.mme_draw.instance_count : 1;
     const u32 base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first;
     const u32 num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count;
 
@@ -947,7 +937,7 @@ void RasterizerVulkan::SetupImageTransitions(
 }
 
 void RasterizerVulkan::UpdateDynamicStates() {
-    auto& regs = system.GPU().Maxwell3D().regs;
+    auto& regs = maxwell3d.regs;
     UpdateViewportsState(regs);
     UpdateScissorsState(regs);
     UpdateDepthBias(regs);
@@ -968,7 +958,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
 }
 
 void RasterizerVulkan::BeginTransformFeedback() {
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
     if (regs.tfb_enabled == 0) {
         return;
     }
@@ -1000,7 +990,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
 }
 
 void RasterizerVulkan::EndTransformFeedback() {
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
     if (regs.tfb_enabled == 0) {
         return;
     }
@@ -1013,7 +1003,7 @@ void RasterizerVulkan::EndTransformFeedback() {
 }
 
 void RasterizerVulkan::SetupVertexArrays(BufferBindings& buffer_bindings) {
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
 
     for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
         const auto& vertex_array = regs.vertex_array[index];
@@ -1039,7 +1029,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
     if (params.num_vertices == 0) {
         return;
     }
-    const auto& regs = system.GPU().Maxwell3D().regs;
+    const auto& regs = maxwell3d.regs;
     switch (regs.draw.topology) {
     case Maxwell::PrimitiveTopology::Quads: {
         if (!params.is_indexed) {
@@ -1087,8 +1077,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
 
 void RasterizerVulkan::SetupGraphicsConstBuffers(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_ConstBuffers);
-    const auto& gpu = system.GPU().Maxwell3D();
-    const auto& shader_stage = gpu.state.shader_stages[stage];
+    const auto& shader_stage = maxwell3d.state.shader_stages[stage];
     for (const auto& entry : entries.const_buffers) {
         SetupConstBuffer(entry, shader_stage.const_buffers[entry.GetIndex()]);
     }
@@ -1096,8 +1085,7 @@ void RasterizerVulkan::SetupGraphicsConstBuffers(const ShaderEntries& entries, s
 
 void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_GlobalBuffers);
-    auto& gpu{system.GPU()};
-    const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage]};
+    const auto& cbufs{maxwell3d.state.shader_stages[stage]};
 
     for (const auto& entry : entries.global_buffers) {
         const auto addr = cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset();
@@ -1107,19 +1095,17 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries,
 
 void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.uniform_texels) {
-        const auto image = GetTextureInfo(gpu, entry, stage).tic;
+        const auto image = GetTextureInfo(maxwell3d, entry, stage).tic;
         SetupUniformTexels(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.samplers) {
         for (std::size_t i = 0; i < entry.size; ++i) {
-            const auto texture = GetTextureInfo(gpu, entry, stage, i);
+            const auto texture = GetTextureInfo(maxwell3d, entry, stage, i);
             SetupTexture(texture, entry);
         }
     }
@@ -1127,25 +1113,23 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
 
 void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.storage_texels) {
-        const auto image = GetTextureInfo(gpu, entry, stage).tic;
+        const auto image = GetTextureInfo(maxwell3d, entry, stage).tic;
         SetupStorageTexel(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Images);
-    const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.images) {
-        const auto tic = GetTextureInfo(gpu, entry, stage).tic;
+        const auto tic = GetTextureInfo(maxwell3d, entry, stage).tic;
         SetupImage(tic, entry);
     }
 }
 
 void RasterizerVulkan::SetupComputeConstBuffers(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_ConstBuffers);
-    const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+    const auto& launch_desc = kepler_compute.launch_description;
     for (const auto& entry : entries.const_buffers) {
         const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
         const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
@@ -1159,7 +1143,7 @@ void RasterizerVulkan::SetupComputeConstBuffers(const ShaderEntries& entries) {
 
 void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_GlobalBuffers);
-    const auto cbufs{system.GPU().KeplerCompute().launch_description.const_buffer_config};
+    const auto& cbufs{kepler_compute.launch_description.const_buffer_config};
     for (const auto& entry : entries.global_buffers) {
         const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
         SetupGlobalBuffer(entry, addr);
@@ -1168,19 +1152,17 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
 
 void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.uniform_texels) {
-        const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+        const auto image = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
         SetupUniformTexels(image, entry);
     }
 }
 
 void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
-    const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.samplers) {
         for (std::size_t i = 0; i < entry.size; ++i) {
-            const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
+            const auto texture = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex, i);
             SetupTexture(texture, entry);
         }
     }
@@ -1188,18 +1170,16 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
1188 1170
1189void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) { 1171void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
1190 MICROPROFILE_SCOPE(Vulkan_Textures); 1172 MICROPROFILE_SCOPE(Vulkan_Textures);
1191 const auto& gpu = system.GPU().KeplerCompute();
1192 for (const auto& entry : entries.storage_texels) { 1173 for (const auto& entry : entries.storage_texels) {
1193 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic; 1174 const auto image = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
1194 SetupStorageTexel(image, entry); 1175 SetupStorageTexel(image, entry);
1195 } 1176 }
1196} 1177}
1197 1178
1198void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) { 1179void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
1199 MICROPROFILE_SCOPE(Vulkan_Images); 1180 MICROPROFILE_SCOPE(Vulkan_Images);
1200 const auto& gpu = system.GPU().KeplerCompute();
1201 for (const auto& entry : entries.images) { 1181 for (const auto& entry : entries.images) {
1202 const auto tic = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic; 1182 const auto tic = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
1203 SetupImage(tic, entry); 1183 SetupImage(tic, entry);
1204 } 1184 }
1205} 1185}
@@ -1223,9 +1203,8 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
1223} 1203}
1224 1204
1225void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address) { 1205void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address) {
1226 auto& memory_manager{system.GPU().MemoryManager()}; 1206 const u64 actual_addr = gpu_memory.Read<u64>(address);
1227 const auto actual_addr = memory_manager.Read<u64>(address); 1207 const u32 size = gpu_memory.Read<u32>(address + 8);
1228 const auto size = memory_manager.Read<u32>(address + 8);
1229 1208
1230 if (size == 0) { 1209 if (size == 0) {
1231 // Sometimes global memory pointers don't have a proper size. Upload a dummy entry 1210 // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
@@ -1508,7 +1487,7 @@ std::size_t RasterizerVulkan::CalculateComputeStreamBufferSize() const {
1508} 1487}
1509 1488
1510std::size_t RasterizerVulkan::CalculateVertexArraysSize() const { 1489std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
1511 const auto& regs = system.GPU().Maxwell3D().regs; 1490 const auto& regs = maxwell3d.regs;
1512 1491
1513 std::size_t size = 0; 1492 std::size_t size = 0;
1514 for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { 1493 for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
@@ -1523,9 +1502,8 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
1523} 1502}
1524 1503
1525std::size_t RasterizerVulkan::CalculateIndexBufferSize() const { 1504std::size_t RasterizerVulkan::CalculateIndexBufferSize() const {
1526 const auto& regs = system.GPU().Maxwell3D().regs; 1505 return static_cast<std::size_t>(maxwell3d.regs.index_array.count) *
1527 return static_cast<std::size_t>(regs.index_array.count) * 1506 static_cast<std::size_t>(maxwell3d.regs.index_array.FormatSizeInBytes());
1528 static_cast<std::size_t>(regs.index_array.FormatSizeInBytes());
1529} 1507}
1530 1508
1531std::size_t RasterizerVulkan::CalculateConstBufferSize( 1509std::size_t RasterizerVulkan::CalculateConstBufferSize(
@@ -1540,7 +1518,7 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
1540} 1518}
1541 1519
1542RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const { 1520RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
1543 const auto& regs = system.GPU().Maxwell3D().regs; 1521 const auto& regs = maxwell3d.regs;
1544 const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count); 1522 const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
1545 1523
1546 RenderPassParams params; 1524 RenderPassParams params;
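
Editor's note on the pattern the vk_rasterizer.cpp hunks above apply: every per-call system.GPU().Maxwell3D(), system.GPU().KeplerCompute() and system.GPU().MemoryManager() lookup is replaced by a reference the rasterizer captures once at construction and then reads directly (maxwell3d.regs, kepler_compute.launch_description, gpu_memory.Read). A minimal, self-contained sketch of that shape, using simplified stand-in types rather than the real yuzu classes:

    #include <cstddef>

    // Stand-in for Tegra::Engines::Maxwell3D: only the registers used below.
    struct EngineRegs {
        struct {
            std::size_t count = 0;
            std::size_t FormatSizeInBytes() const { return 4; } // assume 32-bit indices
        } index_array;
    };
    struct Maxwell3DEngine { EngineRegs regs; };

    // Stand-in for Tegra::GPU, which owns its engines.
    struct GpuStandIn {
        Maxwell3DEngine& Get3DEngine() { return maxwell3d; }
        Maxwell3DEngine maxwell3d;
    };

    class RasterizerSketch {
    public:
        // Before this commit the constructor took a System& and each method did
        // system.GPU().Maxwell3D(); now the engine reference is captured once.
        explicit RasterizerSketch(GpuStandIn& gpu_) : gpu{gpu_}, maxwell3d{gpu_.Get3DEngine()} {}

        std::size_t CalculateIndexBufferSize() const {
            // Mirrors the rewritten CalculateIndexBufferSize above: read the cached
            // engine reference instead of resolving it through a System object.
            return maxwell3d.regs.index_array.count *
                   maxwell3d.regs.index_array.FormatSizeInBytes();
        }

    private:
        GpuStandIn& gpu;
        Maxwell3DEngine& maxwell3d;
    };
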
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index f640ba649..16251d0f6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -106,7 +106,8 @@ struct ImageView {
106 106
107class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { 107class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
108public: 108public:
109 explicit RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& render_window, 109 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
110 Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
110 VKScreenInfo& screen_info, const VKDevice& device, 111 VKScreenInfo& screen_info, const VKDevice& device,
111 VKResourceManager& resource_manager, VKMemoryManager& memory_manager, 112 VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
112 StateTracker& state_tracker, VKScheduler& scheduler); 113 StateTracker& state_tracker, VKScheduler& scheduler);
@@ -135,7 +136,6 @@ public:
135 const Tegra::Engines::Fermi2D::Config& copy_config) override; 136 const Tegra::Engines::Fermi2D::Config& copy_config) override;
136 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, 137 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
137 u32 pixel_stride) override; 138 u32 pixel_stride) override;
138 void SetupDirtyFlags() override;
139 139
140 VideoCommon::Shader::AsyncShaders& GetAsyncShaders() { 140 VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
141 return async_shaders; 141 return async_shaders;
@@ -279,8 +279,11 @@ private:
279 279
280 VkBuffer DefaultBuffer(); 280 VkBuffer DefaultBuffer();
281 281
282 Core::System& system; 282 Tegra::GPU& gpu;
283 Core::Frontend::EmuWindow& render_window; 283 Tegra::MemoryManager& gpu_memory;
284 Tegra::Engines::Maxwell3D& maxwell3d;
285 Tegra::Engines::KeplerCompute& kepler_compute;
286
284 VKScreenInfo& screen_info; 287 VKScreenInfo& screen_info;
285 const VKDevice& device; 288 const VKDevice& device;
286 VKResourceManager& resource_manager; 289 VKResourceManager& resource_manager;
@@ -300,8 +303,8 @@ private:
300 VKPipelineCache pipeline_cache; 303 VKPipelineCache pipeline_cache;
301 VKBufferCache buffer_cache; 304 VKBufferCache buffer_cache;
302 VKSamplerCache sampler_cache; 305 VKSamplerCache sampler_cache;
303 VKFenceManager fence_manager;
304 VKQueryCache query_cache; 306 VKQueryCache query_cache;
307 VKFenceManager fence_manager;
305 308
306 vk::Buffer default_buffer; 309 vk::Buffer default_buffer;
307 VKMemoryCommit default_buffer_commit; 310 VKMemoryCommit default_buffer_commit;
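
Editor's note on the member reorder at the end of this hunk (fence_manager moved below query_cache): non-static data members are initialized in declaration order, so a member whose constructor is handed a reference to another member should be declared after it. A minimal sketch with stand-in types, assuming (as the rest of this commit suggests) that the fence manager is constructed from the query cache:

    struct QueryCacheStandIn {};

    struct FenceManagerStandIn {
        explicit FenceManagerStandIn(QueryCacheStandIn& query_cache_) : query_cache{query_cache_} {}
        QueryCacheStandIn& query_cache;
    };

    class OwnerSketch {
    public:
        OwnerSketch() = default;

    private:
        // Declaration order is initialization order: query_cache is built first,
        // so the reference handed to fence_manager is already valid.
        QueryCacheStandIn query_cache;
        FenceManagerStandIn fence_manager{query_cache};
    };
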
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 4bd1009f9..5d2c4a796 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -132,12 +132,9 @@ void SetupDirtyStencilTestEnable(Tables& tables) {
132 132
133} // Anonymous namespace 133} // Anonymous namespace
134 134
135StateTracker::StateTracker(Core::System& system) 135StateTracker::StateTracker(Tegra::GPU& gpu)
136 : system{system}, invalidation_flags{MakeInvalidationFlags()} {} 136 : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
137 137 auto& tables = gpu.Maxwell3D().dirty.tables;
138void StateTracker::Initialize() {
139 auto& dirty = system.GPU().Maxwell3D().dirty;
140 auto& tables = dirty.tables;
141 SetupDirtyRenderTargets(tables); 138 SetupDirtyRenderTargets(tables);
142 SetupDirtyViewports(tables); 139 SetupDirtyViewports(tables);
143 SetupDirtyScissors(tables); 140 SetupDirtyScissors(tables);
@@ -155,9 +152,4 @@ void StateTracker::Initialize() {
155 SetupDirtyStencilTestEnable(tables); 152 SetupDirtyStencilTestEnable(tables);
156} 153}
157 154
158void StateTracker::InvalidateCommandBufferState() {
159 system.GPU().Maxwell3D().dirty.flags |= invalidation_flags;
160 current_topology = INVALID_TOPOLOGY;
161}
162
163} // namespace Vulkan 155} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index 13a6ce786..1de789e57 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -45,11 +45,12 @@ class StateTracker {
45 using Maxwell = Tegra::Engines::Maxwell3D::Regs; 45 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
46 46
47public: 47public:
48 explicit StateTracker(Core::System& system); 48 explicit StateTracker(Tegra::GPU& gpu);
49 49
50 void Initialize(); 50 void InvalidateCommandBufferState() {
51 51 flags |= invalidation_flags;
52 void InvalidateCommandBufferState(); 52 current_topology = INVALID_TOPOLOGY;
53 }
53 54
54 bool TouchViewports() { 55 bool TouchViewports() {
55 return Exchange(Dirty::Viewports, false); 56 return Exchange(Dirty::Viewports, false);
@@ -121,13 +122,12 @@ private:
121 static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); 122 static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
122 123
123 bool Exchange(std::size_t id, bool new_value) const noexcept { 124 bool Exchange(std::size_t id, bool new_value) const noexcept {
124 auto& flags = system.GPU().Maxwell3D().dirty.flags;
125 const bool is_dirty = flags[id]; 125 const bool is_dirty = flags[id];
126 flags[id] = new_value; 126 flags[id] = new_value;
127 return is_dirty; 127 return is_dirty;
128 } 128 }
129 129
130 Core::System& system; 130 Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
131 Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; 131 Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
132 Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; 132 Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
133}; 133};
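
Editor's note on the StateTracker change across the two files above: the tracker now grabs a reference to Maxwell3D's dirty-flag bitset in its constructor, so Exchange() and InvalidateCommandBufferState() can be inlined in the header with no System lookup. A minimal sketch of that shape with simplified stand-in types (the real tracker builds invalidation_flags via MakeInvalidationFlags() and registers the per-register dirty tables in its constructor, as the vk_state_tracker.cpp hunk shows):

    #include <bitset>
    #include <cstddef>

    using DirtyFlags = std::bitset<128>; // stand-in for Maxwell3D::DirtyState::Flags

    class StateTrackerSketch {
    public:
        explicit StateTrackerSketch(DirtyFlags& dirty_flags, DirtyFlags invalidation)
            : flags{dirty_flags}, invalidation_flags{invalidation} {}

        // Raise every bit a freshly reset command buffer depends on.
        void InvalidateCommandBufferState() {
            flags |= invalidation_flags;
        }

        // Read a single dirty bit and overwrite it with new_value.
        bool Exchange(std::size_t id, bool new_value) const noexcept {
            const bool is_dirty = flags[id];
            flags[id] = new_value;
            return is_dirty;
        }

    private:
        DirtyFlags& flags;             // points into the engine's dirty state
        DirtyFlags invalidation_flags; // bits to raise on command buffer reset
    };
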
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index a5526a3f5..3c9171a5e 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -57,9 +57,9 @@ u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
57 57
58} // Anonymous namespace 58} // Anonymous namespace
59 59
60VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler, 60VKStreamBuffer::VKStreamBuffer(const VKDevice& device_, VKScheduler& scheduler_,
61 VkBufferUsageFlags usage) 61 VkBufferUsageFlags usage)
62 : device{device}, scheduler{scheduler} { 62 : device{device_}, scheduler{scheduler_} {
63 CreateBuffers(usage); 63 CreateBuffers(usage);
64 ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE); 64 ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
65 ReserveWatches(previous_watches, WATCHES_INITIAL_RESERVE); 65 ReserveWatches(previous_watches, WATCHES_INITIAL_RESERVE);
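
Editor's note on the parameter renames in this hunk (device -> device_, scheduler -> scheduler_): a trailing underscore keeps constructor parameters from shadowing the members of the same name, so any use of the bare name inside the constructor body unambiguously refers to the member. A minimal sketch:

    struct SchedulerStandIn {};

    class StreamBufferSketch {
    public:
        explicit StreamBufferSketch(SchedulerStandIn& scheduler_) : scheduler{scheduler_} {
            // With the underscore, 'scheduler' here is the member, not the parameter.
            Use(scheduler);
        }

    private:
        static void Use(SchedulerStandIn&) {}
        SchedulerStandIn& scheduler;
    };
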
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 2c6f54101..06182d909 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -188,13 +188,13 @@ u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, Tegra::Texture::Swizzl
188 188
189} // Anonymous namespace 189} // Anonymous namespace
190 190
191CachedSurface::CachedSurface(Core::System& system, const VKDevice& device, 191CachedSurface::CachedSurface(const VKDevice& device, VKResourceManager& resource_manager,
192 VKResourceManager& resource_manager, VKMemoryManager& memory_manager, 192 VKMemoryManager& memory_manager, VKScheduler& scheduler,
193 VKScheduler& scheduler, VKStagingBufferPool& staging_pool, 193 VKStagingBufferPool& staging_pool, GPUVAddr gpu_addr,
194 GPUVAddr gpu_addr, const SurfaceParams& params) 194 const SurfaceParams& params)
195 : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, system{system}, 195 : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, device{device},
196 device{device}, resource_manager{resource_manager}, 196 resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
197 memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} { 197 staging_pool{staging_pool} {
198 if (params.IsBuffer()) { 198 if (params.IsBuffer()) {
199 buffer = CreateBuffer(device, params, host_memory_size); 199 buffer = CreateBuffer(device, params, host_memory_size);
200 commit = memory_manager.Commit(buffer, false); 200 commit = memory_manager.Commit(buffer, false);
@@ -490,19 +490,21 @@ VkImageView CachedSurfaceView::GetAttachment() {
490 return *render_target; 490 return *render_target;
491} 491}
492 492
493VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, 493VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
494 const VKDevice& device, VKResourceManager& resource_manager, 494 Tegra::Engines::Maxwell3D& maxwell3d,
495 VKMemoryManager& memory_manager, VKScheduler& scheduler, 495 Tegra::MemoryManager& gpu_memory, const VKDevice& device_,
496 VKStagingBufferPool& staging_pool) 496 VKResourceManager& resource_manager_,
497 : TextureCache(system, rasterizer, device.IsOptimalAstcSupported()), device{device}, 497 VKMemoryManager& memory_manager_, VKScheduler& scheduler_,
498 resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler}, 498 VKStagingBufferPool& staging_pool_)
499 staging_pool{staging_pool} {} 499 : TextureCache(rasterizer, maxwell3d, gpu_memory, device_.IsOptimalAstcSupported()),
500 device{device_}, resource_manager{resource_manager_},
501 memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{staging_pool_} {}
500 502
501VKTextureCache::~VKTextureCache() = default; 503VKTextureCache::~VKTextureCache() = default;
502 504
503Surface VKTextureCache::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) { 505Surface VKTextureCache::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
504 return std::make_shared<CachedSurface>(system, device, resource_manager, memory_manager, 506 return std::make_shared<CachedSurface>(device, resource_manager, memory_manager, scheduler,
505 scheduler, staging_pool, gpu_addr, params); 507 staging_pool, gpu_addr, params);
506} 508}
507 509
508void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface, 510void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 807e26c8a..e47d02c41 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -15,10 +15,6 @@
15#include "video_core/texture_cache/surface_base.h" 15#include "video_core/texture_cache/surface_base.h"
16#include "video_core/texture_cache/texture_cache.h" 16#include "video_core/texture_cache/texture_cache.h"
17 17
18namespace Core {
19class System;
20}
21
22namespace VideoCore { 18namespace VideoCore {
23class RasterizerInterface; 19class RasterizerInterface;
24} 20}
@@ -45,10 +41,10 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
45 friend CachedSurfaceView; 41 friend CachedSurfaceView;
46 42
47public: 43public:
48 explicit CachedSurface(Core::System& system, const VKDevice& device, 44 explicit CachedSurface(const VKDevice& device, VKResourceManager& resource_manager,
49 VKResourceManager& resource_manager, VKMemoryManager& memory_manager, 45 VKMemoryManager& memory_manager, VKScheduler& scheduler,
50 VKScheduler& scheduler, VKStagingBufferPool& staging_pool, 46 VKStagingBufferPool& staging_pool, GPUVAddr gpu_addr,
51 GPUVAddr gpu_addr, const SurfaceParams& params); 47 const SurfaceParams& params);
52 ~CachedSurface(); 48 ~CachedSurface();
53 49
54 void UploadTexture(const std::vector<u8>& staging_buffer) override; 50 void UploadTexture(const std::vector<u8>& staging_buffer) override;
@@ -101,7 +97,6 @@ private:
101 97
102 VkImageSubresourceRange GetImageSubresourceRange() const; 98 VkImageSubresourceRange GetImageSubresourceRange() const;
103 99
104 Core::System& system;
105 const VKDevice& device; 100 const VKDevice& device;
106 VKResourceManager& resource_manager; 101 VKResourceManager& resource_manager;
107 VKMemoryManager& memory_manager; 102 VKMemoryManager& memory_manager;
@@ -201,7 +196,8 @@ private:
201 196
202class VKTextureCache final : public TextureCacheBase { 197class VKTextureCache final : public TextureCacheBase {
203public: 198public:
204 explicit VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, 199 explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
200 Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
205 const VKDevice& device, VKResourceManager& resource_manager, 201 const VKDevice& device, VKResourceManager& resource_manager,
206 VKMemoryManager& memory_manager, VKScheduler& scheduler, 202 VKMemoryManager& memory_manager, VKScheduler& scheduler,
207 VKStagingBufferPool& staging_pool); 203 VKStagingBufferPool& staging_pool);
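
Editor's note on the texture-cache constructor change above: instead of storing Core::System, the Vulkan cache forwards the rasterizer, Maxwell3D and GPU-memory references straight to the common TextureCache base and keeps only the Vulkan objects it owns. A minimal sketch of that constructor shape, again with stand-in types rather than the real yuzu classes:

    struct RasterizerStandIn {};
    struct Maxwell3DStandIn {};
    struct GpuMemoryStandIn {};
    struct DeviceStandIn {
        bool IsOptimalAstcSupported() const { return false; }
    };

    class TextureCacheBaseSketch {
    public:
        TextureCacheBaseSketch(RasterizerStandIn& rasterizer_, Maxwell3DStandIn& maxwell3d_,
                               GpuMemoryStandIn& gpu_memory_, bool accelerated_astc)
            : rasterizer{rasterizer_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_},
              is_astc_supported{accelerated_astc} {}

    protected:
        RasterizerStandIn& rasterizer;
        Maxwell3DStandIn& maxwell3d;
        GpuMemoryStandIn& gpu_memory;
        bool is_astc_supported;
    };

    class VulkanTextureCacheSketch final : public TextureCacheBaseSketch {
    public:
        // Dependencies the base needs are forwarded; only Vulkan-side state stays here.
        VulkanTextureCacheSketch(RasterizerStandIn& rasterizer, Maxwell3DStandIn& maxwell3d,
                                 GpuMemoryStandIn& gpu_memory, const DeviceStandIn& device_)
            : TextureCacheBaseSketch(rasterizer, maxwell3d, gpu_memory,
                                     device_.IsOptimalAstcSupported()),
              device{device_} {}

    private:
        const DeviceStandIn& device;
    };
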