diff options
| author | 2021-11-05 15:52:31 +0100 | |
|---|---|---|
| committer | 2022-10-06 21:00:51 +0200 | |
| commit | 139ea93512aeead8a4aee3910a3de86eb109a838 (patch) | |
| tree | 857643fc08617b7035656a51728c399f30c8c2cb /src/video_core/renderer_vulkan | |
| parent | NVASGPU: Fix Remap. (diff) | |
| download | yuzu-139ea93512aeead8a4aee3910a3de86eb109a838.tar.gz yuzu-139ea93512aeead8a4aee3910a3de86eb109a838.tar.xz yuzu-139ea93512aeead8a4aee3910a3de86eb109a838.zip | |
VideoCore: implement channels on gpu caches.
Diffstat (limited to 'src/video_core/renderer_vulkan')
| -rw-r--r-- | src/video_core/renderer_vulkan/renderer_vulkan.cpp | 17 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_fence_manager.cpp | 4 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_fence_manager.h | 4 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 28 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_pipeline_cache.h | 6 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_query_cache.cpp | 7 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_query_cache.h | 5 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_rasterizer.cpp | 87 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_rasterizer.h | 20 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_state_tracker.cpp | 13 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_state_tracker.h | 22 |
11 files changed, 134 insertions(+), 79 deletions(-)
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 7c78d0299..68c2bc34c 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp | |||
| @@ -95,20 +95,25 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, | |||
| 95 | Core::Frontend::EmuWindow& emu_window, | 95 | Core::Frontend::EmuWindow& emu_window, |
| 96 | Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, | 96 | Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, |
| 97 | std::unique_ptr<Core::Frontend::GraphicsContext> context_) try | 97 | std::unique_ptr<Core::Frontend::GraphicsContext> context_) try |
| 98 | : RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_), | 98 | : RendererBase(emu_window, std::move(context_)), |
| 99 | cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary()), | 99 | telemetry_session(telemetry_session_), |
| 100 | cpu_memory(cpu_memory_), | ||
| 101 | gpu(gpu_), | ||
| 102 | library(OpenLibrary()), | ||
| 100 | instance(CreateInstance(library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type, | 103 | instance(CreateInstance(library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type, |
| 101 | true, Settings::values.renderer_debug.GetValue())), | 104 | true, Settings::values.renderer_debug.GetValue())), |
| 102 | debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), | 105 | debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), |
| 103 | surface(CreateSurface(instance, render_window)), | 106 | surface(CreateSurface(instance, render_window)), |
| 104 | device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), | 107 | device(CreateDevice(instance, dld, *surface)), |
| 105 | state_tracker(gpu), scheduler(device, state_tracker), | 108 | memory_allocator(device, false), |
| 109 | state_tracker(gpu), | ||
| 110 | scheduler(device, state_tracker), | ||
| 106 | swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, | 111 | swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, |
| 107 | render_window.GetFramebufferLayout().height, false), | 112 | render_window.GetFramebufferLayout().height, false), |
| 108 | blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, | 113 | blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, |
| 109 | screen_info), | 114 | screen_info), |
| 110 | rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device, | 115 | rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator, |
| 111 | memory_allocator, state_tracker, scheduler) { | 116 | state_tracker, scheduler) { |
| 112 | Report(); | 117 | Report(); |
| 113 | } catch (const vk::Exception& exception) { | 118 | } catch (const vk::Exception& exception) { |
| 114 | LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); | 119 | LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); |
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index c249b34d4..301cbbabe 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp | |||
| @@ -14,7 +14,7 @@ namespace Vulkan { | |||
| 14 | InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) | 14 | InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) |
| 15 | : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} | 15 | : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} |
| 16 | 16 | ||
| 17 | InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_) | 17 | InnerFence::InnerFence(Scheduler& scheduler_, u8* address_, u32 payload_, bool is_stubbed_) |
| 18 | : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {} | 18 | : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {} |
| 19 | 19 | ||
| 20 | InnerFence::~InnerFence() = default; | 20 | InnerFence::~InnerFence() = default; |
| @@ -52,7 +52,7 @@ Fence FenceManager::CreateFence(u32 value, bool is_stubbed) { | |||
| 52 | return std::make_shared<InnerFence>(scheduler, value, is_stubbed); | 52 | return std::make_shared<InnerFence>(scheduler, value, is_stubbed); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { | 55 | Fence FenceManager::CreateFence(u8* addr, u32 value, bool is_stubbed) { |
| 56 | return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed); | 56 | return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed); |
| 57 | } | 57 | } |
| 58 | 58 | ||
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 7c0bbd80a..ea9e88052 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h | |||
| @@ -26,7 +26,7 @@ class Scheduler; | |||
| 26 | class InnerFence : public VideoCommon::FenceBase { | 26 | class InnerFence : public VideoCommon::FenceBase { |
| 27 | public: | 27 | public: |
| 28 | explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); | 28 | explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); |
| 29 | explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_); | 29 | explicit InnerFence(Scheduler& scheduler_, u8* address_, u32 payload_, bool is_stubbed_); |
| 30 | ~InnerFence(); | 30 | ~InnerFence(); |
| 31 | 31 | ||
| 32 | void Queue(); | 32 | void Queue(); |
| @@ -51,7 +51,7 @@ public: | |||
| 51 | 51 | ||
| 52 | protected: | 52 | protected: |
| 53 | Fence CreateFence(u32 value, bool is_stubbed) override; | 53 | Fence CreateFence(u32 value, bool is_stubbed) override; |
| 54 | Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; | 54 | Fence CreateFence(u8* addr, u32 value, bool is_stubbed) override; |
| 55 | void QueueFence(Fence& fence) override; | 55 | void QueueFence(Fence& fence) override; |
| 56 | bool IsFenceSignaled(Fence& fence) const override; | 56 | bool IsFenceSignaled(Fence& fence) const override; |
| 57 | void WaitFence(Fence& fence) override; | 57 | void WaitFence(Fence& fence) override; |
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index accbfc8e1..b1e0b96c4 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | |||
| @@ -259,17 +259,15 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c | |||
| 259 | return std::memcmp(&rhs, this, Size()) == 0; | 259 | return std::memcmp(&rhs, this, Size()) == 0; |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, | 262 | PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_, |
| 263 | Tegra::Engines::KeplerCompute& kepler_compute_, | ||
| 264 | Tegra::MemoryManager& gpu_memory_, const Device& device_, | ||
| 265 | Scheduler& scheduler_, DescriptorPool& descriptor_pool_, | 263 | Scheduler& scheduler_, DescriptorPool& descriptor_pool_, |
| 266 | UpdateDescriptorQueue& update_descriptor_queue_, | 264 | UpdateDescriptorQueue& update_descriptor_queue_, |
| 267 | RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, | 265 | RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, |
| 268 | TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) | 266 | TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) |
| 269 | : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, | 267 | : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_}, |
| 270 | device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, | 268 | descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_}, |
| 271 | update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_}, | 269 | render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_}, |
| 272 | buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_}, | 270 | texture_cache{texture_cache_}, shader_notify{shader_notify_}, |
| 273 | use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, | 271 | use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, |
| 274 | workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), | 272 | workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), |
| 275 | serialization_thread(1, "VkPipelineSerialization") { | 273 | serialization_thread(1, "VkPipelineSerialization") { |
| @@ -337,7 +335,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() { | |||
| 337 | current_pipeline = nullptr; | 335 | current_pipeline = nullptr; |
| 338 | return nullptr; | 336 | return nullptr; |
| 339 | } | 337 | } |
| 340 | graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(), | 338 | graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(), |
| 341 | device.IsExtVertexInputDynamicStateSupported()); | 339 | device.IsExtVertexInputDynamicStateSupported()); |
| 342 | 340 | ||
| 343 | if (current_pipeline) { | 341 | if (current_pipeline) { |
| @@ -357,7 +355,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() { | |||
| 357 | if (!shader) { | 355 | if (!shader) { |
| 358 | return nullptr; | 356 | return nullptr; |
| 359 | } | 357 | } |
| 360 | const auto& qmd{kepler_compute.launch_description}; | 358 | const auto& qmd{kepler_compute->launch_description}; |
| 361 | const ComputePipelineCacheKey key{ | 359 | const ComputePipelineCacheKey key{ |
| 362 | .unique_hash = shader->unique_hash, | 360 | .unique_hash = shader->unique_hash, |
| 363 | .shared_memory_size = qmd.shared_alloc, | 361 | .shared_memory_size = qmd.shared_alloc, |
| @@ -486,13 +484,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const | |||
| 486 | } | 484 | } |
| 487 | // If something is using depth, we can assume that games are not rendering anything which | 485 | // If something is using depth, we can assume that games are not rendering anything which |
| 488 | // will be used one time. | 486 | // will be used one time. |
| 489 | if (maxwell3d.regs.zeta_enable) { | 487 | if (maxwell3d->regs.zeta_enable) { |
| 490 | return nullptr; | 488 | return nullptr; |
| 491 | } | 489 | } |
| 492 | // If games are using a small index count, we can assume these are full screen quads. | 490 | // If games are using a small index count, we can assume these are full screen quads. |
| 493 | // Usually these shaders are only used once for building textures so we can assume they | 491 | // Usually these shaders are only used once for building textures so we can assume they |
| 494 | // can't be built async | 492 | // can't be built async |
| 495 | if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { | 493 | if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) { |
| 496 | return pipeline; | 494 | return pipeline; |
| 497 | } | 495 | } |
| 498 | return nullptr; | 496 | return nullptr; |
| @@ -558,7 +556,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline( | |||
| 558 | } | 556 | } |
| 559 | Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr}; | 557 | Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr}; |
| 560 | return std::make_unique<GraphicsPipeline>( | 558 | return std::make_unique<GraphicsPipeline>( |
| 561 | maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, | 559 | *maxwell3d, *gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, |
| 562 | descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, | 560 | descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, |
| 563 | std::move(modules), infos); | 561 | std::move(modules), infos); |
| 564 | 562 | ||
| @@ -592,9 +590,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() { | |||
| 592 | 590 | ||
| 593 | std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline( | 591 | std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline( |
| 594 | const ComputePipelineCacheKey& key, const ShaderInfo* shader) { | 592 | const ComputePipelineCacheKey& key, const ShaderInfo* shader) { |
| 595 | const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; | 593 | const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()}; |
| 596 | const auto& qmd{kepler_compute.launch_description}; | 594 | const auto& qmd{kepler_compute->launch_description}; |
| 597 | ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; | 595 | ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start}; |
| 598 | env.SetCachedSize(shader->size_bytes); | 596 | env.SetCachedSize(shader->size_bytes); |
| 599 | 597 | ||
| 600 | main_pools.ReleaseContents(); | 598 | main_pools.ReleaseContents(); |
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 127957dbf..61f9e9366 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h | |||
| @@ -100,10 +100,8 @@ struct ShaderPools { | |||
| 100 | 100 | ||
| 101 | class PipelineCache : public VideoCommon::ShaderCache { | 101 | class PipelineCache : public VideoCommon::ShaderCache { |
| 102 | public: | 102 | public: |
| 103 | explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, | 103 | explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler, |
| 104 | Tegra::Engines::KeplerCompute& kepler_compute, | 104 | DescriptorPool& descriptor_pool, |
| 105 | Tegra::MemoryManager& gpu_memory, const Device& device, | ||
| 106 | Scheduler& scheduler, DescriptorPool& descriptor_pool, | ||
| 107 | UpdateDescriptorQueue& update_descriptor_queue, | 105 | UpdateDescriptorQueue& update_descriptor_queue, |
| 108 | RenderPassCache& render_pass_cache, BufferCache& buffer_cache, | 106 | RenderPassCache& render_pass_cache, BufferCache& buffer_cache, |
| 109 | TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); | 107 | TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); |
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 2b859c6b8..393bbdf37 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp | |||
| @@ -65,10 +65,9 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { | |||
| 65 | usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; | 65 | usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, | 68 | QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, |
| 69 | Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, | 69 | Scheduler& scheduler_) |
| 70 | const Device& device_, Scheduler& scheduler_) | 70 | : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_}, |
| 71 | : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_}, | ||
| 72 | query_pools{ | 71 | query_pools{ |
| 73 | QueryPool{device_, scheduler_, QueryType::SamplesPassed}, | 72 | QueryPool{device_, scheduler_, QueryType::SamplesPassed}, |
| 74 | } {} | 73 | } {} |
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index b0d86c4f8..26762ee09 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h | |||
| @@ -52,9 +52,8 @@ private: | |||
| 52 | class QueryCache final | 52 | class QueryCache final |
| 53 | : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { | 53 | : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { |
| 54 | public: | 54 | public: |
| 55 | explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, | 55 | explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_, |
| 56 | Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, | 56 | Scheduler& scheduler_); |
| 57 | const Device& device_, Scheduler& scheduler_); | ||
| 58 | ~QueryCache(); | 57 | ~QueryCache(); |
| 59 | 58 | ||
| 60 | std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); | 59 | std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7e40c2df1..5d9ff0589 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "common/microprofile.h" | 11 | #include "common/microprofile.h" |
| 12 | #include "common/scope_exit.h" | 12 | #include "common/scope_exit.h" |
| 13 | #include "common/settings.h" | 13 | #include "common/settings.h" |
| 14 | #include "video_core/control/channel_state.h" | ||
| 14 | #include "video_core/engines/kepler_compute.h" | 15 | #include "video_core/engines/kepler_compute.h" |
| 15 | #include "video_core/engines/maxwell_3d.h" | 16 | #include "video_core/engines/maxwell_3d.h" |
| 16 | #include "video_core/renderer_vulkan/blit_image.h" | 17 | #include "video_core/renderer_vulkan/blit_image.h" |
| @@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan | |||
| 148 | } // Anonymous namespace | 149 | } // Anonymous namespace |
| 149 | 150 | ||
| 150 | RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 151 | RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 151 | Tegra::MemoryManager& gpu_memory_, | ||
| 152 | Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, | 152 | Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, |
| 153 | const Device& device_, MemoryAllocator& memory_allocator_, | 153 | const Device& device_, MemoryAllocator& memory_allocator_, |
| 154 | StateTracker& state_tracker_, Scheduler& scheduler_) | 154 | StateTracker& state_tracker_, Scheduler& scheduler_) |
| 155 | : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, | 155 | : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_}, |
| 156 | gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, | 156 | memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_}, |
| 157 | screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, | ||
| 158 | state_tracker{state_tracker_}, scheduler{scheduler_}, | ||
| 159 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), | 157 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), |
| 160 | update_descriptor_queue(device, scheduler), | 158 | update_descriptor_queue(device, scheduler), |
| 161 | blit_image(device, scheduler, state_tracker, descriptor_pool), | 159 | blit_image(device, scheduler, state_tracker, descriptor_pool), |
| @@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra | |||
| 165 | memory_allocator, staging_pool, | 163 | memory_allocator, staging_pool, |
| 166 | blit_image, astc_decoder_pass, | 164 | blit_image, astc_decoder_pass, |
| 167 | render_pass_cache}, | 165 | render_pass_cache}, |
| 168 | texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), | 166 | texture_cache(texture_cache_runtime, *this), |
| 169 | buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, | 167 | buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, |
| 170 | update_descriptor_queue, descriptor_pool), | 168 | update_descriptor_queue, descriptor_pool), |
| 171 | buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), | 169 | buffer_cache(*this, cpu_memory_, buffer_cache_runtime), |
| 172 | pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler, | 170 | pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue, |
| 173 | descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache, | 171 | render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()), |
| 174 | texture_cache, gpu.ShaderNotify()), | 172 | query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache}, |
| 175 | query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache}, | ||
| 176 | fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), | 173 | fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), |
| 177 | wfi_event(device.GetLogical().CreateEvent()) { | 174 | wfi_event(device.GetLogical().CreateEvent()) { |
| 178 | scheduler.SetQueryCache(query_cache); | 175 | scheduler.SetQueryCache(query_cache); |
| @@ -199,8 +196,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { | |||
| 199 | 196 | ||
| 200 | UpdateDynamicStates(); | 197 | UpdateDynamicStates(); |
| 201 | 198 | ||
| 202 | const auto& regs{maxwell3d.regs}; | 199 | const auto& regs{maxwell3d->regs}; |
| 203 | const u32 num_instances{maxwell3d.mme_draw.instance_count}; | 200 | const u32 num_instances{maxwell3d->mme_draw.instance_count}; |
| 204 | const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; | 201 | const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; |
| 205 | scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { | 202 | scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { |
| 206 | if (draw_params.is_indexed) { | 203 | if (draw_params.is_indexed) { |
| @@ -218,14 +215,14 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { | |||
| 218 | void RasterizerVulkan::Clear() { | 215 | void RasterizerVulkan::Clear() { |
| 219 | MICROPROFILE_SCOPE(Vulkan_Clearing); | 216 | MICROPROFILE_SCOPE(Vulkan_Clearing); |
| 220 | 217 | ||
| 221 | if (!maxwell3d.ShouldExecute()) { | 218 | if (!maxwell3d->ShouldExecute()) { |
| 222 | return; | 219 | return; |
| 223 | } | 220 | } |
| 224 | FlushWork(); | 221 | FlushWork(); |
| 225 | 222 | ||
| 226 | query_cache.UpdateCounters(); | 223 | query_cache.UpdateCounters(); |
| 227 | 224 | ||
| 228 | auto& regs = maxwell3d.regs; | 225 | auto& regs = maxwell3d->regs; |
| 229 | const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || | 226 | const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || |
| 230 | regs.clear_buffers.A; | 227 | regs.clear_buffers.A; |
| 231 | const bool use_depth = regs.clear_buffers.Z; | 228 | const bool use_depth = regs.clear_buffers.Z; |
| @@ -339,9 +336,9 @@ void RasterizerVulkan::DispatchCompute() { | |||
| 339 | return; | 336 | return; |
| 340 | } | 337 | } |
| 341 | std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; | 338 | std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; |
| 342 | pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache); | 339 | pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache); |
| 343 | 340 | ||
| 344 | const auto& qmd{kepler_compute.launch_description}; | 341 | const auto& qmd{kepler_compute->launch_description}; |
| 345 | const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; | 342 | const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; |
| 346 | scheduler.RequestOutsideRenderPassOperationContext(); | 343 | scheduler.RequestOutsideRenderPassOperationContext(); |
| 347 | scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); | 344 | scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); |
| @@ -451,10 +448,11 @@ void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { | |||
| 451 | 448 | ||
| 452 | void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { | 449 | void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { |
| 453 | if (!gpu.IsAsync()) { | 450 | if (!gpu.IsAsync()) { |
| 454 | gpu_memory.Write<u32>(addr, value); | 451 | gpu_memory->Write<u32>(addr, value); |
| 455 | return; | 452 | return; |
| 456 | } | 453 | } |
| 457 | fence_manager.SignalSemaphore(addr, value); | 454 | auto paddr = gpu_memory->GetPointer(addr); |
| 455 | fence_manager.SignalSemaphore(paddr, value); | ||
| 458 | } | 456 | } |
| 459 | 457 | ||
| 460 | void RasterizerVulkan::SignalSyncPoint(u32 value) { | 458 | void RasterizerVulkan::SignalSyncPoint(u32 value) { |
| @@ -553,12 +551,12 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA() | |||
| 553 | 551 | ||
| 554 | void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, | 552 | void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, |
| 555 | std::span<u8> memory) { | 553 | std::span<u8> memory) { |
| 556 | auto cpu_addr = gpu_memory.GpuToCpuAddress(address); | 554 | auto cpu_addr = gpu_memory->GpuToCpuAddress(address); |
| 557 | if (!cpu_addr) [[unlikely]] { | 555 | if (!cpu_addr) [[unlikely]] { |
| 558 | gpu_memory.WriteBlock(address, memory.data(), copy_size); | 556 | gpu_memory->WriteBlock(address, memory.data(), copy_size); |
| 559 | return; | 557 | return; |
| 560 | } | 558 | } |
| 561 | gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); | 559 | gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size); |
| 562 | { | 560 | { |
| 563 | std::unique_lock<std::mutex> lock{buffer_cache.mutex}; | 561 | std::unique_lock<std::mutex> lock{buffer_cache.mutex}; |
| 564 | if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { | 562 | if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { |
| @@ -627,7 +625,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 | |||
| 627 | } | 625 | } |
| 628 | 626 | ||
| 629 | void RasterizerVulkan::UpdateDynamicStates() { | 627 | void RasterizerVulkan::UpdateDynamicStates() { |
| 630 | auto& regs = maxwell3d.regs; | 628 | auto& regs = maxwell3d->regs; |
| 631 | UpdateViewportsState(regs); | 629 | UpdateViewportsState(regs); |
| 632 | UpdateScissorsState(regs); | 630 | UpdateScissorsState(regs); |
| 633 | UpdateDepthBias(regs); | 631 | UpdateDepthBias(regs); |
| @@ -651,7 +649,7 @@ void RasterizerVulkan::UpdateDynamicStates() { | |||
| 651 | } | 649 | } |
| 652 | 650 | ||
| 653 | void RasterizerVulkan::BeginTransformFeedback() { | 651 | void RasterizerVulkan::BeginTransformFeedback() { |
| 654 | const auto& regs = maxwell3d.regs; | 652 | const auto& regs = maxwell3d->regs; |
| 655 | if (regs.tfb_enabled == 0) { | 653 | if (regs.tfb_enabled == 0) { |
| 656 | return; | 654 | return; |
| 657 | } | 655 | } |
| @@ -667,7 +665,7 @@ void RasterizerVulkan::BeginTransformFeedback() { | |||
| 667 | } | 665 | } |
| 668 | 666 | ||
| 669 | void RasterizerVulkan::EndTransformFeedback() { | 667 | void RasterizerVulkan::EndTransformFeedback() { |
| 670 | const auto& regs = maxwell3d.regs; | 668 | const auto& regs = maxwell3d->regs; |
| 671 | if (regs.tfb_enabled == 0) { | 669 | if (regs.tfb_enabled == 0) { |
| 672 | return; | 670 | return; |
| 673 | } | 671 | } |
| @@ -917,7 +915,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& | |||
| 917 | } | 915 | } |
| 918 | 916 | ||
| 919 | void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { | 917 | void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { |
| 920 | auto& dirty{maxwell3d.dirty.flags}; | 918 | auto& dirty{maxwell3d->dirty.flags}; |
| 921 | if (!dirty[Dirty::VertexInput]) { | 919 | if (!dirty[Dirty::VertexInput]) { |
| 922 | return; | 920 | return; |
| 923 | } | 921 | } |
| @@ -974,4 +972,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) | |||
| 974 | }); | 972 | }); |
| 975 | } | 973 | } |
| 976 | 974 | ||
| 975 | void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) { | ||
| 976 | CreateChannel(channel); | ||
| 977 | { | ||
| 978 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 979 | texture_cache.CreateChannel(channel); | ||
| 980 | buffer_cache.CreateChannel(channel); | ||
| 981 | } | ||
| 982 | pipeline_cache.CreateChannel(channel); | ||
| 983 | query_cache.CreateChannel(channel); | ||
| 984 | state_tracker.SetupTables(channel); | ||
| 985 | } | ||
| 986 | |||
| 987 | void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) { | ||
| 988 | const s32 channel_id = channel.bind_id; | ||
| 989 | BindToChannel(channel_id); | ||
| 990 | { | ||
| 991 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 992 | texture_cache.BindToChannel(channel_id); | ||
| 993 | buffer_cache.BindToChannel(channel_id); | ||
| 994 | } | ||
| 995 | pipeline_cache.BindToChannel(channel_id); | ||
| 996 | query_cache.BindToChannel(channel_id); | ||
| 997 | state_tracker.ChangeChannel(channel); | ||
| 998 | scheduler.InvalidateState(); | ||
| 999 | } | ||
| 1000 | |||
| 1001 | void RasterizerVulkan::ReleaseChannel(s32 channel_id) { | ||
| 1002 | EraseChannel(channel_id); | ||
| 1003 | { | ||
| 1004 | std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; | ||
| 1005 | texture_cache.EraseChannel(channel_id); | ||
| 1006 | buffer_cache.EraseChannel(channel_id); | ||
| 1007 | } | ||
| 1008 | pipeline_cache.EraseChannel(channel_id); | ||
| 1009 | query_cache.EraseChannel(channel_id); | ||
| 1010 | } | ||
| 1011 | |||
| 977 | } // namespace Vulkan | 1012 | } // namespace Vulkan |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 0370ea39b..642fe6576 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <boost/container/static_vector.hpp> | 8 | #include <boost/container/static_vector.hpp> |
| 9 | 9 | ||
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "video_core/control/channel_state_cache.h" | ||
| 11 | #include "video_core/engines/maxwell_dma.h" | 12 | #include "video_core/engines/maxwell_dma.h" |
| 12 | #include "video_core/rasterizer_accelerated.h" | 13 | #include "video_core/rasterizer_accelerated.h" |
| 13 | #include "video_core/rasterizer_interface.h" | 14 | #include "video_core/rasterizer_interface.h" |
| @@ -54,13 +55,13 @@ private: | |||
| 54 | BufferCache& buffer_cache; | 55 | BufferCache& buffer_cache; |
| 55 | }; | 56 | }; |
| 56 | 57 | ||
| 57 | class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { | 58 | class RasterizerVulkan final : public VideoCore::RasterizerAccelerated, |
| 59 | protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { | ||
| 58 | public: | 60 | public: |
| 59 | explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | 61 | explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, |
| 60 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | 62 | Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, |
| 61 | ScreenInfo& screen_info_, const Device& device_, | 63 | const Device& device_, MemoryAllocator& memory_allocator_, |
| 62 | MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, | 64 | StateTracker& state_tracker_, Scheduler& scheduler_); |
| 63 | Scheduler& scheduler_); | ||
| 64 | ~RasterizerVulkan() override; | 65 | ~RasterizerVulkan() override; |
| 65 | 66 | ||
| 66 | void Draw(bool is_indexed, bool is_instanced) override; | 67 | void Draw(bool is_indexed, bool is_instanced) override; |
| @@ -99,6 +100,12 @@ public: | |||
| 99 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, | 100 | void LoadDiskResources(u64 title_id, std::stop_token stop_loading, |
| 100 | const VideoCore::DiskResourceLoadCallback& callback) override; | 101 | const VideoCore::DiskResourceLoadCallback& callback) override; |
| 101 | 102 | ||
| 103 | void InitializeChannel(Tegra::Control::ChannelState& channel) override; | ||
| 104 | |||
| 105 | void BindChannel(Tegra::Control::ChannelState& channel) override; | ||
| 106 | |||
| 107 | void ReleaseChannel(s32 channel_id) override; | ||
| 108 | |||
| 102 | private: | 109 | private: |
| 103 | static constexpr size_t MAX_TEXTURES = 192; | 110 | static constexpr size_t MAX_TEXTURES = 192; |
| 104 | static constexpr size_t MAX_IMAGES = 48; | 111 | static constexpr size_t MAX_IMAGES = 48; |
| @@ -134,9 +141,6 @@ private: | |||
| 134 | void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); | 141 | void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); |
| 135 | 142 | ||
| 136 | Tegra::GPU& gpu; | 143 | Tegra::GPU& gpu; |
| 137 | Tegra::MemoryManager& gpu_memory; | ||
| 138 | Tegra::Engines::Maxwell3D& maxwell3d; | ||
| 139 | Tegra::Engines::KeplerCompute& kepler_compute; | ||
| 140 | 144 | ||
| 141 | ScreenInfo& screen_info; | 145 | ScreenInfo& screen_info; |
| 142 | const Device& device; | 146 | const Device& device; |
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index 9ad096431..a87bf8dd3 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | 7 | ||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "video_core/control/channel_state.h" | ||
| 10 | #include "video_core/dirty_flags.h" | 11 | #include "video_core/dirty_flags.h" |
| 11 | #include "video_core/engines/maxwell_3d.h" | 12 | #include "video_core/engines/maxwell_3d.h" |
| 12 | #include "video_core/gpu.h" | 13 | #include "video_core/gpu.h" |
| @@ -174,9 +175,8 @@ void SetupDirtyVertexBindings(Tables& tables) { | |||
| 174 | } | 175 | } |
| 175 | } // Anonymous namespace | 176 | } // Anonymous namespace |
| 176 | 177 | ||
| 177 | StateTracker::StateTracker(Tegra::GPU& gpu) | 178 | void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) { |
| 178 | : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} { | 179 | auto& tables{channel_state.maxwell_3d->dirty.tables}; |
| 179 | auto& tables{gpu.Maxwell3D().dirty.tables}; | ||
| 180 | SetupDirtyFlags(tables); | 180 | SetupDirtyFlags(tables); |
| 181 | SetupDirtyViewports(tables); | 181 | SetupDirtyViewports(tables); |
| 182 | SetupDirtyScissors(tables); | 182 | SetupDirtyScissors(tables); |
| @@ -199,4 +199,11 @@ StateTracker::StateTracker(Tegra::GPU& gpu) | |||
| 199 | SetupDirtyVertexBindings(tables); | 199 | SetupDirtyVertexBindings(tables); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) { | ||
| 203 | flags = &channel_state.maxwell_3d->dirty.flags; | ||
| 204 | } | ||
| 205 | |||
| 206 | StateTracker::StateTracker(Tegra::GPU& gpu) | ||
| 207 | : flags{}, invalidation_flags{MakeInvalidationFlags()} {} | ||
| 208 | |||
| 202 | } // namespace Vulkan | 209 | } // namespace Vulkan |
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h index a85bc1c10..9f8a887f9 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.h +++ b/src/video_core/renderer_vulkan/vk_state_tracker.h | |||
| @@ -10,6 +10,12 @@ | |||
| 10 | #include "video_core/dirty_flags.h" | 10 | #include "video_core/dirty_flags.h" |
| 11 | #include "video_core/engines/maxwell_3d.h" | 11 | #include "video_core/engines/maxwell_3d.h" |
| 12 | 12 | ||
| 13 | namespace Tegra { | ||
| 14 | namespace Control { | ||
| 15 | struct ChannelState; | ||
| 16 | } | ||
| 17 | } // namespace Tegra | ||
| 18 | |||
| 13 | namespace Vulkan { | 19 | namespace Vulkan { |
| 14 | 20 | ||
| 15 | namespace Dirty { | 21 | namespace Dirty { |
| @@ -56,16 +62,16 @@ public: | |||
| 56 | explicit StateTracker(Tegra::GPU& gpu); | 62 | explicit StateTracker(Tegra::GPU& gpu); |
| 57 | 63 | ||
| 58 | void InvalidateCommandBufferState() { | 64 | void InvalidateCommandBufferState() { |
| 59 | flags |= invalidation_flags; | 65 | (*flags) |= invalidation_flags; |
| 60 | current_topology = INVALID_TOPOLOGY; | 66 | current_topology = INVALID_TOPOLOGY; |
| 61 | } | 67 | } |
| 62 | 68 | ||
| 63 | void InvalidateViewports() { | 69 | void InvalidateViewports() { |
| 64 | flags[Dirty::Viewports] = true; | 70 | (*flags)[Dirty::Viewports] = true; |
| 65 | } | 71 | } |
| 66 | 72 | ||
| 67 | void InvalidateScissors() { | 73 | void InvalidateScissors() { |
| 68 | flags[Dirty::Scissors] = true; | 74 | (*flags)[Dirty::Scissors] = true; |
| 69 | } | 75 | } |
| 70 | 76 | ||
| 71 | bool TouchViewports() { | 77 | bool TouchViewports() { |
| @@ -139,16 +145,20 @@ public: | |||
| 139 | return has_changed; | 145 | return has_changed; |
| 140 | } | 146 | } |
| 141 | 147 | ||
| 148 | void SetupTables(Tegra::Control::ChannelState& channel_state); | ||
| 149 | |||
| 150 | void ChangeChannel(Tegra::Control::ChannelState& channel_state); | ||
| 151 | |||
| 142 | private: | 152 | private: |
| 143 | static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); | 153 | static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); |
| 144 | 154 | ||
| 145 | bool Exchange(std::size_t id, bool new_value) const noexcept { | 155 | bool Exchange(std::size_t id, bool new_value) const noexcept { |
| 146 | const bool is_dirty = flags[id]; | 156 | const bool is_dirty = (*flags)[id]; |
| 147 | flags[id] = new_value; | 157 | (*flags)[id] = new_value; |
| 148 | return is_dirty; | 158 | return is_dirty; |
| 149 | } | 159 | } |
| 150 | 160 | ||
| 151 | Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; | 161 | Tegra::Engines::Maxwell3D::DirtyState::Flags* flags; |
| 152 | Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; | 162 | Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; |
| 153 | Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; | 163 | Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; |
| 154 | }; | 164 | }; |