author     Fernando S  2022-10-06 21:29:53 +0200
committer  GitHub      2022-10-06 21:29:53 +0200
commit     1effa578f12f79d7816e3543291f302f126cc1d2 (patch)
tree       14803b31b6817294d40d57446f6fa94c5ff3fe9a /src/video_core
parent     Merge pull request #9025 from FernandoS27/slava-ukrayini (diff)
parent     vulkan_blitter: Fix pool allocation double free. (diff)
download   yuzu-1effa578f12f79d7816e3543291f302f126cc1d2.tar.gz
           yuzu-1effa578f12f79d7816e3543291f302f126cc1d2.tar.xz
           yuzu-1effa578f12f79d7816e3543291f302f126cc1d2.zip
Merge pull request #8467 from FernandoS27/yfc-rel-1
Project yuzu Fried Chicken (Y.F.C.) Part 1
Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/CMakeLists.txt | 51
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 166
-rw-r--r--  src/video_core/cdma_pusher.cpp | 29
-rw-r--r--  src/video_core/cdma_pusher.h | 18
-rw-r--r--  src/video_core/command_classes/host1x.cpp | 29
-rw-r--r--  src/video_core/control/channel_state.cpp | 40
-rw-r--r--  src/video_core/control/channel_state.h | 68
-rw-r--r--  src/video_core/control/channel_state_cache.cpp | 14
-rw-r--r--  src/video_core/control/channel_state_cache.h | 101
-rw-r--r--  src/video_core/control/channel_state_cache.inc | 86
-rw-r--r--  src/video_core/control/scheduler.cpp | 32
-rw-r--r--  src/video_core/control/scheduler.h | 37
-rw-r--r--  src/video_core/dma_pusher.cpp | 26
-rw-r--r--  src/video_core/dma_pusher.h | 39
-rw-r--r--  src/video_core/engines/engine_upload.cpp | 46
-rw-r--r--  src/video_core/engines/engine_upload.h | 6
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 13
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 13
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 43
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 111
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 6
-rw-r--r--  src/video_core/engines/puller.cpp | 306
-rw-r--r--  src/video_core/engines/puller.h | 177
-rw-r--r--  src/video_core/fence_manager.h | 104
-rw-r--r--  src/video_core/gpu.cpp | 706
-rw-r--r--  src/video_core/gpu.h | 93
-rw-r--r--  src/video_core/gpu_thread.cpp | 22
-rw-r--r--  src/video_core/gpu_thread.h | 14
-rw-r--r--  src/video_core/host1x/codecs/codec.cpp (renamed from src/video_core/command_classes/codecs/codec.cpp) | 44
-rw-r--r--  src/video_core/host1x/codecs/codec.h (renamed from src/video_core/command_classes/codecs/codec.h) | 21
-rw-r--r--  src/video_core/host1x/codecs/h264.cpp (renamed from src/video_core/command_classes/codecs/h264.cpp) | 17
-rw-r--r--  src/video_core/host1x/codecs/h264.h (renamed from src/video_core/command_classes/codecs/h264.h) | 16
-rw-r--r--  src/video_core/host1x/codecs/vp8.cpp (renamed from src/video_core/command_classes/codecs/vp8.cpp) | 12
-rw-r--r--  src/video_core/host1x/codecs/vp8.h (renamed from src/video_core/command_classes/codecs/vp8.h) | 15
-rw-r--r--  src/video_core/host1x/codecs/vp9.cpp (renamed from src/video_core/command_classes/codecs/vp9.cpp) | 23
-rw-r--r--  src/video_core/host1x/codecs/vp9.h (renamed from src/video_core/command_classes/codecs/vp9.h) | 22
-rw-r--r--  src/video_core/host1x/codecs/vp9_types.h (renamed from src/video_core/command_classes/codecs/vp9_types.h) | 1
-rw-r--r--  src/video_core/host1x/control.cpp | 33
-rw-r--r--  src/video_core/host1x/control.h (renamed from src/video_core/command_classes/host1x.h) | 20
-rw-r--r--  src/video_core/host1x/host1x.cpp | 17
-rw-r--r--  src/video_core/host1x/host1x.h | 57
-rw-r--r--  src/video_core/host1x/nvdec.cpp (renamed from src/video_core/command_classes/nvdec.cpp) | 11
-rw-r--r--  src/video_core/host1x/nvdec.h (renamed from src/video_core/command_classes/nvdec.h) | 14
-rw-r--r--  src/video_core/host1x/nvdec_common.h (renamed from src/video_core/command_classes/nvdec_common.h) | 4
-rw-r--r--  src/video_core/host1x/sync_manager.cpp (renamed from src/video_core/command_classes/sync_manager.cpp) | 13
-rw-r--r--  src/video_core/host1x/sync_manager.h (renamed from src/video_core/command_classes/sync_manager.h) | 12
-rw-r--r--  src/video_core/host1x/syncpoint_manager.cpp | 96
-rw-r--r--  src/video_core/host1x/syncpoint_manager.h | 98
-rw-r--r--  src/video_core/host1x/vic.cpp (renamed from src/video_core/command_classes/vic.cpp) | 36
-rw-r--r--  src/video_core/host1x/vic.h (renamed from src/video_core/command_classes/vic.h) | 13
-rw-r--r--  src/video_core/macro/macro.cpp | 1
-rw-r--r--  src/video_core/memory_manager.cpp | 754
-rw-r--r--  src/video_core/memory_manager.h | 174
-rw-r--r--  src/video_core/query_cache.h | 22
-rw-r--r--  src/video_core/rasterizer_interface.h | 20
-rw-r--r--  src/video_core/renderer_opengl/gl_compute_pipeline.cpp | 20
-rw-r--r--  src/video_core/renderer_opengl/gl_compute_pipeline.h | 16
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.cpp | 13
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.h | 6
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.cpp | 29
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.h | 16
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 217
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 22
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 42
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 9
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.cpp | 17
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.h | 83
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h | 2
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 8
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 18
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 28
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 34
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 123
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 29
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp | 18
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h | 27
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 32
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 5
-rw-r--r--  src/video_core/shader_cache.cpp | 33
-rw-r--r--  src/video_core/shader_cache.h | 15
-rw-r--r--  src/video_core/surface.h | 8
-rw-r--r--  src/video_core/texture_cache/format_lookup_table.cpp | 2
-rw-r--r--  src/video_core/texture_cache/formatter.h | 4
-rw-r--r--  src/video_core/texture_cache/image_base.cpp | 13
-rw-r--r--  src/video_core/texture_cache/image_base.h | 3
-rw-r--r--  src/video_core/texture_cache/render_targets.h | 1
-rw-r--r--  src/video_core/texture_cache/texture_cache.cpp | 15
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 222
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h | 85
-rw-r--r--  src/video_core/texture_cache/util.cpp | 3
-rw-r--r--  src/video_core/textures/decoders.cpp | 240
-rw-r--r--  src/video_core/textures/decoders.h | 33
-rw-r--r--  src/video_core/vulkan_common/vulkan_wrapper.h | 20
107 files changed, 3427 insertions, 2133 deletions
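Editorial orientation: this merge replaces the single implicit GPU channel with per-channel state and a scheduler. Each channel owns its own engine instances and DMA pusher (see the new files under src/video_core/control below), submissions are routed through Tegra::Control::Scheduler, and the caches become channel-aware via VideoCommon::ChannelSetupCaches. A hedged C++ sketch of how the pieces introduced in this diff fit together; bind_id, memory_manager, system, gpu, rasterizer and command_list are illustrative placeholders, not the emulator's actual call sites:

    // Sketch only, assuming the interfaces added in this diff (channel_state.h, scheduler.h).
    auto channel = std::make_shared<Tegra::Control::ChannelState>(bind_id);
    channel->memory_manager = memory_manager; // address space; may be shared across channels
    channel->Init(system, gpu);               // creates the DMA pusher and the engine instances
    channel->BindRasterizer(&rasterizer);

    Tegra::Control::Scheduler scheduler{gpu};
    scheduler.DeclareChannel(channel);
    scheduler.Push(channel->bind_id, std::move(command_list)); // binds the channel, pushes, dispatches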
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 5b3808351..40e6d1ec4 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -4,7 +4,7 @@
4add_subdirectory(host_shaders) 4add_subdirectory(host_shaders)
5 5
6if(LIBVA_FOUND) 6if(LIBVA_FOUND)
7 set_source_files_properties(command_classes/codecs/codec.cpp 7 set_source_files_properties(host1x/codecs/codec.cpp
8 PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) 8 PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1)
9 list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) 9 list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES})
10endif() 10endif()
@@ -15,26 +15,14 @@ add_library(video_core STATIC
15 buffer_cache/buffer_cache.h 15 buffer_cache/buffer_cache.h
16 cdma_pusher.cpp 16 cdma_pusher.cpp
17 cdma_pusher.h 17 cdma_pusher.h
18 command_classes/codecs/codec.cpp
19 command_classes/codecs/codec.h
20 command_classes/codecs/h264.cpp
21 command_classes/codecs/h264.h
22 command_classes/codecs/vp8.cpp
23 command_classes/codecs/vp8.h
24 command_classes/codecs/vp9.cpp
25 command_classes/codecs/vp9.h
26 command_classes/codecs/vp9_types.h
27 command_classes/host1x.cpp
28 command_classes/host1x.h
29 command_classes/nvdec.cpp
30 command_classes/nvdec.h
31 command_classes/nvdec_common.h
32 command_classes/sync_manager.cpp
33 command_classes/sync_manager.h
34 command_classes/vic.cpp
35 command_classes/vic.h
36 compatible_formats.cpp 18 compatible_formats.cpp
37 compatible_formats.h 19 compatible_formats.h
20 control/channel_state.cpp
21 control/channel_state.h
22 control/channel_state_cache.cpp
23 control/channel_state_cache.h
24 control/scheduler.cpp
25 control/scheduler.h
38 delayed_destruction_ring.h 26 delayed_destruction_ring.h
39 dirty_flags.cpp 27 dirty_flags.cpp
40 dirty_flags.h 28 dirty_flags.h
@@ -54,7 +42,31 @@ add_library(video_core STATIC
54 engines/maxwell_3d.h 42 engines/maxwell_3d.h
55 engines/maxwell_dma.cpp 43 engines/maxwell_dma.cpp
56 engines/maxwell_dma.h 44 engines/maxwell_dma.h
45 engines/puller.cpp
46 engines/puller.h
57 framebuffer_config.h 47 framebuffer_config.h
48 host1x/codecs/codec.cpp
49 host1x/codecs/codec.h
50 host1x/codecs/h264.cpp
51 host1x/codecs/h264.h
52 host1x/codecs/vp8.cpp
53 host1x/codecs/vp8.h
54 host1x/codecs/vp9.cpp
55 host1x/codecs/vp9.h
56 host1x/codecs/vp9_types.h
57 host1x/control.cpp
58 host1x/control.h
59 host1x/host1x.cpp
60 host1x/host1x.h
61 host1x/nvdec.cpp
62 host1x/nvdec.h
63 host1x/nvdec_common.h
64 host1x/sync_manager.cpp
65 host1x/sync_manager.h
66 host1x/syncpoint_manager.cpp
67 host1x/syncpoint_manager.h
68 host1x/vic.cpp
69 host1x/vic.h
58 macro/macro.cpp 70 macro/macro.cpp
59 macro/macro.h 71 macro/macro.h
60 macro/macro_hle.cpp 72 macro/macro_hle.cpp
@@ -195,6 +207,7 @@ add_library(video_core STATIC
195 texture_cache/render_targets.h 207 texture_cache/render_targets.h
196 texture_cache/samples_helper.h 208 texture_cache/samples_helper.h
197 texture_cache/slot_vector.h 209 texture_cache/slot_vector.h
210 texture_cache/texture_cache.cpp
198 texture_cache/texture_cache.h 211 texture_cache/texture_cache.h
199 texture_cache/texture_cache_base.h 212 texture_cache/texture_cache_base.h
200 texture_cache/types.h 213 texture_cache/types.h
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index f015dae56..8e26b3f95 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -5,7 +5,6 @@
5 5
6#include <algorithm> 6#include <algorithm>
7#include <array> 7#include <array>
8#include <deque>
9#include <memory> 8#include <memory>
10#include <mutex> 9#include <mutex>
11#include <numeric> 10#include <numeric>
@@ -23,6 +22,7 @@
23#include "common/settings.h" 22#include "common/settings.h"
24#include "core/memory.h" 23#include "core/memory.h"
25#include "video_core/buffer_cache/buffer_base.h" 24#include "video_core/buffer_cache/buffer_base.h"
25#include "video_core/control/channel_state_cache.h"
26#include "video_core/delayed_destruction_ring.h" 26#include "video_core/delayed_destruction_ring.h"
27#include "video_core/dirty_flags.h" 27#include "video_core/dirty_flags.h"
28#include "video_core/engines/kepler_compute.h" 28#include "video_core/engines/kepler_compute.h"
@@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE
56using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>; 56using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
57 57
58template <typename P> 58template <typename P>
59class BufferCache { 59class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
60 60
61 // Page size for caching purposes. 61 // Page size for caching purposes.
62 // This is unrelated to the CPU page size and it can be changed as it seems optimal. 62 // This is unrelated to the CPU page size and it can be changed as it seems optimal.
@@ -116,10 +116,7 @@ public:
116 static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB); 116 static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
117 117
118 explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, 118 explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
119 Tegra::Engines::Maxwell3D& maxwell3d_, 119 Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
120 Tegra::Engines::KeplerCompute& kepler_compute_,
121 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
122 Runtime& runtime_);
123 120
124 void TickFrame(); 121 void TickFrame();
125 122
@@ -129,7 +126,7 @@ public:
129 126
130 void DownloadMemory(VAddr cpu_addr, u64 size); 127 void DownloadMemory(VAddr cpu_addr, u64 size);
131 128
132 bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer); 129 bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
133 130
134 void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); 131 void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
135 132
@@ -353,7 +350,7 @@ private:
353 350
354 void NotifyBufferDeletion(); 351 void NotifyBufferDeletion();
355 352
356 [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const; 353 [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const;
357 354
358 [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, 355 [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
359 PixelFormat format); 356 PixelFormat format);
@@ -367,9 +364,6 @@ private:
367 void ClearDownload(IntervalType subtract_interval); 364 void ClearDownload(IntervalType subtract_interval);
368 365
369 VideoCore::RasterizerInterface& rasterizer; 366 VideoCore::RasterizerInterface& rasterizer;
370 Tegra::Engines::Maxwell3D& maxwell3d;
371 Tegra::Engines::KeplerCompute& kepler_compute;
372 Tegra::MemoryManager& gpu_memory;
373 Core::Memory::Memory& cpu_memory; 367 Core::Memory::Memory& cpu_memory;
374 368
375 SlotVector<Buffer> slot_buffers; 369 SlotVector<Buffer> slot_buffers;
@@ -444,12 +438,8 @@ private:
444 438
445template <class P> 439template <class P>
446BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_, 440BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
447 Tegra::Engines::Maxwell3D& maxwell3d_, 441 Core::Memory::Memory& cpu_memory_, Runtime& runtime_)
448 Tegra::Engines::KeplerCompute& kepler_compute_, 442 : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} {
449 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
450 Runtime& runtime_)
451 : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
452 kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
453 // Ensure the first slot is used for the null buffer 443 // Ensure the first slot is used for the null buffer
454 void(slot_buffers.insert(runtime, NullBufferParams{})); 444 void(slot_buffers.insert(runtime, NullBufferParams{}));
455 common_ranges.clear(); 445 common_ranges.clear();
@@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
552 542
553template <class P> 543template <class P>
554bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { 544bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
555 const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address); 545 const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
556 const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address); 546 const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
557 if (!cpu_src_address || !cpu_dest_address) { 547 if (!cpu_src_address || !cpu_dest_address) {
558 return false; 548 return false;
559 } 549 }
@@ -611,7 +601,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
611 601
612template <class P> 602template <class P>
613bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { 603bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
614 const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address); 604 const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
615 if (!cpu_dst_address) { 605 if (!cpu_dst_address) {
616 return false; 606 return false;
617 } 607 }
@@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
635template <class P> 625template <class P>
636void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, 626void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
637 u32 size) { 627 u32 size) {
638 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 628 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
639 const Binding binding{ 629 const Binding binding{
640 .cpu_addr = *cpu_addr, 630 .cpu_addr = *cpu_addr,
641 .size = size, 631 .size = size,
@@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
673 if (is_indexed) { 663 if (is_indexed) {
674 BindHostIndexBuffer(); 664 BindHostIndexBuffer();
675 } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { 665 } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
676 const auto& regs = maxwell3d.regs; 666 const auto& regs = maxwell3d->regs;
677 if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { 667 if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
678 runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count); 668 runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count);
679 } 669 }
@@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index,
733 enabled_storage_buffers[stage] |= 1U << ssbo_index; 723 enabled_storage_buffers[stage] |= 1U << ssbo_index;
734 written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index; 724 written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
735 725
736 const auto& cbufs = maxwell3d.state.shader_stages[stage]; 726 const auto& cbufs = maxwell3d->state.shader_stages[stage];
737 const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; 727 const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
738 storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr); 728 storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
739} 729}
740 730
741template <class P> 731template <class P>
@@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index,
770 enabled_compute_storage_buffers |= 1U << ssbo_index; 760 enabled_compute_storage_buffers |= 1U << ssbo_index;
771 written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index; 761 written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
772 762
773 const auto& launch_desc = kepler_compute.launch_description; 763 const auto& launch_desc = kepler_compute->launch_description;
774 ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0); 764 ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
775 765
776 const auto& cbufs = launch_desc.const_buffer_config; 766 const auto& cbufs = launch_desc.const_buffer_config;
777 const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset; 767 const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
778 compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr); 768 compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
779} 769}
780 770
781template <class P> 771template <class P>
@@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
836 const bool is_accuracy_normal = 826 const bool is_accuracy_normal =
837 Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal; 827 Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;
838 828
829 auto it = committed_ranges.begin();
830 while (it != committed_ranges.end()) {
831 auto& current_intervals = *it;
832 auto next_it = std::next(it);
833 while (next_it != committed_ranges.end()) {
834 for (auto& interval : *next_it) {
835 current_intervals.subtract(interval);
836 }
837 next_it++;
838 }
839 it++;
840 }
841
839 boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads; 842 boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
840 u64 total_size_bytes = 0; 843 u64 total_size_bytes = 0;
841 u64 largest_copy = 0; 844 u64 largest_copy = 0;
@@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() {
991 const u32 size = index_buffer.size; 994 const u32 size = index_buffer.size;
992 SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); 995 SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
993 if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { 996 if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
994 const u32 new_offset = offset + maxwell3d.regs.index_array.first * 997 const u32 new_offset = offset + maxwell3d->regs.index_array.first *
995 maxwell3d.regs.index_array.FormatSizeInBytes(); 998 maxwell3d->regs.index_array.FormatSizeInBytes();
996 runtime.BindIndexBuffer(buffer, new_offset, size); 999 runtime.BindIndexBuffer(buffer, new_offset, size);
997 } else { 1000 } else {
998 runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format, 1001 runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_array.format,
999 maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count, 1002 maxwell3d->regs.index_array.first,
1000 buffer, offset, size); 1003 maxwell3d->regs.index_array.count, buffer, offset, size);
1001 } 1004 }
1002} 1005}
1003 1006
1004template <class P> 1007template <class P>
1005void BufferCache<P>::BindHostVertexBuffers() { 1008void BufferCache<P>::BindHostVertexBuffers() {
1006 auto& flags = maxwell3d.dirty.flags; 1009 auto& flags = maxwell3d->dirty.flags;
1007 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { 1010 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
1008 const Binding& binding = vertex_buffers[index]; 1011 const Binding& binding = vertex_buffers[index];
1009 Buffer& buffer = slot_buffers[binding.buffer_id]; 1012 Buffer& buffer = slot_buffers[binding.buffer_id];
@@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
1014 } 1017 }
1015 flags[Dirty::VertexBuffer0 + index] = false; 1018 flags[Dirty::VertexBuffer0 + index] = false;
1016 1019
1017 const u32 stride = maxwell3d.regs.vertex_array[index].stride; 1020 const u32 stride = maxwell3d->regs.vertex_array[index].stride;
1018 const u32 offset = buffer.Offset(binding.cpu_addr); 1021 const u32 offset = buffer.Offset(binding.cpu_addr);
1019 runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride); 1022 runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride);
1020 } 1023 }
@@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
1154 1157
1155template <class P> 1158template <class P>
1156void BufferCache<P>::BindHostTransformFeedbackBuffers() { 1159void BufferCache<P>::BindHostTransformFeedbackBuffers() {
1157 if (maxwell3d.regs.tfb_enabled == 0) { 1160 if (maxwell3d->regs.tfb_enabled == 0) {
1158 return; 1161 return;
1159 } 1162 }
1160 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { 1163 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1239,16 +1242,19 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
1239 1242
1240template <class P> 1243template <class P>
1241void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) { 1244void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
1242 if (is_indexed) { 1245 do {
1243 UpdateIndexBuffer(); 1246 has_deleted_buffers = false;
1244 } 1247 if (is_indexed) {
1245 UpdateVertexBuffers(); 1248 UpdateIndexBuffer();
1246 UpdateTransformFeedbackBuffers(); 1249 }
1247 for (size_t stage = 0; stage < NUM_STAGES; ++stage) { 1250 UpdateVertexBuffers();
1248 UpdateUniformBuffers(stage); 1251 UpdateTransformFeedbackBuffers();
1249 UpdateStorageBuffers(stage); 1252 for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
1250 UpdateTextureBuffers(stage); 1253 UpdateUniformBuffers(stage);
1251 } 1254 UpdateStorageBuffers(stage);
1255 UpdateTextureBuffers(stage);
1256 }
1257 } while (has_deleted_buffers);
1252} 1258}
1253 1259
1254template <class P> 1260template <class P>
@@ -1262,8 +1268,8 @@ template <class P>
1262void BufferCache<P>::UpdateIndexBuffer() { 1268void BufferCache<P>::UpdateIndexBuffer() {
1263 // We have to check for the dirty flags and index count 1269 // We have to check for the dirty flags and index count
1264 // The index count is currently changed without updating the dirty flags 1270 // The index count is currently changed without updating the dirty flags
1265 const auto& index_array = maxwell3d.regs.index_array; 1271 const auto& index_array = maxwell3d->regs.index_array;
1266 auto& flags = maxwell3d.dirty.flags; 1272 auto& flags = maxwell3d->dirty.flags;
1267 if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) { 1273 if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) {
1268 return; 1274 return;
1269 } 1275 }
@@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
1272 1278
1273 const GPUVAddr gpu_addr_begin = index_array.StartAddress(); 1279 const GPUVAddr gpu_addr_begin = index_array.StartAddress();
1274 const GPUVAddr gpu_addr_end = index_array.EndAddress(); 1280 const GPUVAddr gpu_addr_end = index_array.EndAddress();
1275 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); 1281 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
1276 const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); 1282 const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
1277 const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes(); 1283 const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes();
1278 const u32 size = std::min(address_size, draw_size); 1284 const u32 size = std::min(address_size, draw_size);
@@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() {
1289 1295
1290template <class P> 1296template <class P>
1291void BufferCache<P>::UpdateVertexBuffers() { 1297void BufferCache<P>::UpdateVertexBuffers() {
1292 auto& flags = maxwell3d.dirty.flags; 1298 auto& flags = maxwell3d->dirty.flags;
1293 if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) { 1299 if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) {
1294 return; 1300 return;
1295 } 1301 }
1296 flags[Dirty::VertexBuffers] = false; 1302 flags[Dirty::VertexBuffers] = false;
@@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() {
1302 1308
1303template <class P> 1309template <class P>
1304void BufferCache<P>::UpdateVertexBuffer(u32 index) { 1310void BufferCache<P>::UpdateVertexBuffer(u32 index) {
1305 if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) { 1311 if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) {
1306 return; 1312 return;
1307 } 1313 }
1308 const auto& array = maxwell3d.regs.vertex_array[index]; 1314 const auto& array = maxwell3d->regs.vertex_array[index];
1309 const auto& limit = maxwell3d.regs.vertex_array_limit[index]; 1315 const auto& limit = maxwell3d->regs.vertex_array_limit[index];
1310 const GPUVAddr gpu_addr_begin = array.StartAddress(); 1316 const GPUVAddr gpu_addr_begin = array.StartAddress();
1311 const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; 1317 const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1;
1312 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); 1318 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
1313 u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); 1319 u32 address_size = static_cast<u32>(
1314 if (address_size >= 64_MiB) { 1320 std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
1315 // Reported vertex buffer size is very large, cap to mapped buffer size 1321 if (array.enable == 0 || address_size == 0 || !cpu_addr) {
1316 GPUVAddr submapped_addr_end = gpu_addr_begin;
1317
1318 const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)};
1319 if (ranges.size() > 0) {
1320 const auto& [addr, size] = *ranges.begin();
1321 submapped_addr_end = addr + size;
1322 }
1323
1324 address_size =
1325 std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin));
1326 }
1327 const u32 size = address_size; // TODO: Analyze stride and number of vertices
1328 if (array.enable == 0 || size == 0 || !cpu_addr) {
1329 vertex_buffers[index] = NULL_BINDING; 1322 vertex_buffers[index] = NULL_BINDING;
1330 return; 1323 return;
1331 } 1324 }
1325 if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
1326 address_size =
1327 static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
1328 }
1329 const u32 size = address_size; // TODO: Analyze stride and number of vertices
1332 vertex_buffers[index] = Binding{ 1330 vertex_buffers[index] = Binding{
1333 .cpu_addr = *cpu_addr, 1331 .cpu_addr = *cpu_addr,
1334 .size = size, 1332 .size = size,
@@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
1382 1380
1383template <class P> 1381template <class P>
1384void BufferCache<P>::UpdateTransformFeedbackBuffers() { 1382void BufferCache<P>::UpdateTransformFeedbackBuffers() {
1385 if (maxwell3d.regs.tfb_enabled == 0) { 1383 if (maxwell3d->regs.tfb_enabled == 0) {
1386 return; 1384 return;
1387 } 1385 }
1388 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { 1386 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1392,10 +1390,10 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() {
1392 1390
1393template <class P> 1391template <class P>
1394void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) { 1392void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
1395 const auto& binding = maxwell3d.regs.tfb_bindings[index]; 1393 const auto& binding = maxwell3d->regs.tfb_bindings[index];
1396 const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset; 1394 const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset;
1397 const u32 size = binding.buffer_size; 1395 const u32 size = binding.buffer_size;
1398 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 1396 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
1399 if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) { 1397 if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) {
1400 transform_feedback_buffers[index] = NULL_BINDING; 1398 transform_feedback_buffers[index] = NULL_BINDING;
1401 return; 1399 return;
@@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
1414 ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { 1412 ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
1415 Binding& binding = compute_uniform_buffers[index]; 1413 Binding& binding = compute_uniform_buffers[index];
1416 binding = NULL_BINDING; 1414 binding = NULL_BINDING;
1417 const auto& launch_desc = kepler_compute.launch_description; 1415 const auto& launch_desc = kepler_compute->launch_description;
1418 if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { 1416 if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
1419 const auto& cbuf = launch_desc.const_buffer_config[index]; 1417 const auto& cbuf = launch_desc.const_buffer_config[index];
1420 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address()); 1418 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
1421 if (cpu_addr) { 1419 if (cpu_addr) {
1422 binding.cpu_addr = *cpu_addr; 1420 binding.cpu_addr = *cpu_addr;
1423 binding.size = cbuf.size; 1421 binding.size = cbuf.size;
@@ -1567,6 +1565,8 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
1567 const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); 1565 const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
1568 const u32 size = static_cast<u32>(overlap.end - overlap.begin); 1566 const u32 size = static_cast<u32>(overlap.end - overlap.begin);
1569 const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); 1567 const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
1568 auto& new_buffer = slot_buffers[new_buffer_id];
1569 runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0);
1570 for (const BufferId overlap_id : overlap.ids) { 1570 for (const BufferId overlap_id : overlap.ids) {
1571 JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); 1571 JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
1572 } 1572 }
@@ -1695,7 +1695,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,
1695 1695
1696template <class P> 1696template <class P>
1697bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size, 1697bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
1698 std::span<u8> inlined_buffer) { 1698 std::span<const u8> inlined_buffer) {
1699 const bool is_dirty = IsRegionRegistered(dest_address, copy_size); 1699 const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
1700 if (!is_dirty) { 1700 if (!is_dirty) {
1701 return false; 1701 return false;
@@ -1831,7 +1831,7 @@ void BufferCache<P>::NotifyBufferDeletion() {
1831 dirty_uniform_buffers.fill(~u32{0}); 1831 dirty_uniform_buffers.fill(~u32{0});
1832 uniform_buffer_binding_sizes.fill({}); 1832 uniform_buffer_binding_sizes.fill({});
1833 } 1833 }
1834 auto& flags = maxwell3d.dirty.flags; 1834 auto& flags = maxwell3d->dirty.flags;
1835 flags[Dirty::IndexBuffer] = true; 1835 flags[Dirty::IndexBuffer] = true;
1836 flags[Dirty::VertexBuffers] = true; 1836 flags[Dirty::VertexBuffers] = true;
1837 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { 1837 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
@@ -1841,16 +1841,18 @@ void BufferCache<P>::NotifyBufferDeletion() {
1841} 1841}
1842 1842
1843template <class P> 1843template <class P>
1844typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const { 1844typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
1845 const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr); 1845 bool is_written) const {
1846 const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8); 1846 const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
1847 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 1847 const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8);
1848 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
1848 if (!cpu_addr || size == 0) { 1849 if (!cpu_addr || size == 0) {
1849 return NULL_BINDING; 1850 return NULL_BINDING;
1850 } 1851 }
1852 const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
1851 const Binding binding{ 1853 const Binding binding{
1852 .cpu_addr = *cpu_addr, 1854 .cpu_addr = *cpu_addr,
1853 .size = size, 1855 .size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr),
1854 .buffer_id = BufferId{}, 1856 .buffer_id = BufferId{},
1855 }; 1857 };
1856 return binding; 1858 return binding;
@@ -1859,7 +1861,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
1859template <class P> 1861template <class P>
1860typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding( 1862typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
1861 GPUVAddr gpu_addr, u32 size, PixelFormat format) { 1863 GPUVAddr gpu_addr, u32 size, PixelFormat format) {
1862 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 1864 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
1863 TextureBufferBinding binding; 1865 TextureBufferBinding binding;
1864 if (!cpu_addr || size == 0) { 1866 if (!cpu_addr || size == 0) {
1865 binding.cpu_addr = 0; 1867 binding.cpu_addr = 0;
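The buffer cache no longer captures Maxwell3D, KeplerCompute and the memory manager at construction. It now derives from VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>, whose BindToChannel() repoints the inherited maxwell3d, kepler_compute and gpu_memory pointers at the currently bound channel, which is why the hunks above now access maxwell3d->regs rather than maxwell3d.regs. A minimal hedged sketch of that pattern with a hypothetical cache (not yuzu code):

    class MyCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
    public:
        void PrepareDraw() {
            // Only valid after BindToChannel(); the base class keeps these pointers
            // aimed at the active channel's engines and address space.
            const auto& regs = maxwell3d->regs;
            const auto cpu_addr = gpu_memory->GpuToCpuAddress(regs.index_array.StartAddress());
            // ... look up or create host buffers for cpu_addr ...
        }
    };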
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp
index 8e890a85e..28a2d2090 100644
--- a/src/video_core/cdma_pusher.cpp
+++ b/src/video_core/cdma_pusher.cpp
@@ -2,20 +2,22 @@
2// SPDX-License-Identifier: MIT 2// SPDX-License-Identifier: MIT
3 3
4#include <bit> 4#include <bit>
5#include "command_classes/host1x.h"
6#include "command_classes/nvdec.h"
7#include "command_classes/vic.h"
8#include "video_core/cdma_pusher.h" 5#include "video_core/cdma_pusher.h"
9#include "video_core/command_classes/sync_manager.h"
10#include "video_core/engines/maxwell_3d.h" 6#include "video_core/engines/maxwell_3d.h"
11#include "video_core/gpu.h" 7#include "video_core/host1x/control.h"
8#include "video_core/host1x/host1x.h"
9#include "video_core/host1x/nvdec.h"
10#include "video_core/host1x/nvdec_common.h"
11#include "video_core/host1x/sync_manager.h"
12#include "video_core/host1x/vic.h"
13#include "video_core/memory_manager.h"
12 14
13namespace Tegra { 15namespace Tegra {
14CDmaPusher::CDmaPusher(GPU& gpu_) 16CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_)
15 : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)), 17 : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)),
16 vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)), 18 vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)),
17 host1x_processor(std::make_unique<Host1x>(gpu)), 19 host1x_processor(std::make_unique<Host1x::Control>(host1x)),
18 sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {} 20 sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {}
19 21
20CDmaPusher::~CDmaPusher() = default; 22CDmaPusher::~CDmaPusher() = default;
21 23
@@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
109 case ThiMethod::SetMethod1: 111 case ThiMethod::SetMethod1:
110 LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})", 112 LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
111 static_cast<u32>(vic_thi_state.method_0), data); 113 static_cast<u32>(vic_thi_state.method_0), data);
112 vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data); 114 vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0),
115 data);
113 break; 116 break;
114 default: 117 default:
115 break; 118 break;
116 } 119 }
117 break; 120 break;
118 case ChClassId::Host1x: 121 case ChClassId::Control:
119 // This device is mainly for syncpoint synchronization 122 // This device is mainly for syncpoint synchronization
120 LOG_DEBUG(Service_NVDRV, "Host1X Class Method"); 123 LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
121 host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data); 124 host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data);
122 break; 125 break;
123 default: 126 default:
124 UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class)); 127 UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h
index d6ffef95f..83112dfce 100644
--- a/src/video_core/cdma_pusher.h
+++ b/src/video_core/cdma_pusher.h
@@ -12,11 +12,13 @@
12 12
13namespace Tegra { 13namespace Tegra {
14 14
15class GPU; 15namespace Host1x {
16class Control;
16class Host1x; 17class Host1x;
17class Nvdec; 18class Nvdec;
18class SyncptIncrManager; 19class SyncptIncrManager;
19class Vic; 20class Vic;
21} // namespace Host1x
20 22
21enum class ChSubmissionMode : u32 { 23enum class ChSubmissionMode : u32 {
22 SetClass = 0, 24 SetClass = 0,
@@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 {
30 32
31enum class ChClassId : u32 { 33enum class ChClassId : u32 {
32 NoClass = 0x0, 34 NoClass = 0x0,
33 Host1x = 0x1, 35 Control = 0x1,
34 VideoEncodeMpeg = 0x20, 36 VideoEncodeMpeg = 0x20,
35 VideoEncodeNvEnc = 0x21, 37 VideoEncodeNvEnc = 0x21,
36 VideoStreamingVi = 0x30, 38 VideoStreamingVi = 0x30,
@@ -88,7 +90,7 @@ enum class ThiMethod : u32 {
88 90
89class CDmaPusher { 91class CDmaPusher {
90public: 92public:
91 explicit CDmaPusher(GPU& gpu_); 93 explicit CDmaPusher(Host1x::Host1x& host1x);
92 ~CDmaPusher(); 94 ~CDmaPusher();
93 95
94 /// Process the command entry 96 /// Process the command entry
@@ -101,11 +103,11 @@ private:
101 /// Write arguments value to the ThiRegisters member at the specified offset 103 /// Write arguments value to the ThiRegisters member at the specified offset
102 void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument); 104 void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
103 105
104 GPU& gpu; 106 Host1x::Host1x& host1x;
105 std::shared_ptr<Tegra::Nvdec> nvdec_processor; 107 std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
106 std::unique_ptr<Tegra::Vic> vic_processor; 108 std::unique_ptr<Tegra::Host1x::Vic> vic_processor;
107 std::unique_ptr<Tegra::Host1x> host1x_processor; 109 std::unique_ptr<Tegra::Host1x::Control> host1x_processor;
108 std::unique_ptr<SyncptIncrManager> sync_manager; 110 std::unique_ptr<Host1x::SyncptIncrManager> sync_manager;
109 ChClassId current_class{}; 111 ChClassId current_class{};
110 ThiRegisters vic_thi_state{}; 112 ThiRegisters vic_thi_state{};
111 ThiRegisters nvdec_thi_state{}; 113 ThiRegisters nvdec_thi_state{};
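CDmaPusher is now built around the new Tegra::Host1x::Host1x device instead of GPU, and everything it drives (Nvdec, Vic, SyncptIncrManager, plus the former Host1x command class, renamed Host1x::Control) lives in the Host1x namespace. A hedged construction sketch; how the caller obtains the device reference is outside this hunk:

    // Hypothetical helper; the real owner of Host1x::Host1x is not shown in this diff.
    void MakeVideoPipeline(Tegra::Host1x::Host1x& host1x) {
        Tegra::CDmaPusher cdma_pusher{host1x};
        // Per the constructor above, this wires up Host1x::Nvdec, Host1x::Vic,
        // Host1x::Control and Host1x::SyncptIncrManager against the same device.
    }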
diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp
deleted file mode 100644
index 11855fe10..000000000
--- a/src/video_core/command_classes/host1x.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/assert.h"
5#include "video_core/command_classes/host1x.h"
6#include "video_core/gpu.h"
7
8Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
9
10Tegra::Host1x::~Host1x() = default;
11
12void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
13 switch (method) {
14 case Method::LoadSyncptPayload32:
15 syncpoint_value = argument;
16 break;
17 case Method::WaitSyncpt:
18 case Method::WaitSyncpt32:
19 Execute(argument);
20 break;
21 default:
22 UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
23 break;
24 }
25}
26
27void Tegra::Host1x::Execute(u32 data) {
28 gpu.WaitFence(data, syncpoint_value);
29}
diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp
new file mode 100644
index 000000000..cdecc3a91
--- /dev/null
+++ b/src/video_core/control/channel_state.cpp
@@ -0,0 +1,40 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "common/assert.h"
5#include "video_core/control/channel_state.h"
6#include "video_core/dma_pusher.h"
7#include "video_core/engines/fermi_2d.h"
8#include "video_core/engines/kepler_compute.h"
9#include "video_core/engines/kepler_memory.h"
10#include "video_core/engines/maxwell_3d.h"
11#include "video_core/engines/maxwell_dma.h"
12#include "video_core/engines/puller.h"
13#include "video_core/memory_manager.h"
14
15namespace Tegra::Control {
16
17ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
18
19void ChannelState::Init(Core::System& system, GPU& gpu) {
20 ASSERT(memory_manager);
21 dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this);
22 maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager);
23 fermi_2d = std::make_unique<Engines::Fermi2D>();
24 kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
25 maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
26 kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
27 initialized = true;
28}
29
30void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
31 dma_pusher->BindRasterizer(rasterizer);
32 memory_manager->BindRasterizer(rasterizer);
33 maxwell_3d->BindRasterizer(rasterizer);
34 fermi_2d->BindRasterizer(rasterizer);
35 kepler_memory->BindRasterizer(rasterizer);
36 kepler_compute->BindRasterizer(rasterizer);
37 maxwell_dma->BindRasterizer(rasterizer);
38}
39
40} // namespace Tegra::Control
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h
new file mode 100644
index 000000000..3a7b9872c
--- /dev/null
+++ b/src/video_core/control/channel_state.h
@@ -0,0 +1,68 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <memory>
7
8#include "common/common_types.h"
9
10namespace Core {
11class System;
12}
13
14namespace VideoCore {
15class RasterizerInterface;
16}
17
18namespace Tegra {
19
20class GPU;
21
22namespace Engines {
23class Puller;
24class Fermi2D;
25class Maxwell3D;
26class MaxwellDMA;
27class KeplerCompute;
28class KeplerMemory;
29} // namespace Engines
30
31class MemoryManager;
32class DmaPusher;
33
34namespace Control {
35
36struct ChannelState {
37 explicit ChannelState(s32 bind_id);
38 ChannelState(const ChannelState& state) = delete;
39 ChannelState& operator=(const ChannelState&) = delete;
40 ChannelState(ChannelState&& other) noexcept = default;
41 ChannelState& operator=(ChannelState&& other) noexcept = default;
42
43 void Init(Core::System& system, GPU& gpu);
44
45 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
46
47 s32 bind_id = -1;
48 /// 3D engine
49 std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
50 /// 2D engine
51 std::unique_ptr<Engines::Fermi2D> fermi_2d;
52 /// Compute engine
53 std::unique_ptr<Engines::KeplerCompute> kepler_compute;
54 /// DMA engine
55 std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
56 /// Inline memory engine
57 std::unique_ptr<Engines::KeplerMemory> kepler_memory;
58
59 std::shared_ptr<MemoryManager> memory_manager;
60
61 std::unique_ptr<DmaPusher> dma_pusher;
62
63 bool initialized{};
64};
65
66} // namespace Control
67
68} // namespace Tegra
diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp
new file mode 100644
index 000000000..4ebeb6356
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.cpp
@@ -0,0 +1,14 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "video_core/control/channel_state_cache.inc"
5
6namespace VideoCommon {
7
8ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state)
9 : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute},
10 gpu_memory{*channel_state.memory_manager} {}
11
12template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>;
13
14} // namespace VideoCommon
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
new file mode 100644
index 000000000..584a0c26c
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.h
@@ -0,0 +1,101 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <deque>
7#include <limits>
8#include <mutex>
9#include <optional>
10#include <unordered_map>
11#include <vector>
12
13#include "common/common_types.h"
14
15namespace Tegra {
16
17namespace Engines {
18class Maxwell3D;
19class KeplerCompute;
20} // namespace Engines
21
22class MemoryManager;
23
24namespace Control {
25struct ChannelState;
26}
27
28} // namespace Tegra
29
30namespace VideoCommon {
31
32class ChannelInfo {
33public:
34 ChannelInfo() = delete;
35 explicit ChannelInfo(Tegra::Control::ChannelState& state);
36 ChannelInfo(const ChannelInfo& state) = delete;
37 ChannelInfo& operator=(const ChannelInfo&) = delete;
38 ChannelInfo(ChannelInfo&& other) = default;
39 ChannelInfo& operator=(ChannelInfo&& other) = default;
40
41 Tegra::Engines::Maxwell3D& maxwell3d;
42 Tegra::Engines::KeplerCompute& kepler_compute;
43 Tegra::MemoryManager& gpu_memory;
44};
45
46template <class P>
47class ChannelSetupCaches {
48public:
49 /// Operations for seting the channel of execution.
50 virtual ~ChannelSetupCaches();
51
52 /// Create channel state.
53 virtual void CreateChannel(Tegra::Control::ChannelState& channel);
54
55 /// Bind a channel for execution.
56 void BindToChannel(s32 id);
57
58 /// Erase channel's state.
59 void EraseChannel(s32 id);
60
61 Tegra::MemoryManager* GetFromID(size_t id) const {
62 std::unique_lock<std::mutex> lk(config_mutex);
63 const auto ref = address_spaces.find(id);
64 return ref->second.gpu_memory;
65 }
66
67 std::optional<size_t> getStorageID(size_t id) const {
68 std::unique_lock<std::mutex> lk(config_mutex);
69 const auto ref = address_spaces.find(id);
70 if (ref == address_spaces.end()) {
71 return std::nullopt;
72 }
73 return ref->second.storage_id;
74 }
75
76protected:
77 static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
78
79 P* channel_state;
80 size_t current_channel_id{UNSET_CHANNEL};
81 size_t current_address_space{};
82 Tegra::Engines::Maxwell3D* maxwell3d;
83 Tegra::Engines::KeplerCompute* kepler_compute;
84 Tegra::MemoryManager* gpu_memory;
85
86 std::deque<P> channel_storage;
87 std::deque<size_t> free_channel_ids;
88 std::unordered_map<s32, size_t> channel_map;
89 std::vector<size_t> active_channel_ids;
90 struct AddresSpaceRef {
91 size_t ref_count;
92 size_t storage_id;
93 Tegra::MemoryManager* gpu_memory;
94 };
95 std::unordered_map<size_t, AddresSpaceRef> address_spaces;
96 mutable std::mutex config_mutex;
97
98 virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {}
99};
100
101} // namespace VideoCommon
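Caches built on ChannelSetupCaches are told about channels explicitly: CreateChannel() registers a channel's engines and address space (ref-counting shared address spaces), BindToChannel() selects which channel subsequent cache operations see, and EraseChannel() drops that state again. A hedged lifecycle sketch from the owner's side; buffer_cache and channel are placeholders:

    // 'buffer_cache' is any ChannelSetupCaches-derived cache, 'channel' a
    // Tegra::Control::ChannelState whose memory_manager is already set.
    buffer_cache.CreateChannel(channel);          // register engines + address space
    buffer_cache.BindToChannel(channel.bind_id);  // later cache calls use this channel's state
    // ... record and execute work for this channel ...
    buffer_cache.EraseChannel(channel.bind_id);   // when the channel goes away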
diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc
new file mode 100644
index 000000000..460313893
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.inc
@@ -0,0 +1,86 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include <algorithm>
5
6#include "video_core/control/channel_state.h"
7#include "video_core/control/channel_state_cache.h"
8#include "video_core/engines/kepler_compute.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/memory_manager.h"
11
12namespace VideoCommon {
13
14template <class P>
15ChannelSetupCaches<P>::~ChannelSetupCaches() = default;
16
17template <class P>
18void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
19 std::unique_lock<std::mutex> lk(config_mutex);
20 ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0);
21 auto new_id = [this, &channel]() {
22 if (!free_channel_ids.empty()) {
23 auto id = free_channel_ids.front();
24 free_channel_ids.pop_front();
25 new (&channel_storage[id]) P(channel);
26 return id;
27 }
28 channel_storage.emplace_back(channel);
29 return channel_storage.size() - 1;
30 }();
31 channel_map.emplace(channel.bind_id, new_id);
32 if (current_channel_id != UNSET_CHANNEL) {
33 channel_state = &channel_storage[current_channel_id];
34 }
35 active_channel_ids.push_back(new_id);
36 auto as_it = address_spaces.find(channel.memory_manager->GetID());
37 if (as_it != address_spaces.end()) {
38 as_it->second.ref_count++;
39 return;
40 }
41 AddresSpaceRef new_gpu_mem_ref{
42 .ref_count = 1,
43 .storage_id = address_spaces.size(),
44 .gpu_memory = channel.memory_manager.get(),
45 };
46 address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref);
47 OnGPUASRegister(channel.memory_manager->GetID());
48}
49
50/// Bind a channel for execution.
51template <class P>
52void ChannelSetupCaches<P>::BindToChannel(s32 id) {
53 std::unique_lock<std::mutex> lk(config_mutex);
54 auto it = channel_map.find(id);
55 ASSERT(it != channel_map.end() && id >= 0);
56 current_channel_id = it->second;
57 channel_state = &channel_storage[current_channel_id];
58 maxwell3d = &channel_state->maxwell3d;
59 kepler_compute = &channel_state->kepler_compute;
60 gpu_memory = &channel_state->gpu_memory;
61 current_address_space = gpu_memory->GetID();
62}
63
64/// Erase channel's channel_state.
65template <class P>
66void ChannelSetupCaches<P>::EraseChannel(s32 id) {
67 std::unique_lock<std::mutex> lk(config_mutex);
68 const auto it = channel_map.find(id);
69 ASSERT(it != channel_map.end() && id >= 0);
70 const auto this_id = it->second;
71 free_channel_ids.push_back(this_id);
72 channel_map.erase(it);
73 if (this_id == current_channel_id) {
74 current_channel_id = UNSET_CHANNEL;
75 channel_state = nullptr;
76 maxwell3d = nullptr;
77 kepler_compute = nullptr;
78 gpu_memory = nullptr;
79 } else if (current_channel_id != UNSET_CHANNEL) {
80 channel_state = &channel_storage[current_channel_id];
81 }
82 active_channel_ids.erase(
83 std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id));
84}
85
86} // namespace VideoCommon
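channel_state_cache.cpp above illustrates the intended build pattern for this .inc: the template member definitions live in channel_state_cache.inc, and each cache that uses its own channel payload includes the .inc in one translation unit and explicitly instantiates ChannelSetupCaches for that type. A hedged sketch for a hypothetical payload MyChannelInfo (in practice a type constructible from ChannelState& with the same maxwell3d/kepler_compute/gpu_memory members as ChannelInfo):

    // my_cache.cpp -- hypothetical; mirrors what channel_state_cache.cpp does for ChannelInfo.
    #include "video_core/control/channel_state_cache.inc"

    namespace VideoCommon {
    template class ChannelSetupCaches<MyChannelInfo>;
    } // namespace VideoCommon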
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp
new file mode 100644
index 000000000..f7cbe204e
--- /dev/null
+++ b/src/video_core/control/scheduler.cpp
@@ -0,0 +1,32 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include <memory>
5
6#include "common/assert.h"
7#include "video_core/control/channel_state.h"
8#include "video_core/control/scheduler.h"
9#include "video_core/gpu.h"
10
11namespace Tegra::Control {
12Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
13
14Scheduler::~Scheduler() = default;
15
16void Scheduler::Push(s32 channel, CommandList&& entries) {
17 std::unique_lock lk(scheduling_guard);
18 auto it = channels.find(channel);
19 ASSERT(it != channels.end());
20 auto channel_state = it->second;
21 gpu.BindChannel(channel_state->bind_id);
22 channel_state->dma_pusher->Push(std::move(entries));
23 channel_state->dma_pusher->DispatchCalls();
24}
25
26void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
27 s32 channel = new_channel->bind_id;
28 std::unique_lock lk(scheduling_guard);
29 channels.emplace(channel, new_channel);
30}
31
32} // namespace Tegra::Control
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h
new file mode 100644
index 000000000..44addf61c
--- /dev/null
+++ b/src/video_core/control/scheduler.h
@@ -0,0 +1,37 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <memory>
7#include <mutex>
8#include <unordered_map>
9
10#include "video_core/dma_pusher.h"
11
12namespace Tegra {
13
14class GPU;
15
16namespace Control {
17
18struct ChannelState;
19
20class Scheduler {
21public:
22 explicit Scheduler(GPU& gpu_);
23 ~Scheduler();
24
25 void Push(s32 channel, CommandList&& entries);
26
27 void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
28
29private:
30 std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
31 std::mutex scheduling_guard;
32 GPU& gpu;
33};
34
35} // namespace Control
36
37} // namespace Tegra
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 29b8582ab..9835e3ac1 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -12,7 +12,10 @@
12 12
13namespace Tegra { 13namespace Tegra {
14 14
15DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {} 15DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
16 Control::ChannelState& channel_state_)
17 : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_,
18 *this, channel_state_} {}
16 19
17DmaPusher::~DmaPusher() = default; 20DmaPusher::~DmaPusher() = default;
18 21
@@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
21void DmaPusher::DispatchCalls() { 24void DmaPusher::DispatchCalls() {
22 MICROPROFILE_SCOPE(DispatchCalls); 25 MICROPROFILE_SCOPE(DispatchCalls);
23 26
24 gpu.SyncGuestHost();
25
26 dma_pushbuffer_subindex = 0; 27 dma_pushbuffer_subindex = 0;
27 28
28 dma_state.is_last_call = true; 29 dma_state.is_last_call = true;
@@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() {
33 } 34 }
34 } 35 }
35 gpu.FlushCommands(); 36 gpu.FlushCommands();
36 gpu.SyncGuestHost();
37 gpu.OnCommandListEnd(); 37 gpu.OnCommandListEnd();
38} 38}
39 39
@@ -76,11 +76,11 @@ bool DmaPusher::Step() {
76 // Push buffer non-empty, read a word 76 // Push buffer non-empty, read a word
77 command_headers.resize(command_list_header.size); 77 command_headers.resize(command_list_header.size);
78 if (Settings::IsGPULevelHigh()) { 78 if (Settings::IsGPULevelHigh()) {
79 gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(), 79 memory_manager.ReadBlock(dma_get, command_headers.data(),
80 command_list_header.size * sizeof(u32)); 80 command_list_header.size * sizeof(u32));
81 } else { 81 } else {
82 gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(), 82 memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
83 command_list_header.size * sizeof(u32)); 83 command_list_header.size * sizeof(u32));
84 } 84 }
85 } 85 }
86 for (std::size_t index = 0; index < command_headers.size();) { 86 for (std::size_t index = 0; index < command_headers.size();) {
@@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) {
154 154
155void DmaPusher::CallMethod(u32 argument) const { 155void DmaPusher::CallMethod(u32 argument) const {
156 if (dma_state.method < non_puller_methods) { 156 if (dma_state.method < non_puller_methods) {
157 gpu.CallMethod(GPU::MethodCall{ 157 puller.CallPullerMethod(Engines::Puller::MethodCall{
158 dma_state.method, 158 dma_state.method,
159 argument, 159 argument,
160 dma_state.subchannel, 160 dma_state.subchannel,
@@ -168,12 +168,16 @@ void DmaPusher::CallMethod(u32 argument) const {
168 168
169void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const { 169void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
170 if (dma_state.method < non_puller_methods) { 170 if (dma_state.method < non_puller_methods) {
171 gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, 171 puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
172 dma_state.method_count); 172 dma_state.method_count);
173 } else { 173 } else {
174 subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start, 174 subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start,
175 num_methods, dma_state.method_count); 175 num_methods, dma_state.method_count);
176 } 176 }
177} 177}
178 178
179void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
180 puller.BindRasterizer(rasterizer);
181}
182
179} // namespace Tegra 183} // namespace Tegra
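
With this change, DmaPusher no longer calls back into GPU for pushbuffer methods: methods below non_puller_methods (0x40) are handled by the new Engines::Puller, and everything else goes to whichever engine is bound to the subchannel. A small sketch of that dispatch rule (the 0x40 constant matches the diff; the enum is illustrative):

    #include <cstdint>

    constexpr std::uint32_t non_puller_methods = 0x40;

    enum class Target { Puller, SubchannelEngine };

    constexpr Target DispatchTarget(std::uint32_t method) {
        return method < non_puller_methods ? Target::Puller : Target::SubchannelEngine;
    }

    static_assert(DispatchTarget(0x07) == Target::Puller);            // e.g. SemaphoreOperation
    static_assert(DispatchTarget(0x6C0) == Target::SubchannelEngine); // an engine register
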
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 872fd146a..938f0f11c 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -10,6 +10,7 @@
10#include "common/bit_field.h" 10#include "common/bit_field.h"
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "video_core/engines/engine_interface.h" 12#include "video_core/engines/engine_interface.h"
13#include "video_core/engines/puller.h"
13 14
14namespace Core { 15namespace Core {
15class System; 16class System;
@@ -17,7 +18,12 @@ class System;
17 18
18namespace Tegra { 19namespace Tegra {
19 20
21namespace Control {
22struct ChannelState;
23}
24
20class GPU; 25class GPU;
26class MemoryManager;
21 27
22enum class SubmissionMode : u32 { 28enum class SubmissionMode : u32 {
23 IncreasingOld = 0, 29 IncreasingOld = 0,
@@ -31,24 +37,32 @@ enum class SubmissionMode : u32 {
31// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence 37// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
32// their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4. 38// their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4.
33// So the values you see in docs might be multiplied by 4. 39// So the values you see in docs might be multiplied by 4.
40// Register documentation:
41// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h
42//
43// Register Description (approx):
44// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt
34enum class BufferMethods : u32 { 45enum class BufferMethods : u32 {
35 BindObject = 0x0, 46 BindObject = 0x0,
47 Illegal = 0x1,
36 Nop = 0x2, 48 Nop = 0x2,
37 SemaphoreAddressHigh = 0x4, 49 SemaphoreAddressHigh = 0x4,
38 SemaphoreAddressLow = 0x5, 50 SemaphoreAddressLow = 0x5,
39 SemaphoreSequence = 0x6, 51 SemaphoreSequencePayload = 0x6,
40 SemaphoreTrigger = 0x7, 52 SemaphoreOperation = 0x7,
41 NotifyIntr = 0x8, 53 NonStallInterrupt = 0x8,
42 WrcacheFlush = 0x9, 54 WrcacheFlush = 0x9,
43 Unk28 = 0xA, 55 MemOpA = 0xA,
44 UnkCacheFlush = 0xB, 56 MemOpB = 0xB,
57 MemOpC = 0xC,
58 MemOpD = 0xD,
45 RefCnt = 0x14, 59 RefCnt = 0x14,
46 SemaphoreAcquire = 0x1A, 60 SemaphoreAcquire = 0x1A,
47 SemaphoreRelease = 0x1B, 61 SemaphoreRelease = 0x1B,
48 FenceValue = 0x1C, 62 SyncpointPayload = 0x1C,
49 FenceAction = 0x1D, 63 SyncpointOperation = 0x1D,
50 WaitForInterrupt = 0x1E, 64 WaitForIdle = 0x1E,
51 Unk7c = 0x1F, 65 CRCCheck = 0x1F,
52 Yield = 0x20, 66 Yield = 0x20,
53 NonPullerMethods = 0x40, 67 NonPullerMethods = 0x40,
54}; 68};
@@ -102,7 +116,8 @@ struct CommandList final {
102 */ 116 */
103class DmaPusher final { 117class DmaPusher final {
104public: 118public:
105 explicit DmaPusher(Core::System& system_, GPU& gpu_); 119 explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
120 Control::ChannelState& channel_state_);
106 ~DmaPusher(); 121 ~DmaPusher();
107 122
108 void Push(CommandList&& entries) { 123 void Push(CommandList&& entries) {
@@ -115,6 +130,8 @@ public:
115 subchannels[subchannel_id] = engine; 130 subchannels[subchannel_id] = engine;
116 } 131 }
117 132
133 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
134
118private: 135private:
119 static constexpr u32 non_puller_methods = 0x40; 136 static constexpr u32 non_puller_methods = 0x40;
120 static constexpr u32 max_subchannels = 8; 137 static constexpr u32 max_subchannels = 8;
@@ -148,6 +165,8 @@ private:
148 165
149 GPU& gpu; 166 GPU& gpu;
150 Core::System& system; 167 Core::System& system;
168 MemoryManager& memory_manager;
169 mutable Engines::Puller puller;
151}; 170};
152 171
153} // namespace Tegra 172} // namespace Tegra
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 6ff5b1eca..a34819234 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -3,6 +3,7 @@
3 3
4#include <cstring> 4#include <cstring>
5 5
6#include "common/algorithm.h"
6#include "common/assert.h" 7#include "common/assert.h"
7#include "video_core/engines/engine_upload.h" 8#include "video_core/engines/engine_upload.h"
8#include "video_core/memory_manager.h" 9#include "video_core/memory_manager.h"
@@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
34 if (!is_last_call) { 35 if (!is_last_call) {
35 return; 36 return;
36 } 37 }
38 ProcessData(inner_buffer);
39}
40
41void State::ProcessData(const u32* data, size_t num_data) {
42 std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32));
43 ProcessData(read_buffer);
44}
45
46void State::ProcessData(std::span<const u8> read_buffer) {
37 const GPUVAddr address{regs.dest.Address()}; 47 const GPUVAddr address{regs.dest.Address()};
38 if (is_linear) { 48 if (is_linear) {
39 rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer); 49 if (regs.line_count == 1) {
50 rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer);
51 } else {
52 for (u32 line = 0; line < regs.line_count; ++line) {
53 const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch;
54 memory_manager.WriteBlockUnsafe(
55 dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in,
56 regs.line_length_in);
57 }
58 memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count);
59 }
40 } else { 60 } else {
41 UNIMPLEMENTED_IF(regs.dest.z != 0); 61 u32 width = regs.dest.width;
42 UNIMPLEMENTED_IF(regs.dest.depth != 1); 62 u32 x_elements = regs.line_length_in;
43 UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0); 63 u32 x_offset = regs.dest.x;
44 UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0); 64 const u32 bpp_shift = Common::FoldRight(
65 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
66 width, x_elements, x_offset, static_cast<u32>(address));
67 width >>= bpp_shift;
68 x_elements >>= bpp_shift;
69 x_offset >>= bpp_shift;
70 const u32 bytes_per_pixel = 1U << bpp_shift;
45 const std::size_t dst_size = Tegra::Texture::CalculateSize( 71 const std::size_t dst_size = Tegra::Texture::CalculateSize(
46 true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0); 72 true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
73 regs.dest.BlockHeight(), regs.dest.BlockDepth());
47 tmp_buffer.resize(dst_size); 74 tmp_buffer.resize(dst_size);
48 memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size); 75 memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
49 Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y, 76 Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
50 regs.dest.BlockHeight(), copy_size, inner_buffer.data(), 77 regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
51 tmp_buffer.data()); 78 x_elements, regs.line_count, regs.dest.BlockHeight(),
79 regs.dest.BlockDepth(), regs.line_length_in);
52 memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size); 80 memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size);
53 } 81 }
54} 82}
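
The rewritten linear-upload path derives a bytes-per-pixel shift from the common power-of-two alignment of the width, line length, x offset, and destination address, capped at 16-byte texels, and then rescales those values into texel units. A sketch of what the Common::FoldRight expression is assumed to compute:

    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <initializer_list>

    // Assumed equivalent of the FoldRight/countr_zero expression: the shift is the
    // smallest trailing-zero count among the operands, capped at 4 (i.e. 16 bytes).
    constexpr std::uint32_t BppShift(std::uint32_t width, std::uint32_t x_elements,
                                     std::uint32_t x_offset, std::uint32_t address) {
        std::uint32_t shift = 4U;
        for (const std::uint32_t value : {width, x_elements, x_offset, address}) {
            shift = std::min(shift, static_cast<std::uint32_t>(std::countr_zero(value)));
        }
        return shift;
    }

    static_assert(BppShift(64, 256, 0, 0x1000) == 4); // everything 16-byte aligned
    static_assert(BppShift(62, 256, 0, 0x1000) == 1); // width is only 2-aligned
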
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index 94ff3314a..f08f6e36a 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -3,6 +3,7 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <span>
6#include <vector> 7#include <vector>
7#include "common/bit_field.h" 8#include "common/bit_field.h"
8#include "common/common_types.h" 9#include "common/common_types.h"
@@ -33,7 +34,7 @@ struct Registers {
33 u32 width; 34 u32 width;
34 u32 height; 35 u32 height;
35 u32 depth; 36 u32 depth;
36 u32 z; 37 u32 layer;
37 u32 x; 38 u32 x;
38 u32 y; 39 u32 y;
39 40
@@ -62,11 +63,14 @@ public:
62 63
63 void ProcessExec(bool is_linear_); 64 void ProcessExec(bool is_linear_);
64 void ProcessData(u32 data, bool is_last_call); 65 void ProcessData(u32 data, bool is_last_call);
66 void ProcessData(const u32* data, size_t num_data);
65 67
66 /// Binds a rasterizer to this engine. 68 /// Binds a rasterizer to this engine.
67 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); 69 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
68 70
69private: 71private:
72 void ProcessData(std::span<const u8> read_buffer);
73
70 u32 write_offset = 0; 74 u32 write_offset = 0;
71 u32 copy_size = 0; 75 u32 copy_size = 0;
72 std::vector<u8> inner_buffer; 76 std::vector<u8> inner_buffer;
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 5db254d94..7c50bdbe0 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
36 } 36 }
37 case KEPLER_COMPUTE_REG_INDEX(data_upload): { 37 case KEPLER_COMPUTE_REG_INDEX(data_upload): {
38 upload_state.ProcessData(method_argument, is_last_call); 38 upload_state.ProcessData(method_argument, is_last_call);
39 if (is_last_call) {
40 }
41 break; 39 break;
42 } 40 }
43 case KEPLER_COMPUTE_REG_INDEX(launch): 41 case KEPLER_COMPUTE_REG_INDEX(launch):
@@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
50 48
51void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount, 49void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
52 u32 methods_pending) { 50 u32 methods_pending) {
53 for (std::size_t i = 0; i < amount; i++) { 51 switch (method) {
54 CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); 52 case KEPLER_COMPUTE_REG_INDEX(data_upload):
53 upload_state.ProcessData(base_start, static_cast<size_t>(amount));
54 return;
55 default:
56 for (std::size_t i = 0; i < amount; i++) {
57 CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
58 }
59 break;
55 } 60 }
56} 61}
57 62
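
KeplerCompute (and, below, KeplerMemory and Maxwell3D) now route an entire run of data_upload arguments through a single ProcessData(base_start, amount) call instead of replaying CallMethod once per word. A reduced sketch of that batching, with a plain byte vector standing in for the engine's upload buffer:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FakeUploadState {
        std::vector<std::uint8_t> inner_buffer;

        // Batched path: append num_data 32-bit words in one call.
        void ProcessData(const std::uint32_t* data, std::size_t num_data) {
            const auto* bytes = reinterpret_cast<const std::uint8_t*>(data);
            inner_buffer.insert(inner_buffer.end(), bytes,
                                bytes + num_data * sizeof(std::uint32_t));
        }
    };

    // Usage mirrors the new CallMultiMethod case:
    //   upload.ProcessData(base_start, static_cast<std::size_t>(amount));
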
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index e2b029542..a3fbab1e5 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
33 } 33 }
34 case KEPLERMEMORY_REG_INDEX(data): { 34 case KEPLERMEMORY_REG_INDEX(data): {
35 upload_state.ProcessData(method_argument, is_last_call); 35 upload_state.ProcessData(method_argument, is_last_call);
36 if (is_last_call) {
37 }
38 break; 36 break;
39 } 37 }
40 } 38 }
@@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
42 40
43void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount, 41void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
44 u32 methods_pending) { 42 u32 methods_pending) {
45 for (std::size_t i = 0; i < amount; i++) { 43 switch (method) {
46 CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); 44 case KEPLERMEMORY_REG_INDEX(data):
45 upload_state.ProcessData(base_start, static_cast<size_t>(amount));
46 return;
47 default:
48 for (std::size_t i = 0; i < amount; i++) {
49 CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
50 }
51 break;
47 } 52 }
48} 53}
49 54
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 3a4646289..3c6e44a25 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -219,6 +219,8 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
219 regs.index_array.count = regs.small_index_2.count; 219 regs.index_array.count = regs.small_index_2.count;
220 regs.index_array.first = regs.small_index_2.first; 220 regs.index_array.first = regs.small_index_2.first;
221 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; 221 dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
 222 // A macro issues this method repeatedly; should each call increase the instance count?
223 // Used by Hades and likely other Vulkan games.
222 return DrawArrays(); 224 return DrawArrays();
223 case MAXWELL3D_REG_INDEX(topology_override): 225 case MAXWELL3D_REG_INDEX(topology_override):
224 use_topology_override = true; 226 use_topology_override = true;
@@ -237,11 +239,12 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
237 return upload_state.ProcessExec(regs.exec_upload.linear != 0); 239 return upload_state.ProcessExec(regs.exec_upload.linear != 0);
238 case MAXWELL3D_REG_INDEX(data_upload): 240 case MAXWELL3D_REG_INDEX(data_upload):
239 upload_state.ProcessData(argument, is_last_call); 241 upload_state.ProcessData(argument, is_last_call);
240 if (is_last_call) {
241 }
242 return; 242 return;
243 case MAXWELL3D_REG_INDEX(fragment_barrier): 243 case MAXWELL3D_REG_INDEX(fragment_barrier):
244 return rasterizer->FragmentBarrier(); 244 return rasterizer->FragmentBarrier();
245 case MAXWELL3D_REG_INDEX(invalidate_texture_data_cache):
246 rasterizer->InvalidateGPUCache();
247 return rasterizer->WaitForIdle();
245 case MAXWELL3D_REG_INDEX(tiled_cache_barrier): 248 case MAXWELL3D_REG_INDEX(tiled_cache_barrier):
246 return rasterizer->TiledCacheBarrier(); 249 return rasterizer->TiledCacheBarrier();
247 } 250 }
@@ -311,6 +314,9 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
311 case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15: 314 case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
312 ProcessCBMultiData(base_start, amount); 315 ProcessCBMultiData(base_start, amount);
313 break; 316 break;
317 case MAXWELL3D_REG_INDEX(data_upload):
318 upload_state.ProcessData(base_start, static_cast<size_t>(amount));
319 return;
314 default: 320 default:
315 for (std::size_t i = 0; i < amount; i++) { 321 for (std::size_t i = 0; i < amount; i++) {
316 CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1); 322 CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
@@ -447,18 +453,10 @@ void Maxwell3D::ProcessFirmwareCall4() {
447} 453}
448 454
449void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { 455void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
450 struct LongQueryResult {
451 u64_le value;
452 u64_le timestamp;
453 };
454 static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size");
455 const GPUVAddr sequence_address{regs.query.QueryAddress()}; 456 const GPUVAddr sequence_address{regs.query.QueryAddress()};
456 if (long_query) { 457 if (long_query) {
457 // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast 458 memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
458 // GPU, this command may actually take a while to complete in real hardware due to GPU 459 memory_manager.Write<u64>(sequence_address, payload);
459 // wait queues.
460 LongQueryResult query_result{payload, system.GPU().GetTicks()};
461 memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result));
462 } else { 460 } else {
463 memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload)); 461 memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload));
464 } 462 }
@@ -472,10 +470,25 @@ void Maxwell3D::ProcessQueryGet() {
472 470
473 switch (regs.query.query_get.operation) { 471 switch (regs.query.query_get.operation) {
474 case Regs::QueryOperation::Release: 472 case Regs::QueryOperation::Release:
475 if (regs.query.query_get.fence == 1) { 473 if (regs.query.query_get.fence == 1 || regs.query.query_get.short_query != 0) {
476 rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence); 474 const GPUVAddr sequence_address{regs.query.QueryAddress()};
475 const u32 payload = regs.query.query_sequence;
476 std::function<void()> operation([this, sequence_address, payload] {
477 memory_manager.Write<u32>(sequence_address, payload);
478 });
479 rasterizer->SignalFence(std::move(operation));
477 } else { 480 } else {
478 StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); 481 struct LongQueryResult {
482 u64_le value;
483 u64_le timestamp;
484 };
485 const GPUVAddr sequence_address{regs.query.QueryAddress()};
486 const u32 payload = regs.query.query_sequence;
487 std::function<void()> operation([this, sequence_address, payload] {
488 memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
489 memory_manager.Write<u64>(sequence_address, payload);
490 });
491 rasterizer->SyncOperation(std::move(operation));
479 } 492 }
480 break; 493 break;
481 case Regs::QueryOperation::Acquire: 494 case Regs::QueryOperation::Acquire:
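
Query releases no longer write guest memory inline; the address and payload are captured in a std::function that the rasterizer runs once the corresponding fence is reached (SignalFence for the 32-bit release, SyncOperation for the 128-bit value-plus-timestamp result). A toy model of that deferral, using a local queue in place of the rasterizer:

    #include <cstdint>
    #include <deque>
    #include <functional>
    #include <utility>

    std::deque<std::function<void()>> pending; // stand-in for the fence manager's queue

    void SignalFenceLike(std::function<void()>&& op) {
        pending.push_back(std::move(op));
    }

    void ReleaseQuery(std::uint32_t* guest_word, std::uint32_t payload) {
        // Capture now, write later: guest memory is untouched until the fence fires.
        SignalFenceLike([=] { *guest_word = payload; });
    }

    void OnFenceReached() {
        for (auto& op : pending) {
            op();
        }
        pending.clear();
    }
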
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 0efe58282..3909d36c1 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -1,6 +1,7 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/algorithm.h"
4#include "common/assert.h" 5#include "common/assert.h"
5#include "common/logging/log.h" 6#include "common/logging/log.h"
6#include "common/microprofile.h" 7#include "common/microprofile.h"
@@ -54,8 +55,6 @@ void MaxwellDMA::Launch() {
54 const LaunchDMA& launch = regs.launch_dma; 55 const LaunchDMA& launch = regs.launch_dma;
55 ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); 56 ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
56 ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); 57 ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
57 ASSERT(regs.dst_params.origin.x == 0);
58 ASSERT(regs.dst_params.origin.y == 0);
59 58
60 const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; 59 const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
61 const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH; 60 const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
@@ -121,23 +120,40 @@ void MaxwellDMA::CopyPitchToPitch() {
121 120
122void MaxwellDMA::CopyBlockLinearToPitch() { 121void MaxwellDMA::CopyBlockLinearToPitch() {
123 UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0); 122 UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
124 UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0);
125 UNIMPLEMENTED_IF(regs.src_params.layer != 0); 123 UNIMPLEMENTED_IF(regs.src_params.layer != 0);
126 124
125 const bool is_remapping = regs.launch_dma.remap_enable != 0;
126
127 // Optimized path for micro copies. 127 // Optimized path for micro copies.
128 const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; 128 const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
129 if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X && 129 if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
130 regs.src_params.height > GOB_SIZE_Y) { 130 regs.src_params.height > GOB_SIZE_Y) {
131 FastCopyBlockLinearToPitch(); 131 FastCopyBlockLinearToPitch();
132 return; 132 return;
133 } 133 }
134 134
135 // Deswizzle the input and copy it over. 135 // Deswizzle the input and copy it over.
136 UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
137 const u32 bytes_per_pixel =
138 regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
139 const Parameters& src_params = regs.src_params; 136 const Parameters& src_params = regs.src_params;
140 const u32 width = src_params.width; 137
138 const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
139 const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
140
141 const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
142
143 u32 width = src_params.width;
144 u32 x_elements = regs.line_length_in;
145 u32 x_offset = src_params.origin.x;
146 u32 bpp_shift = 0U;
147 if (!is_remapping) {
148 bpp_shift = Common::FoldRight(
149 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
150 width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
151 width >>= bpp_shift;
152 x_elements >>= bpp_shift;
153 x_offset >>= bpp_shift;
154 }
155
156 const u32 bytes_per_pixel = base_bpp << bpp_shift;
141 const u32 height = src_params.height; 157 const u32 height = src_params.height;
142 const u32 depth = src_params.depth; 158 const u32 depth = src_params.depth;
143 const u32 block_height = src_params.block_size.height; 159 const u32 block_height = src_params.block_size.height;
@@ -155,30 +171,45 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
155 memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); 171 memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
156 memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); 172 memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
157 173
158 UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel, 174 UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
159 block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(), 175 src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
160 read_buffer.data()); 176 regs.pitch_out);
161 177
162 memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); 178 memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
163} 179}
164 180
165void MaxwellDMA::CopyPitchToBlockLinear() { 181void MaxwellDMA::CopyPitchToBlockLinear() {
166 UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one"); 182 UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
167 UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0); 183 UNIMPLEMENTED_IF(regs.dst_params.layer != 0);
184
185 const bool is_remapping = regs.launch_dma.remap_enable != 0;
186 const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
187 const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
168 188
169 const auto& dst_params = regs.dst_params; 189 const auto& dst_params = regs.dst_params;
170 const u32 bytes_per_pixel = 190
171 regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1; 191 const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
172 const u32 width = dst_params.width; 192
193 u32 width = dst_params.width;
194 u32 x_elements = regs.line_length_in;
195 u32 x_offset = dst_params.origin.x;
196 u32 bpp_shift = 0U;
197 if (!is_remapping) {
198 bpp_shift = Common::FoldRight(
199 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
200 width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
201 width >>= bpp_shift;
202 x_elements >>= bpp_shift;
203 x_offset >>= bpp_shift;
204 }
205
206 const u32 bytes_per_pixel = base_bpp << bpp_shift;
173 const u32 height = dst_params.height; 207 const u32 height = dst_params.height;
174 const u32 depth = dst_params.depth; 208 const u32 depth = dst_params.depth;
175 const u32 block_height = dst_params.block_size.height; 209 const u32 block_height = dst_params.block_size.height;
176 const u32 block_depth = dst_params.block_size.depth; 210 const u32 block_depth = dst_params.block_size.depth;
177 const size_t dst_size = 211 const size_t dst_size =
178 CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth); 212 CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
179 const size_t dst_layer_size =
180 CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);
181
182 const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count; 213 const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
183 214
184 if (read_buffer.size() < src_size) { 215 if (read_buffer.size() < src_size) {
@@ -188,32 +219,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
188 write_buffer.resize(dst_size); 219 write_buffer.resize(dst_size);
189 } 220 }
190 221
222 memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
191 if (Settings::IsGPULevelExtreme()) { 223 if (Settings::IsGPULevelExtreme()) {
192 memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
193 memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); 224 memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
194 } else { 225 } else {
195 memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
196 memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); 226 memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
197 } 227 }
198 228
199 // If the input is linear and the output is tiled, swizzle the input and copy it over. 229 // If the input is linear and the output is tiled, swizzle the input and copy it over.
200 if (regs.dst_params.block_size.depth > 0) { 230 SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
201 ASSERT(dst_params.layer == 0); 231 dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
202 SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height, 232 regs.pitch_in);
203 bytes_per_pixel, block_height, block_depth, dst_params.origin.x,
204 dst_params.origin.y, write_buffer.data(), read_buffer.data());
205 } else {
206 SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel,
207 write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(),
208 block_height, dst_params.origin.x, dst_params.origin.y);
209 }
210 233
211 memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); 234 memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
212} 235}
213 236
214void MaxwellDMA::FastCopyBlockLinearToPitch() { 237void MaxwellDMA::FastCopyBlockLinearToPitch() {
215 const u32 bytes_per_pixel = 238 const u32 bytes_per_pixel = 1U;
216 regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
217 const size_t src_size = GOB_SIZE; 239 const size_t src_size = GOB_SIZE;
218 const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; 240 const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
219 u32 pos_x = regs.src_params.origin.x; 241 u32 pos_x = regs.src_params.origin.x;
@@ -239,9 +261,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
239 memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); 261 memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
240 } 262 }
241 263
242 UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width, 264 UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
243 bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y, 265 regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
244 write_buffer.data(), read_buffer.data()); 266 regs.src_params.block_size.height, regs.src_params.block_size.depth,
267 regs.pitch_out);
245 268
246 memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); 269 memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
247} 270}
@@ -249,16 +272,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
249void MaxwellDMA::ReleaseSemaphore() { 272void MaxwellDMA::ReleaseSemaphore() {
250 const auto type = regs.launch_dma.semaphore_type; 273 const auto type = regs.launch_dma.semaphore_type;
251 const GPUVAddr address = regs.semaphore.address; 274 const GPUVAddr address = regs.semaphore.address;
275 const u32 payload = regs.semaphore.payload;
252 switch (type) { 276 switch (type) {
253 case LaunchDMA::SemaphoreType::NONE: 277 case LaunchDMA::SemaphoreType::NONE:
254 break; 278 break;
255 case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: 279 case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
256 memory_manager.Write<u32>(address, regs.semaphore.payload); 280 std::function<void()> operation(
281 [this, address, payload] { memory_manager.Write<u32>(address, payload); });
282 rasterizer->SignalFence(std::move(operation));
257 break; 283 break;
258 case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: 284 }
259 memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload)); 285 case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
260 memory_manager.Write<u64>(address + 8, system.GPU().GetTicks()); 286 std::function<void()> operation([this, address, payload] {
287 memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
288 memory_manager.Write<u64>(address, payload);
289 });
290 rasterizer->SignalFence(std::move(operation));
261 break; 291 break;
292 }
262 default: 293 default:
263 ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value())); 294 ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
264 } 295 }
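
With remapping enabled, both DMA copy paths now size texels from the remap constant, (num_dst_components_minus_one + 1) * (component_size_minus_one + 1); without remapping they fall back to the alignment-derived shift shown earlier. A small sketch of that base bytes-per-pixel choice:

    #include <cstdint>

    // Field names mirror the RemapConst bitfields in the diff; values are examples.
    constexpr std::uint32_t BaseBpp(bool is_remapping,
                                    std::uint32_t num_dst_components_minus_one,
                                    std::uint32_t component_size_minus_one) {
        return !is_remapping
                   ? 1U
                   : (num_dst_components_minus_one + 1) * (component_size_minus_one + 1);
    }

    static_assert(BaseBpp(false, 0, 0) == 1); // no remap: start at one byte per element
    static_assert(BaseBpp(true, 3, 3) == 16); // 4 components x 4 bytes (e.g. RGBA32)
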
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 074bac92c..bc48320ce 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -189,10 +189,16 @@ public:
189 BitField<4, 3, Swizzle> dst_y; 189 BitField<4, 3, Swizzle> dst_y;
190 BitField<8, 3, Swizzle> dst_z; 190 BitField<8, 3, Swizzle> dst_z;
191 BitField<12, 3, Swizzle> dst_w; 191 BitField<12, 3, Swizzle> dst_w;
192 BitField<0, 12, u32> dst_components_raw;
192 BitField<16, 2, u32> component_size_minus_one; 193 BitField<16, 2, u32> component_size_minus_one;
193 BitField<20, 2, u32> num_src_components_minus_one; 194 BitField<20, 2, u32> num_src_components_minus_one;
194 BitField<24, 2, u32> num_dst_components_minus_one; 195 BitField<24, 2, u32> num_dst_components_minus_one;
195 }; 196 };
197
198 Swizzle GetComponent(size_t i) const {
199 const u32 raw = dst_components_raw;
200 return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
201 }
196 }; 202 };
197 static_assert(sizeof(RemapConst) == 12); 203 static_assert(sizeof(RemapConst) == 12);
198 204
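
RemapConst now also exposes the four destination selectors as a packed 12-bit field, and GetComponent(i) extracts component i from bits [3*i, 3*i + 3). A standalone sketch of that decode; the Swizzle values are illustrative stand-ins, not copied from the header:

    #include <cstddef>
    #include <cstdint>

    enum class Swizzle : std::uint32_t { SRC_X = 0, SRC_Y, SRC_Z, SRC_W, CONST_A, CONST_B, NO_WRITE };

    constexpr Swizzle GetComponent(std::uint32_t dst_components_raw, std::size_t i) {
        return static_cast<Swizzle>((dst_components_raw >> (i * 3)) & 0x7);
    }

    static_assert(GetComponent(0b100'011'010'001, 0) == Swizzle::SRC_Y);   // low 3 bits
    static_assert(GetComponent(0b100'011'010'001, 3) == Swizzle::CONST_A); // bits 9..11
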
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
new file mode 100644
index 000000000..cca890792
--- /dev/null
+++ b/src/video_core/engines/puller.cpp
@@ -0,0 +1,306 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "common/assert.h"
5#include "common/logging/log.h"
6#include "common/settings.h"
7#include "core/core.h"
8#include "video_core/control/channel_state.h"
9#include "video_core/dma_pusher.h"
10#include "video_core/engines/fermi_2d.h"
11#include "video_core/engines/kepler_compute.h"
12#include "video_core/engines/kepler_memory.h"
13#include "video_core/engines/maxwell_3d.h"
14#include "video_core/engines/maxwell_dma.h"
15#include "video_core/engines/puller.h"
16#include "video_core/gpu.h"
17#include "video_core/memory_manager.h"
18#include "video_core/rasterizer_interface.h"
19
20namespace Tegra::Engines {
21
22Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_,
23 Control::ChannelState& channel_state_)
24 : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{
25 channel_state_} {}
26
27Puller::~Puller() = default;
28
29void Puller::ProcessBindMethod(const MethodCall& method_call) {
30 // Bind the current subchannel to the desired engine id.
31 LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
32 method_call.argument);
33 const auto engine_id = static_cast<EngineID>(method_call.argument);
 34 bound_engines[method_call.subchannel] = engine_id;
35 switch (engine_id) {
36 case EngineID::FERMI_TWOD_A:
37 dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel);
38 break;
39 case EngineID::MAXWELL_B:
40 dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel);
41 break;
42 case EngineID::KEPLER_COMPUTE_B:
43 dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel);
44 break;
45 case EngineID::MAXWELL_DMA_COPY_A:
46 dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel);
47 break;
48 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
49 dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel);
50 break;
51 default:
52 UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
53 }
54}
55
56void Puller::ProcessFenceActionMethod() {
57 switch (regs.fence_action.op) {
58 case Puller::FenceOperation::Acquire:
59 // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
60 // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
61 rasterizer->ReleaseFences();
62 break;
63 case Puller::FenceOperation::Increment:
64 rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
65 break;
66 default:
67 UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
68 }
69}
70
71void Puller::ProcessSemaphoreTriggerMethod() {
72 const auto semaphoreOperationMask = 0xF;
73 const auto op =
74 static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
75 if (op == GpuSemaphoreOperation::WriteLong) {
76 const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
77 const u32 payload = regs.semaphore_sequence;
78 std::function<void()> operation([this, sequence_address, payload] {
79 memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
80 memory_manager.Write<u64>(sequence_address, payload);
81 });
82 rasterizer->SignalFence(std::move(operation));
83 } else {
84 do {
85 const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
86 regs.acquire_source = true;
87 regs.acquire_value = regs.semaphore_sequence;
88 if (op == GpuSemaphoreOperation::AcquireEqual) {
89 regs.acquire_active = true;
90 regs.acquire_mode = false;
91 if (word != regs.acquire_value) {
92 rasterizer->ReleaseFences();
93 continue;
94 }
95 } else if (op == GpuSemaphoreOperation::AcquireGequal) {
96 regs.acquire_active = true;
97 regs.acquire_mode = true;
98 if (word < regs.acquire_value) {
99 rasterizer->ReleaseFences();
100 continue;
101 }
102 } else if (op == GpuSemaphoreOperation::AcquireMask) {
103 if (word && regs.semaphore_sequence == 0) {
104 rasterizer->ReleaseFences();
105 continue;
106 }
107 } else {
108 LOG_ERROR(HW_GPU, "Invalid semaphore operation");
109 }
110 } while (false);
111 }
112}
113
114void Puller::ProcessSemaphoreRelease() {
115 const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
116 const u32 payload = regs.semaphore_release;
117 std::function<void()> operation([this, sequence_address, payload] {
118 memory_manager.Write<u32>(sequence_address, payload);
119 });
120 rasterizer->SyncOperation(std::move(operation));
121}
122
123void Puller::ProcessSemaphoreAcquire() {
124 u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
125 const auto value = regs.semaphore_acquire;
126 while (word != value) {
127 regs.acquire_active = true;
128 regs.acquire_value = value;
129 std::this_thread::sleep_for(std::chrono::milliseconds(1));
130 rasterizer->ReleaseFences();
131 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
132 // TODO(kemathe73) figure out how to do the acquire_timeout
133 regs.acquire_mode = false;
134 regs.acquire_source = false;
135 }
136}
137
138/// Calls a GPU puller method.
139void Puller::CallPullerMethod(const MethodCall& method_call) {
140 regs.reg_array[method_call.method] = method_call.argument;
141 const auto method = static_cast<BufferMethods>(method_call.method);
142
143 switch (method) {
144 case BufferMethods::BindObject: {
145 ProcessBindMethod(method_call);
146 break;
147 }
148 case BufferMethods::Nop:
149 case BufferMethods::SemaphoreAddressHigh:
150 case BufferMethods::SemaphoreAddressLow:
151 case BufferMethods::SemaphoreSequencePayload:
152 case BufferMethods::SyncpointPayload:
153 break;
154 case BufferMethods::WrcacheFlush:
155 case BufferMethods::RefCnt:
156 rasterizer->SignalReference();
157 break;
158 case BufferMethods::SyncpointOperation:
159 ProcessFenceActionMethod();
160 break;
161 case BufferMethods::WaitForIdle:
162 rasterizer->WaitForIdle();
163 break;
164 case BufferMethods::SemaphoreOperation: {
165 ProcessSemaphoreTriggerMethod();
166 break;
167 }
168 case BufferMethods::NonStallInterrupt: {
169 LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented");
170 break;
171 }
172 case BufferMethods::MemOpA: {
 173 LOG_ERROR(HW_GPU, "Memory Operation A not implemented");
174 break;
175 }
176 case BufferMethods::MemOpB: {
 177 // TODO: implement this more accurately.
178 rasterizer->InvalidateGPUCache();
179 break;
180 }
181 case BufferMethods::MemOpC:
182 case BufferMethods::MemOpD: {
 183 LOG_ERROR(HW_GPU, "Memory Operations C and D not implemented");
184 break;
185 }
186 case BufferMethods::SemaphoreAcquire: {
187 ProcessSemaphoreAcquire();
188 break;
189 }
190 case BufferMethods::SemaphoreRelease: {
191 ProcessSemaphoreRelease();
192 break;
193 }
194 case BufferMethods::Yield: {
195 // TODO(Kmather73): Research and implement this method.
196 LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
197 break;
198 }
199 default:
200 LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
201 break;
202 }
203}
204
205/// Calls a GPU engine method.
206void Puller::CallEngineMethod(const MethodCall& method_call) {
207 const EngineID engine = bound_engines[method_call.subchannel];
208
209 switch (engine) {
210 case EngineID::FERMI_TWOD_A:
211 channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument,
212 method_call.IsLastCall());
213 break;
214 case EngineID::MAXWELL_B:
215 channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument,
216 method_call.IsLastCall());
217 break;
218 case EngineID::KEPLER_COMPUTE_B:
219 channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument,
220 method_call.IsLastCall());
221 break;
222 case EngineID::MAXWELL_DMA_COPY_A:
223 channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument,
224 method_call.IsLastCall());
225 break;
226 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
227 channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument,
228 method_call.IsLastCall());
229 break;
230 default:
231 UNIMPLEMENTED_MSG("Unimplemented engine");
232 }
233}
234
235/// Calls a GPU engine multivalue method.
236void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
237 u32 methods_pending) {
238 const EngineID engine = bound_engines[subchannel];
239
240 switch (engine) {
241 case EngineID::FERMI_TWOD_A:
242 channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
243 break;
244 case EngineID::MAXWELL_B:
245 channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
246 break;
247 case EngineID::KEPLER_COMPUTE_B:
248 channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
249 break;
250 case EngineID::MAXWELL_DMA_COPY_A:
251 channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
252 break;
253 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
254 channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
255 break;
256 default:
257 UNIMPLEMENTED_MSG("Unimplemented engine");
258 }
259}
260
261/// Calls a GPU method.
262void Puller::CallMethod(const MethodCall& method_call) {
263 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
264 method_call.subchannel);
265
266 ASSERT(method_call.subchannel < bound_engines.size());
267
268 if (ExecuteMethodOnEngine(method_call.method)) {
269 CallEngineMethod(method_call);
270 } else {
271 CallPullerMethod(method_call);
272 }
273}
274
275/// Calls a GPU multivalue method.
276void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
277 u32 methods_pending) {
278 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
279
280 ASSERT(subchannel < bound_engines.size());
281
282 if (ExecuteMethodOnEngine(method)) {
283 CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
284 } else {
285 for (std::size_t i = 0; i < amount; i++) {
286 CallPullerMethod(MethodCall{
287 method,
288 base_start[i],
289 subchannel,
290 methods_pending - static_cast<u32>(i),
291 });
292 }
293 }
294}
295
296void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
297 rasterizer = rasterizer_;
298}
299
300/// Determines whether the method should be executed on the bound engine rather than the puller.
301[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) {
302 const auto buffer_method = static_cast<BufferMethods>(method);
303 return buffer_method >= BufferMethods::NonPullerMethods;
304}
305
306} // namespace Tegra::Engines
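
The WriteLong semaphore path produces a 16-byte result: the 32-bit payload, zero-extended, at the semaphore address and the GPU tick counter at address + 8, both written through a deferred SignalFence operation. A sketch of just that layout, with a raw byte buffer standing in for Tegra::MemoryManager:

    #include <cstdint>
    #include <cstring>

    void WriteLongResult(std::uint8_t* semaphore, std::uint32_t payload, std::uint64_t gpu_ticks) {
        const std::uint64_t value = payload; // zero-extended, as in Write<u64>(address, payload)
        std::memcpy(semaphore, &value, sizeof(value));
        std::memcpy(semaphore + sizeof(std::uint64_t), &gpu_ticks, sizeof(gpu_ticks));
    }
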
diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h
new file mode 100644
index 000000000..d4175ee94
--- /dev/null
+++ b/src/video_core/engines/puller.h
@@ -0,0 +1,177 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <array>
7#include <cstddef>
8#include <vector>
9#include "common/bit_field.h"
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "video_core/engines/engine_interface.h"
13
14namespace Core {
15class System;
16}
17
18namespace Tegra {
19class MemoryManager;
20class DmaPusher;
21
22enum class EngineID {
23 FERMI_TWOD_A = 0x902D, // 2D Engine
24 MAXWELL_B = 0xB197, // 3D Engine
25 KEPLER_COMPUTE_B = 0xB1C0,
26 KEPLER_INLINE_TO_MEMORY_B = 0xA140,
27 MAXWELL_DMA_COPY_A = 0xB0B5,
28};
29
30namespace Control {
31struct ChannelState;
32}
33} // namespace Tegra
34
35namespace VideoCore {
36class RasterizerInterface;
37}
38
39namespace Tegra::Engines {
40
41class Puller final {
42public:
43 struct MethodCall {
44 u32 method{};
45 u32 argument{};
46 u32 subchannel{};
47 u32 method_count{};
48
49 explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
50 : method(method_), argument(argument_), subchannel(subchannel_),
51 method_count(method_count_) {}
52
53 [[nodiscard]] bool IsLastCall() const {
54 return method_count <= 1;
55 }
56 };
57
58 enum class FenceOperation : u32 {
59 Acquire = 0,
60 Increment = 1,
61 };
62
63 union FenceAction {
64 u32 raw;
65 BitField<0, 1, FenceOperation> op;
66 BitField<8, 24, u32> syncpoint_id;
67 };
68
69 explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher,
70 Control::ChannelState& channel_state);
71 ~Puller();
72
73 void CallMethod(const MethodCall& method_call);
74
75 void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
76 u32 methods_pending);
77
78 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
79
80 void CallPullerMethod(const MethodCall& method_call);
81
82 void CallEngineMethod(const MethodCall& method_call);
83
84 void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
85 u32 methods_pending);
86
87private:
88 Tegra::GPU& gpu;
89
90 MemoryManager& memory_manager;
91 DmaPusher& dma_pusher;
92 Control::ChannelState& channel_state;
93 VideoCore::RasterizerInterface* rasterizer = nullptr;
94
95 static constexpr std::size_t NUM_REGS = 0x800;
96 struct Regs {
97 static constexpr size_t NUM_REGS = 0x40;
98
99 union {
100 struct {
101 INSERT_PADDING_WORDS_NOINIT(0x4);
102 struct {
103 u32 address_high;
104 u32 address_low;
105
106 [[nodiscard]] GPUVAddr SemaphoreAddress() const {
107 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
108 address_low);
109 }
110 } semaphore_address;
111
112 u32 semaphore_sequence;
113 u32 semaphore_trigger;
114 INSERT_PADDING_WORDS_NOINIT(0xC);
115
 116 // The pusher and the puller share the reference counter; the pusher only has read
 117 // access.
118 u32 reference_count;
119 INSERT_PADDING_WORDS_NOINIT(0x5);
120
121 u32 semaphore_acquire;
122 u32 semaphore_release;
123 u32 fence_value;
124 FenceAction fence_action;
125 INSERT_PADDING_WORDS_NOINIT(0xE2);
126
127 // Puller state
128 u32 acquire_mode;
129 u32 acquire_source;
130 u32 acquire_active;
131 u32 acquire_timeout;
132 u32 acquire_value;
133 };
134 std::array<u32, NUM_REGS> reg_array;
135 };
136 } regs{};
137
138 void ProcessBindMethod(const MethodCall& method_call);
139 void ProcessFenceActionMethod();
140 void ProcessSemaphoreAcquire();
141 void ProcessSemaphoreRelease();
142 void ProcessSemaphoreTriggerMethod();
143 [[nodiscard]] bool ExecuteMethodOnEngine(u32 method);
144
145 /// Mapping of command subchannels to their bound engine ids
146 std::array<EngineID, 8> bound_engines{};
147
148 enum class GpuSemaphoreOperation {
149 AcquireEqual = 0x1,
150 WriteLong = 0x2,
151 AcquireGequal = 0x4,
152 AcquireMask = 0x8,
153 };
154
155#define ASSERT_REG_POSITION(field_name, position) \
156 static_assert(offsetof(Regs, field_name) == position * 4, \
157 "Field " #field_name " has invalid position")
158
159 ASSERT_REG_POSITION(semaphore_address, 0x4);
160 ASSERT_REG_POSITION(semaphore_sequence, 0x6);
161 ASSERT_REG_POSITION(semaphore_trigger, 0x7);
162 ASSERT_REG_POSITION(reference_count, 0x14);
163 ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
164 ASSERT_REG_POSITION(semaphore_release, 0x1B);
165 ASSERT_REG_POSITION(fence_value, 0x1C);
166 ASSERT_REG_POSITION(fence_action, 0x1D);
167
168 ASSERT_REG_POSITION(acquire_mode, 0x100);
169 ASSERT_REG_POSITION(acquire_source, 0x101);
170 ASSERT_REG_POSITION(acquire_active, 0x102);
171 ASSERT_REG_POSITION(acquire_timeout, 0x103);
172 ASSERT_REG_POSITION(acquire_value, 0x104);
173
174#undef ASSERT_REG_POSITION
175};
176
177} // namespace Tegra::Engines
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 1e9832ddd..c390ac91b 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -4,40 +4,24 @@
4#pragma once 4#pragma once
5 5
6#include <algorithm> 6#include <algorithm>
7#include <cstring>
8#include <deque>
9#include <functional>
10#include <memory>
7#include <queue> 11#include <queue>
8 12
9#include "common/common_types.h" 13#include "common/common_types.h"
10#include "video_core/delayed_destruction_ring.h" 14#include "video_core/delayed_destruction_ring.h"
11#include "video_core/gpu.h" 15#include "video_core/gpu.h"
12#include "video_core/memory_manager.h" 16#include "video_core/host1x/host1x.h"
17#include "video_core/host1x/syncpoint_manager.h"
13#include "video_core/rasterizer_interface.h" 18#include "video_core/rasterizer_interface.h"
14 19
15namespace VideoCommon { 20namespace VideoCommon {
16 21
17class FenceBase { 22class FenceBase {
18public: 23public:
19 explicit FenceBase(u32 payload_, bool is_stubbed_) 24 explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
20 : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
21
22 explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_)
23 : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
24
25 GPUVAddr GetAddress() const {
26 return address;
27 }
28
29 u32 GetPayload() const {
30 return payload;
31 }
32
33 bool IsSemaphore() const {
34 return is_semaphore;
35 }
36
37private:
38 GPUVAddr address;
39 u32 payload;
40 bool is_semaphore;
41 25
42protected: 26protected:
43 bool is_stubbed; 27 bool is_stubbed;
@@ -57,30 +41,28 @@ public:
57 buffer_cache.AccumulateFlushes(); 41 buffer_cache.AccumulateFlushes();
58 } 42 }
59 43
60 void SignalSemaphore(GPUVAddr addr, u32 value) { 44 void SyncOperation(std::function<void()>&& func) {
45 uncommitted_operations.emplace_back(std::move(func));
46 }
47
48 void SignalFence(std::function<void()>&& func) {
61 TryReleasePendingFences(); 49 TryReleasePendingFences();
62 const bool should_flush = ShouldFlush(); 50 const bool should_flush = ShouldFlush();
63 CommitAsyncFlushes(); 51 CommitAsyncFlushes();
64 TFence new_fence = CreateFence(addr, value, !should_flush); 52 uncommitted_operations.emplace_back(std::move(func));
53 CommitOperations();
54 TFence new_fence = CreateFence(!should_flush);
65 fences.push(new_fence); 55 fences.push(new_fence);
66 QueueFence(new_fence); 56 QueueFence(new_fence);
67 if (should_flush) { 57 if (should_flush) {
68 rasterizer.FlushCommands(); 58 rasterizer.FlushCommands();
69 } 59 }
70 rasterizer.SyncGuestHost();
71 } 60 }
72 61
73 void SignalSyncPoint(u32 value) { 62 void SignalSyncPoint(u32 value) {
74 TryReleasePendingFences(); 63 syncpoint_manager.IncrementGuest(value);
75 const bool should_flush = ShouldFlush(); 64 std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
76 CommitAsyncFlushes(); 65 SignalFence(std::move(func));
77 TFence new_fence = CreateFence(value, !should_flush);
78 fences.push(new_fence);
79 QueueFence(new_fence);
80 if (should_flush) {
81 rasterizer.FlushCommands();
82 }
83 rasterizer.SyncGuestHost();
84 } 66 }
85 67
86 void WaitPendingFences() { 68 void WaitPendingFences() {
@@ -90,11 +72,10 @@ public:
90 WaitFence(current_fence); 72 WaitFence(current_fence);
91 } 73 }
92 PopAsyncFlushes(); 74 PopAsyncFlushes();
93 if (current_fence->IsSemaphore()) { 75 auto operations = std::move(pending_operations.front());
94 gpu_memory.template Write<u32>(current_fence->GetAddress(), 76 pending_operations.pop_front();
95 current_fence->GetPayload()); 77 for (auto& operation : operations) {
96 } else { 78 operation();
97 gpu.IncrementSyncPoint(current_fence->GetPayload());
98 } 79 }
99 PopFence(); 80 PopFence();
100 } 81 }
@@ -104,16 +85,14 @@ protected:
104 explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, 85 explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
105 TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, 86 TTextureCache& texture_cache_, TTBufferCache& buffer_cache_,
106 TQueryCache& query_cache_) 87 TQueryCache& query_cache_)
107 : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()}, 88 : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()},
108 texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} 89 texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {}
109 90
110 virtual ~FenceManager() = default; 91 virtual ~FenceManager() = default;
111 92
112 /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is 93 /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is
113 /// true 94 /// true
114 virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; 95 virtual TFence CreateFence(bool is_stubbed) = 0;
115 /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
116 virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
117 /// Queues a fence into the backend if the fence isn't stubbed. 96 /// Queues a fence into the backend if the fence isn't stubbed.
118 virtual void QueueFence(TFence& fence) = 0; 97 virtual void QueueFence(TFence& fence) = 0;
119 /// Notifies that the backend fence has been signaled/reached in host GPU. 98 /// Notifies that the backend fence has been signaled/reached in host GPU.
@@ -123,7 +102,7 @@ protected:
123 102
124 VideoCore::RasterizerInterface& rasterizer; 103 VideoCore::RasterizerInterface& rasterizer;
125 Tegra::GPU& gpu; 104 Tegra::GPU& gpu;
126 Tegra::MemoryManager& gpu_memory; 105 Tegra::Host1x::SyncpointManager& syncpoint_manager;
127 TTextureCache& texture_cache; 106 TTextureCache& texture_cache;
128 TTBufferCache& buffer_cache; 107 TTBufferCache& buffer_cache;
129 TQueryCache& query_cache; 108 TQueryCache& query_cache;
@@ -136,11 +115,10 @@ private:
136 return; 115 return;
137 } 116 }
138 PopAsyncFlushes(); 117 PopAsyncFlushes();
139 if (current_fence->IsSemaphore()) { 118 auto operations = std::move(pending_operations.front());
140 gpu_memory.template Write<u32>(current_fence->GetAddress(), 119 pending_operations.pop_front();
141 current_fence->GetPayload()); 120 for (auto& operation : operations) {
142 } else { 121 operation();
143 gpu.IncrementSyncPoint(current_fence->GetPayload());
144 } 122 }
145 PopFence(); 123 PopFence();
146 } 124 }
@@ -159,16 +137,20 @@ private:
159 } 137 }
160 138
161 void PopAsyncFlushes() { 139 void PopAsyncFlushes() {
162 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 140 {
163 texture_cache.PopAsyncFlushes(); 141 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
164 buffer_cache.PopAsyncFlushes(); 142 texture_cache.PopAsyncFlushes();
143 buffer_cache.PopAsyncFlushes();
144 }
165 query_cache.PopAsyncFlushes(); 145 query_cache.PopAsyncFlushes();
166 } 146 }
167 147
168 void CommitAsyncFlushes() { 148 void CommitAsyncFlushes() {
169 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 149 {
170 texture_cache.CommitAsyncFlushes(); 150 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
171 buffer_cache.CommitAsyncFlushes(); 151 texture_cache.CommitAsyncFlushes();
152 buffer_cache.CommitAsyncFlushes();
153 }
172 query_cache.CommitAsyncFlushes(); 154 query_cache.CommitAsyncFlushes();
173 } 155 }
174 156
@@ -177,7 +159,13 @@ private:
177 fences.pop(); 159 fences.pop();
178 } 160 }
179 161
162 void CommitOperations() {
163 pending_operations.emplace_back(std::move(uncommitted_operations));
164 }
165
180 std::queue<TFence> fences; 166 std::queue<TFence> fences;
167 std::deque<std::function<void()>> uncommitted_operations;
168 std::deque<std::deque<std::function<void()>>> pending_operations;
181 169
182 DelayedDestructionRing<TFence, 6> delayed_destruction_ring; 170 DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
183}; 171};
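
The fence_manager.h hunks above replace per-fence semaphore writes and syncpoint increments with batches of deferred host operations: callbacks queued while a fence is in flight are parked in uncommitted_operations, moved into pending_operations when the fence is committed, and run once the backend signals the fence. A minimal standalone sketch of that pattern, with illustrative names rather than the yuzu API:

    #include <cstdio>
    #include <deque>
    #include <functional>
    #include <queue>

    class DeferredFenceQueue {
    public:
        // Record an operation to run once the next committed fence signals.
        void AddOperation(std::function<void()> operation) {
            uncommitted.push_back(std::move(operation));
        }

        // Pair the accumulated operations with a new fence (here just an id).
        void SignalFence(int fence_id) {
            pending.emplace_back(std::move(uncommitted));
            uncommitted.clear();
            fences.push(fence_id);
        }

        // Run the operation batches of every fence the host reports as signaled.
        void PopSignaledFences(int last_signaled_fence) {
            while (!fences.empty() && fences.front() <= last_signaled_fence) {
                auto operations = std::move(pending.front());
                pending.pop_front();
                for (auto& operation : operations) {
                    operation();
                }
                fences.pop();
            }
        }

    private:
        std::queue<int> fences;
        std::deque<std::function<void()>> uncommitted;
        std::deque<std::deque<std::function<void()>>> pending;
    };

    int main() {
        DeferredFenceQueue queue;
        queue.AddOperation([] { std::puts("flush buffer range"); });
        queue.SignalFence(1);
        queue.AddOperation([] { std::puts("download query results"); });
        queue.SignalFence(2);
        queue.PopSignaledFences(1);  // only the first batch runs
        queue.PopSignaledFences(2);  // second batch runs once fence 2 is reached
        return 0;
    }

Keeping one deque of callbacks per fence means a signaled fence releases exactly the work recorded before it, which is what lets the cache flushes above complete asynchronously.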
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 33431f2a0..28b38273e 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -14,10 +14,11 @@
14#include "core/core.h" 14#include "core/core.h"
15#include "core/core_timing.h" 15#include "core/core_timing.h"
16#include "core/frontend/emu_window.h" 16#include "core/frontend/emu_window.h"
17#include "core/hardware_interrupt_manager.h"
18#include "core/hle/service/nvdrv/nvdata.h" 17#include "core/hle/service/nvdrv/nvdata.h"
19#include "core/perf_stats.h" 18#include "core/perf_stats.h"
20#include "video_core/cdma_pusher.h" 19#include "video_core/cdma_pusher.h"
20#include "video_core/control/channel_state.h"
21#include "video_core/control/scheduler.h"
21#include "video_core/dma_pusher.h" 22#include "video_core/dma_pusher.h"
22#include "video_core/engines/fermi_2d.h" 23#include "video_core/engines/fermi_2d.h"
23#include "video_core/engines/kepler_compute.h" 24#include "video_core/engines/kepler_compute.h"
@@ -26,75 +27,64 @@
26#include "video_core/engines/maxwell_dma.h" 27#include "video_core/engines/maxwell_dma.h"
27#include "video_core/gpu.h" 28#include "video_core/gpu.h"
28#include "video_core/gpu_thread.h" 29#include "video_core/gpu_thread.h"
30#include "video_core/host1x/host1x.h"
31#include "video_core/host1x/syncpoint_manager.h"
29#include "video_core/memory_manager.h" 32#include "video_core/memory_manager.h"
30#include "video_core/renderer_base.h" 33#include "video_core/renderer_base.h"
31#include "video_core/shader_notify.h" 34#include "video_core/shader_notify.h"
32 35
33namespace Tegra { 36namespace Tegra {
34 37
35MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
36
37struct GPU::Impl { 38struct GPU::Impl {
38 explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) 39 explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
39 : gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>( 40 : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
40 system)},
41 dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_},
42 maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
43 fermi_2d{std::make_unique<Engines::Fermi2D>()},
44 kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
45 maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)},
46 kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)},
47 shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, 41 shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
48 gpu_thread{system_, is_async_} {} 42 gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {}
49 43
50 ~Impl() = default; 44 ~Impl() = default;
51 45
52 /// Binds a renderer to the GPU. 46 std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
53 void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) { 47 auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
54 renderer = std::move(renderer_); 48 channels.emplace(channel_id, channel_state);
55 rasterizer = renderer->ReadRasterizer(); 49 scheduler->DeclareChannel(channel_state);
56 50 return channel_state;
57 memory_manager->BindRasterizer(rasterizer);
58 maxwell_3d->BindRasterizer(rasterizer);
59 fermi_2d->BindRasterizer(rasterizer);
60 kepler_compute->BindRasterizer(rasterizer);
61 kepler_memory->BindRasterizer(rasterizer);
62 maxwell_dma->BindRasterizer(rasterizer);
63 } 51 }
64 52
65 /// Calls a GPU method. 53 void BindChannel(s32 channel_id) {
66 void CallMethod(const GPU::MethodCall& method_call) { 54 if (bound_channel == channel_id) {
67 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, 55 return;
68 method_call.subchannel); 56 }
57 auto it = channels.find(channel_id);
58 ASSERT(it != channels.end());
59 bound_channel = channel_id;
60 current_channel = it->second.get();
69 61
70 ASSERT(method_call.subchannel < bound_engines.size()); 62 rasterizer->BindChannel(*current_channel);
63 }
71 64
72 if (ExecuteMethodOnEngine(method_call.method)) { 65 std::shared_ptr<Control::ChannelState> AllocateChannel() {
73 CallEngineMethod(method_call); 66 return CreateChannel(new_channel_id++);
74 } else {
75 CallPullerMethod(method_call);
76 }
77 } 67 }
78 68
79 /// Calls a GPU multivalue method. 69 void InitChannel(Control::ChannelState& to_init) {
80 void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, 70 to_init.Init(system, gpu);
81 u32 methods_pending) { 71 to_init.BindRasterizer(rasterizer);
82 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); 72 rasterizer->InitializeChannel(to_init);
73 }
83 74
84 ASSERT(subchannel < bound_engines.size()); 75 void InitAddressSpace(Tegra::MemoryManager& memory_manager) {
76 memory_manager.BindRasterizer(rasterizer);
77 }
85 78
86 if (ExecuteMethodOnEngine(method)) { 79 void ReleaseChannel(Control::ChannelState& to_release) {
87 CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); 80 UNIMPLEMENTED();
88 } else { 81 }
89 for (std::size_t i = 0; i < amount; i++) { 82
90 CallPullerMethod(GPU::MethodCall{ 83 /// Binds a renderer to the GPU.
91 method, 84 void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
92 base_start[i], 85 renderer = std::move(renderer_);
93 subchannel, 86 rasterizer = renderer->ReadRasterizer();
94 methods_pending - static_cast<u32>(i), 87 host1x.MemoryManager().BindRasterizer(rasterizer);
95 });
96 }
97 }
98 } 88 }
99 89
100 /// Flush all current written commands into the host GPU for execution. 90 /// Flush all current written commands into the host GPU for execution.
@@ -103,85 +93,82 @@ struct GPU::Impl {
103 } 93 }
104 94
105 /// Synchronizes CPU writes with Host GPU memory. 95 /// Synchronizes CPU writes with Host GPU memory.
106 void SyncGuestHost() { 96 void InvalidateGPUCache() {
107 rasterizer->SyncGuestHost(); 97 rasterizer->InvalidateGPUCache();
108 } 98 }
109 99
110 /// Signal the ending of command list. 100 /// Signal the ending of command list.
111 void OnCommandListEnd() { 101 void OnCommandListEnd() {
112 if (is_async) { 102 gpu_thread.OnCommandListEnd();
113 // This command only applies to asynchronous GPU mode
114 gpu_thread.OnCommandListEnd();
115 }
116 } 103 }
117 104
118 /// Request a host GPU memory flush from the CPU. 105 /// Request a host GPU memory flush from the CPU.
119 [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) { 106 template <typename Func>
120 std::unique_lock lck{flush_request_mutex}; 107 [[nodiscard]] u64 RequestSyncOperation(Func&& action) {
121 const u64 fence = ++last_flush_fence; 108 std::unique_lock lck{sync_request_mutex};
122 flush_requests.emplace_back(fence, addr, size); 109 const u64 fence = ++last_sync_fence;
110 sync_requests.emplace_back(action);
123 return fence; 111 return fence;
124 } 112 }
125 113
126 /// Obtains current flush request fence id. 114 /// Obtains current flush request fence id.
127 [[nodiscard]] u64 CurrentFlushRequestFence() const { 115 [[nodiscard]] u64 CurrentSyncRequestFence() const {
128 return current_flush_fence.load(std::memory_order_relaxed); 116 return current_sync_fence.load(std::memory_order_relaxed);
117 }
118
119 void WaitForSyncOperation(const u64 fence) {
120 std::unique_lock lck{sync_request_mutex};
121 sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; });
129 } 122 }
130 123
131 /// Tick pending requests within the GPU. 124 /// Tick pending requests within the GPU.
132 void TickWork() { 125 void TickWork() {
133 std::unique_lock lck{flush_request_mutex}; 126 std::unique_lock lck{sync_request_mutex};
134 while (!flush_requests.empty()) { 127 while (!sync_requests.empty()) {
135 auto& request = flush_requests.front(); 128 auto request = std::move(sync_requests.front());
136 const u64 fence = request.fence; 129 sync_requests.pop_front();
137 const VAddr addr = request.addr; 130 sync_request_mutex.unlock();
138 const std::size_t size = request.size; 131 request();
139 flush_requests.pop_front(); 132 current_sync_fence.fetch_add(1, std::memory_order_release);
140 flush_request_mutex.unlock(); 133 sync_request_mutex.lock();
141 rasterizer->FlushRegion(addr, size); 134 sync_request_cv.notify_all();
142 current_flush_fence.store(fence);
143 flush_request_mutex.lock();
144 } 135 }
145 } 136 }
146 137
147 /// Returns a reference to the Maxwell3D GPU engine. 138 /// Returns a reference to the Maxwell3D GPU engine.
148 [[nodiscard]] Engines::Maxwell3D& Maxwell3D() { 139 [[nodiscard]] Engines::Maxwell3D& Maxwell3D() {
149 return *maxwell_3d; 140 ASSERT(current_channel);
141 return *current_channel->maxwell_3d;
150 } 142 }
151 143
152 /// Returns a const reference to the Maxwell3D GPU engine. 144 /// Returns a const reference to the Maxwell3D GPU engine.
153 [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const { 145 [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const {
154 return *maxwell_3d; 146 ASSERT(current_channel);
147 return *current_channel->maxwell_3d;
155 } 148 }
156 149
157 /// Returns a reference to the KeplerCompute GPU engine. 150 /// Returns a reference to the KeplerCompute GPU engine.
158 [[nodiscard]] Engines::KeplerCompute& KeplerCompute() { 151 [[nodiscard]] Engines::KeplerCompute& KeplerCompute() {
159 return *kepler_compute; 152 ASSERT(current_channel);
153 return *current_channel->kepler_compute;
160 } 154 }
161 155
162 /// Returns a reference to the KeplerCompute GPU engine. 156 /// Returns a reference to the KeplerCompute GPU engine.
163 [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const { 157 [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const {
164 return *kepler_compute; 158 ASSERT(current_channel);
165 } 159 return *current_channel->kepler_compute;
166
167 /// Returns a reference to the GPU memory manager.
168 [[nodiscard]] Tegra::MemoryManager& MemoryManager() {
169 return *memory_manager;
170 }
171
172 /// Returns a const reference to the GPU memory manager.
173 [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const {
174 return *memory_manager;
175 } 160 }
176 161
177 /// Returns a reference to the GPU DMA pusher. 162 /// Returns a reference to the GPU DMA pusher.
178 [[nodiscard]] Tegra::DmaPusher& DmaPusher() { 163 [[nodiscard]] Tegra::DmaPusher& DmaPusher() {
179 return *dma_pusher; 164 ASSERT(current_channel);
165 return *current_channel->dma_pusher;
180 } 166 }
181 167
182 /// Returns a const reference to the GPU DMA pusher. 168 /// Returns a const reference to the GPU DMA pusher.
183 [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const { 169 [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const {
184 return *dma_pusher; 170 ASSERT(current_channel);
171 return *current_channel->dma_pusher;
185 } 172 }
186 173
187 /// Returns a reference to the underlying renderer. 174 /// Returns a reference to the underlying renderer.
@@ -204,77 +191,6 @@ struct GPU::Impl {
204 return *shader_notify; 191 return *shader_notify;
205 } 192 }
206 193
207 /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
208 void WaitFence(u32 syncpoint_id, u32 value) {
209 // Synced GPU, is always in sync
210 if (!is_async) {
211 return;
212 }
213 if (syncpoint_id == UINT32_MAX) {
214 // TODO: Research what this does.
215 LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented");
216 return;
217 }
218 MICROPROFILE_SCOPE(GPU_wait);
219 std::unique_lock lock{sync_mutex};
220 sync_cv.wait(lock, [=, this] {
221 if (shutting_down.load(std::memory_order_relaxed)) {
222 // We're shutting down, ensure no threads continue to wait for the next syncpoint
223 return true;
224 }
225 return syncpoints.at(syncpoint_id).load() >= value;
226 });
227 }
228
229 void IncrementSyncPoint(u32 syncpoint_id) {
230 auto& syncpoint = syncpoints.at(syncpoint_id);
231 syncpoint++;
232 std::scoped_lock lock{sync_mutex};
233 sync_cv.notify_all();
234 auto& interrupt = syncpt_interrupts.at(syncpoint_id);
235 if (!interrupt.empty()) {
236 u32 value = syncpoint.load();
237 auto it = interrupt.begin();
238 while (it != interrupt.end()) {
239 if (value >= *it) {
240 TriggerCpuInterrupt(syncpoint_id, *it);
241 it = interrupt.erase(it);
242 continue;
243 }
244 it++;
245 }
246 }
247 }
248
249 [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
250 return syncpoints.at(syncpoint_id).load();
251 }
252
253 void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
254 std::scoped_lock lock{sync_mutex};
255 auto& interrupt = syncpt_interrupts.at(syncpoint_id);
256 bool contains = std::any_of(interrupt.begin(), interrupt.end(),
257 [value](u32 in_value) { return in_value == value; });
258 if (contains) {
259 return;
260 }
261 interrupt.emplace_back(value);
262 }
263
264 [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
265 std::scoped_lock lock{sync_mutex};
266 auto& interrupt = syncpt_interrupts.at(syncpoint_id);
267 const auto iter =
268 std::find_if(interrupt.begin(), interrupt.end(),
269 [value](u32 interrupt_value) { return value == interrupt_value; });
270
271 if (iter == interrupt.end()) {
272 return false;
273 }
274 interrupt.erase(iter);
275 return true;
276 }
277
278 [[nodiscard]] u64 GetTicks() const { 194 [[nodiscard]] u64 GetTicks() const {
279 // This values were reversed engineered by fincs from NVN 195 // This values were reversed engineered by fincs from NVN
280 // The gpu clock is reported in units of 385/625 nanoseconds 196 // The gpu clock is reported in units of 385/625 nanoseconds
@@ -306,7 +222,7 @@ struct GPU::Impl {
306 /// This can be used to launch any necessary threads and register any necessary 222 /// This can be used to launch any necessary threads and register any necessary
307 /// core timing events. 223 /// core timing events.
308 void Start() { 224 void Start() {
309 gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher); 225 gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
310 cpu_context = renderer->GetRenderWindow().CreateSharedContext(); 226 cpu_context = renderer->GetRenderWindow().CreateSharedContext();
311 cpu_context->MakeCurrent(); 227 cpu_context->MakeCurrent();
312 } 228 }
@@ -328,8 +244,8 @@ struct GPU::Impl {
328 } 244 }
329 245
330 /// Push GPU command entries to be processed 246 /// Push GPU command entries to be processed
331 void PushGPUEntries(Tegra::CommandList&& entries) { 247 void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
332 gpu_thread.SubmitList(std::move(entries)); 248 gpu_thread.SubmitList(channel, std::move(entries));
333 } 249 }
334 250
335 /// Push GPU command buffer entries to be processed 251 /// Push GPU command buffer entries to be processed
@@ -339,7 +255,7 @@ struct GPU::Impl {
339 } 255 }
340 256
341 if (!cdma_pushers.contains(id)) { 257 if (!cdma_pushers.contains(id)) {
342 cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu)); 258 cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x));
343 } 259 }
344 260
345 // SubmitCommandBuffer would make the nvdec operations async, this is not currently working 261 // SubmitCommandBuffer would make the nvdec operations async, this is not currently working
@@ -376,308 +292,55 @@ struct GPU::Impl {
376 gpu_thread.FlushAndInvalidateRegion(addr, size); 292 gpu_thread.FlushAndInvalidateRegion(addr, size);
377 } 293 }
378 294
379 void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const { 295 void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
380 auto& interrupt_manager = system.InterruptManager(); 296 std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
381 interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); 297 size_t current_request_counter{};
382 } 298 {
383 299 std::unique_lock<std::mutex> lk(request_swap_mutex);
384 void ProcessBindMethod(const GPU::MethodCall& method_call) { 300 if (free_swap_counters.empty()) {
385 // Bind the current subchannel to the desired engine id. 301 current_request_counter = request_swap_counters.size();
386 LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, 302 request_swap_counters.emplace_back(num_fences);
387 method_call.argument);
388 const auto engine_id = static_cast<EngineID>(method_call.argument);
389 bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
390 switch (engine_id) {
391 case EngineID::FERMI_TWOD_A:
392 dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel);
393 break;
394 case EngineID::MAXWELL_B:
395 dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel);
396 break;
397 case EngineID::KEPLER_COMPUTE_B:
398 dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel);
399 break;
400 case EngineID::MAXWELL_DMA_COPY_A:
401 dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel);
402 break;
403 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
404 dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
405 break;
406 default:
407 UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
408 }
409 }
410
411 void ProcessFenceActionMethod() {
412 switch (regs.fence_action.op) {
413 case GPU::FenceOperation::Acquire:
414 WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
415 break;
416 case GPU::FenceOperation::Increment:
417 IncrementSyncPoint(regs.fence_action.syncpoint_id);
418 break;
419 default:
420 UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
421 }
422 }
423
424 void ProcessWaitForInterruptMethod() {
425 // TODO(bunnei) ImplementMe
426 LOG_WARNING(HW_GPU, "(STUBBED) called");
427 }
428
429 void ProcessSemaphoreTriggerMethod() {
430 const auto semaphoreOperationMask = 0xF;
431 const auto op =
432 static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
433 if (op == GpuSemaphoreOperation::WriteLong) {
434 struct Block {
435 u32 sequence;
436 u32 zeros = 0;
437 u64 timestamp;
438 };
439
440 Block block{};
441 block.sequence = regs.semaphore_sequence;
442 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
443 // CoreTiming
444 block.timestamp = GetTicks();
445 memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
446 sizeof(block));
447 } else {
448 const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
449 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
450 (op == GpuSemaphoreOperation::AcquireGequal &&
451 static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
452 (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) {
453 // Nothing to do in this case
454 } else { 303 } else {
455 regs.acquire_source = true; 304 current_request_counter = free_swap_counters.front();
456 regs.acquire_value = regs.semaphore_sequence; 305 request_swap_counters[current_request_counter] = num_fences;
457 if (op == GpuSemaphoreOperation::AcquireEqual) { 306 free_swap_counters.pop_front();
458 regs.acquire_active = true;
459 regs.acquire_mode = false;
460 } else if (op == GpuSemaphoreOperation::AcquireGequal) {
461 regs.acquire_active = true;
462 regs.acquire_mode = true;
463 } else if (op == GpuSemaphoreOperation::AcquireMask) {
464 // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with
465 // semaphore_sequence, gives a non-0 result
466 LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented");
467 } else {
468 LOG_ERROR(HW_GPU, "Invalid semaphore operation");
469 }
470 } 307 }
471 } 308 }
472 } 309 const auto wait_fence =
473 310 RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] {
474 void ProcessSemaphoreRelease() { 311 auto& syncpoint_manager = host1x.GetSyncpointManager();
475 memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), 312 if (num_fences == 0) {
476 regs.semaphore_release); 313 renderer->SwapBuffers(framebuffer);
477 } 314 }
478 315 const auto executer = [this, current_request_counter,
479 void ProcessSemaphoreAcquire() { 316 framebuffer_copy = *framebuffer]() {
480 const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress()); 317 {
481 const auto value = regs.semaphore_acquire; 318 std::unique_lock<std::mutex> lk(request_swap_mutex);
482 if (word != value) { 319 if (--request_swap_counters[current_request_counter] != 0) {
483 regs.acquire_active = true; 320 return;
484 regs.acquire_value = value; 321 }
485 // TODO(kemathe73) figure out how to do the acquire_timeout 322 free_swap_counters.push_back(current_request_counter);
486 regs.acquire_mode = false;
487 regs.acquire_source = false;
488 }
489 }
490
491 /// Calls a GPU puller method.
492 void CallPullerMethod(const GPU::MethodCall& method_call) {
493 regs.reg_array[method_call.method] = method_call.argument;
494 const auto method = static_cast<BufferMethods>(method_call.method);
495
496 switch (method) {
497 case BufferMethods::BindObject: {
498 ProcessBindMethod(method_call);
499 break;
500 }
501 case BufferMethods::Nop:
502 case BufferMethods::SemaphoreAddressHigh:
503 case BufferMethods::SemaphoreAddressLow:
504 case BufferMethods::SemaphoreSequence:
505 break;
506 case BufferMethods::UnkCacheFlush:
507 rasterizer->SyncGuestHost();
508 break;
509 case BufferMethods::WrcacheFlush:
510 rasterizer->SignalReference();
511 break;
512 case BufferMethods::FenceValue:
513 break;
514 case BufferMethods::RefCnt:
515 rasterizer->SignalReference();
516 break;
517 case BufferMethods::FenceAction:
518 ProcessFenceActionMethod();
519 break;
520 case BufferMethods::WaitForInterrupt:
521 rasterizer->WaitForIdle();
522 break;
523 case BufferMethods::SemaphoreTrigger: {
524 ProcessSemaphoreTriggerMethod();
525 break;
526 }
527 case BufferMethods::NotifyIntr: {
528 // TODO(Kmather73): Research and implement this method.
529 LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
530 break;
531 }
532 case BufferMethods::Unk28: {
533 // TODO(Kmather73): Research and implement this method.
534 LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
535 break;
536 }
537 case BufferMethods::SemaphoreAcquire: {
538 ProcessSemaphoreAcquire();
539 break;
540 }
541 case BufferMethods::SemaphoreRelease: {
542 ProcessSemaphoreRelease();
543 break;
544 }
545 case BufferMethods::Yield: {
546 // TODO(Kmather73): Research and implement this method.
547 LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
548 break;
549 }
550 default:
551 LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
552 break;
553 }
554 }
555
556 /// Calls a GPU engine method.
557 void CallEngineMethod(const GPU::MethodCall& method_call) {
558 const EngineID engine = bound_engines[method_call.subchannel];
559
560 switch (engine) {
561 case EngineID::FERMI_TWOD_A:
562 fermi_2d->CallMethod(method_call.method, method_call.argument,
563 method_call.IsLastCall());
564 break;
565 case EngineID::MAXWELL_B:
566 maxwell_3d->CallMethod(method_call.method, method_call.argument,
567 method_call.IsLastCall());
568 break;
569 case EngineID::KEPLER_COMPUTE_B:
570 kepler_compute->CallMethod(method_call.method, method_call.argument,
571 method_call.IsLastCall());
572 break;
573 case EngineID::MAXWELL_DMA_COPY_A:
574 maxwell_dma->CallMethod(method_call.method, method_call.argument,
575 method_call.IsLastCall());
576 break;
577 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
578 kepler_memory->CallMethod(method_call.method, method_call.argument,
579 method_call.IsLastCall());
580 break;
581 default:
582 UNIMPLEMENTED_MSG("Unimplemented engine");
583 }
584 }
585
586 /// Calls a GPU engine multivalue method.
587 void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
588 u32 methods_pending) {
589 const EngineID engine = bound_engines[subchannel];
590
591 switch (engine) {
592 case EngineID::FERMI_TWOD_A:
593 fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
594 break;
595 case EngineID::MAXWELL_B:
596 maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
597 break;
598 case EngineID::KEPLER_COMPUTE_B:
599 kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
600 break;
601 case EngineID::MAXWELL_DMA_COPY_A:
602 maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
603 break;
604 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
605 kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
606 break;
607 default:
608 UNIMPLEMENTED_MSG("Unimplemented engine");
609 }
610 }
611
612 /// Determines where the method should be executed.
613 [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) {
614 const auto buffer_method = static_cast<BufferMethods>(method);
615 return buffer_method >= BufferMethods::NonPullerMethods;
616 }
617
618 struct Regs {
619 static constexpr size_t NUM_REGS = 0x40;
620
621 union {
622 struct {
623 INSERT_PADDING_WORDS_NOINIT(0x4);
624 struct {
625 u32 address_high;
626 u32 address_low;
627
628 [[nodiscard]] GPUVAddr SemaphoreAddress() const {
629 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
630 address_low);
631 } 323 }
632 } semaphore_address; 324 renderer->SwapBuffers(&framebuffer_copy);
633 325 };
634 u32 semaphore_sequence; 326 for (size_t i = 0; i < num_fences; i++) {
635 u32 semaphore_trigger; 327 syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
636 INSERT_PADDING_WORDS_NOINIT(0xC); 328 }
637 329 });
638 // The pusher and the puller share the reference counter, the pusher only has read 330 gpu_thread.TickGPU();
639 // access 331 WaitForSyncOperation(wait_fence);
640 u32 reference_count; 332 }
641 INSERT_PADDING_WORDS_NOINIT(0x5);
642
643 u32 semaphore_acquire;
644 u32 semaphore_release;
645 u32 fence_value;
646 GPU::FenceAction fence_action;
647 INSERT_PADDING_WORDS_NOINIT(0xE2);
648
649 // Puller state
650 u32 acquire_mode;
651 u32 acquire_source;
652 u32 acquire_active;
653 u32 acquire_timeout;
654 u32 acquire_value;
655 };
656 std::array<u32, NUM_REGS> reg_array;
657 };
658 } regs{};
659 333
660 GPU& gpu; 334 GPU& gpu;
661 Core::System& system; 335 Core::System& system;
662 std::unique_ptr<Tegra::MemoryManager> memory_manager; 336 Host1x::Host1x& host1x;
663 std::unique_ptr<Tegra::DmaPusher> dma_pusher; 337
664 std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers; 338 std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
665 std::unique_ptr<VideoCore::RendererBase> renderer; 339 std::unique_ptr<VideoCore::RendererBase> renderer;
666 VideoCore::RasterizerInterface* rasterizer = nullptr; 340 VideoCore::RasterizerInterface* rasterizer = nullptr;
667 const bool use_nvdec; 341 const bool use_nvdec;
668 342
669 /// Mapping of command subchannels to their bound engine ids 343 s32 new_channel_id{1};
670 std::array<EngineID, 8> bound_engines{};
671 /// 3D engine
672 std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
673 /// 2D engine
674 std::unique_ptr<Engines::Fermi2D> fermi_2d;
675 /// Compute engine
676 std::unique_ptr<Engines::KeplerCompute> kepler_compute;
677 /// DMA engine
678 std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
679 /// Inline memory engine
680 std::unique_ptr<Engines::KeplerMemory> kepler_memory;
681 /// Shader build notifier 344 /// Shader build notifier
682 std::unique_ptr<VideoCore::ShaderNotify> shader_notify; 345 std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
683 /// When true, we are about to shut down emulation session, so terminate outstanding tasks 346 /// When true, we are about to shut down emulation session, so terminate outstanding tasks
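
RequestSwapBuffers in the hunk above defers presentation until every guest fence attached to the frame is reached: each request takes (or recycles) a counter slot initialized to num_fences, a callback registered on each syncpoint decrements the slot, and the final decrement performs the SwapBuffers call. A condensed standalone sketch of the counting scheme; syncpoint registration is stubbed out by invoking the returned callback directly, and all names are illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <vector>

    class SwapRequestTracker {
    public:
        // Returns a callback to invoke once per signaled fence; the frame is
        // presented when the last fence belonging to this request arrives.
        std::function<void()> Track(std::size_t num_fences, std::function<void()> present) {
            std::size_t slot{};
            {
                std::scoped_lock lock{mutex};
                if (free_slots.empty()) {
                    slot = counters.size();
                    counters.push_back(num_fences);
                } else {
                    slot = free_slots.front();
                    free_slots.pop_front();
                    counters[slot] = num_fences;
                }
            }
            return [this, slot, present = std::move(present)] {
                {
                    std::scoped_lock lock{mutex};
                    if (--counters[slot] != 0) {
                        return;  // more fences still outstanding for this frame
                    }
                    free_slots.push_back(slot);  // recycle the counter slot
                }
                present();
            };
        }

    private:
        std::deque<std::size_t> free_slots;
        std::vector<std::size_t> counters;
        std::mutex mutex;
    };

    int main() {
        SwapRequestTracker tracker;
        const auto on_fence = tracker.Track(2, [] { std::puts("present frame"); });
        on_fence();  // first fence reached: nothing happens yet
        on_fence();  // second fence reached: the frame is presented
        return 0;
    }

Recycling slots through a free list keeps counter storage bounded by the number of frames in flight rather than growing with every request.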
@@ -692,51 +355,25 @@ struct GPU::Impl {
692 355
693 std::condition_variable sync_cv; 356 std::condition_variable sync_cv;
694 357
695 struct FlushRequest { 358 std::list<std::function<void()>> sync_requests;
696 explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_) 359 std::atomic<u64> current_sync_fence{};
697 : fence{fence_}, addr{addr_}, size{size_} {} 360 u64 last_sync_fence{};
698 u64 fence; 361 std::mutex sync_request_mutex;
699 VAddr addr; 362 std::condition_variable sync_request_cv;
700 std::size_t size;
701 };
702
703 std::list<FlushRequest> flush_requests;
704 std::atomic<u64> current_flush_fence{};
705 u64 last_flush_fence{};
706 std::mutex flush_request_mutex;
707 363
708 const bool is_async; 364 const bool is_async;
709 365
710 VideoCommon::GPUThread::ThreadManager gpu_thread; 366 VideoCommon::GPUThread::ThreadManager gpu_thread;
711 std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; 367 std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context;
712 368
713#define ASSERT_REG_POSITION(field_name, position) \ 369 std::unique_ptr<Tegra::Control::Scheduler> scheduler;
714 static_assert(offsetof(Regs, field_name) == position * 4, \ 370 std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels;
715 "Field " #field_name " has invalid position") 371 Tegra::Control::ChannelState* current_channel;
716 372 s32 bound_channel{-1};
717 ASSERT_REG_POSITION(semaphore_address, 0x4); 373
718 ASSERT_REG_POSITION(semaphore_sequence, 0x6); 374 std::deque<size_t> free_swap_counters;
719 ASSERT_REG_POSITION(semaphore_trigger, 0x7); 375 std::deque<size_t> request_swap_counters;
720 ASSERT_REG_POSITION(reference_count, 0x14); 376 std::mutex request_swap_mutex;
721 ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
722 ASSERT_REG_POSITION(semaphore_release, 0x1B);
723 ASSERT_REG_POSITION(fence_value, 0x1C);
724 ASSERT_REG_POSITION(fence_action, 0x1D);
725
726 ASSERT_REG_POSITION(acquire_mode, 0x100);
727 ASSERT_REG_POSITION(acquire_source, 0x101);
728 ASSERT_REG_POSITION(acquire_active, 0x102);
729 ASSERT_REG_POSITION(acquire_timeout, 0x103);
730 ASSERT_REG_POSITION(acquire_value, 0x104);
731
732#undef ASSERT_REG_POSITION
733
734 enum class GpuSemaphoreOperation {
735 AcquireEqual = 0x1,
736 WriteLong = 0x2,
737 AcquireGequal = 0x4,
738 AcquireMask = 0x8,
739 };
740}; 377};
741 378
742GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) 379GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
@@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
744 381
745GPU::~GPU() = default; 382GPU::~GPU() = default;
746 383
747void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) { 384std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
748 impl->BindRenderer(std::move(renderer)); 385 return impl->AllocateChannel();
386}
387
388void GPU::InitChannel(Control::ChannelState& to_init) {
389 impl->InitChannel(to_init);
390}
391
392void GPU::BindChannel(s32 channel_id) {
393 impl->BindChannel(channel_id);
749} 394}
750 395
751void GPU::CallMethod(const MethodCall& method_call) { 396void GPU::ReleaseChannel(Control::ChannelState& to_release) {
752 impl->CallMethod(method_call); 397 impl->ReleaseChannel(to_release);
753} 398}
754 399
755void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, 400void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
756 u32 methods_pending) { 401 impl->InitAddressSpace(memory_manager);
757 impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending); 402}
403
404void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
405 impl->BindRenderer(std::move(renderer));
758} 406}
759 407
760void GPU::FlushCommands() { 408void GPU::FlushCommands() {
761 impl->FlushCommands(); 409 impl->FlushCommands();
762} 410}
763 411
764void GPU::SyncGuestHost() { 412void GPU::InvalidateGPUCache() {
765 impl->SyncGuestHost(); 413 impl->InvalidateGPUCache();
766} 414}
767 415
768void GPU::OnCommandListEnd() { 416void GPU::OnCommandListEnd() {
@@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() {
770} 418}
771 419
772u64 GPU::RequestFlush(VAddr addr, std::size_t size) { 420u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
773 return impl->RequestFlush(addr, size); 421 return impl->RequestSyncOperation(
422 [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
774} 423}
775 424
776u64 GPU::CurrentFlushRequestFence() const { 425u64 GPU::CurrentSyncRequestFence() const {
777 return impl->CurrentFlushRequestFence(); 426 return impl->CurrentSyncRequestFence();
427}
428
429void GPU::WaitForSyncOperation(u64 fence) {
430 return impl->WaitForSyncOperation(fence);
778} 431}
779 432
780void GPU::TickWork() { 433void GPU::TickWork() {
781 impl->TickWork(); 434 impl->TickWork();
782} 435}
783 436
437/// Gets a mutable reference to the Host1x interface
438Host1x::Host1x& GPU::Host1x() {
439 return impl->host1x;
440}
441
442/// Gets an immutable reference to the Host1x interface.
443const Host1x::Host1x& GPU::Host1x() const {
444 return impl->host1x;
445}
446
784Engines::Maxwell3D& GPU::Maxwell3D() { 447Engines::Maxwell3D& GPU::Maxwell3D() {
785 return impl->Maxwell3D(); 448 return impl->Maxwell3D();
786} 449}
@@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const {
797 return impl->KeplerCompute(); 460 return impl->KeplerCompute();
798} 461}
799 462
800Tegra::MemoryManager& GPU::MemoryManager() {
801 return impl->MemoryManager();
802}
803
804const Tegra::MemoryManager& GPU::MemoryManager() const {
805 return impl->MemoryManager();
806}
807
808Tegra::DmaPusher& GPU::DmaPusher() { 463Tegra::DmaPusher& GPU::DmaPusher() {
809 return impl->DmaPusher(); 464 return impl->DmaPusher();
810} 465}
@@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
829 return impl->ShaderNotify(); 484 return impl->ShaderNotify();
830} 485}
831 486
832void GPU::WaitFence(u32 syncpoint_id, u32 value) { 487void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
833 impl->WaitFence(syncpoint_id, value); 488 std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
834} 489 impl->RequestSwapBuffers(framebuffer, fences, num_fences);
835
836void GPU::IncrementSyncPoint(u32 syncpoint_id) {
837 impl->IncrementSyncPoint(syncpoint_id);
838}
839
840u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
841 return impl->GetSyncpointValue(syncpoint_id);
842}
843
844void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
845 impl->RegisterSyncptInterrupt(syncpoint_id, value);
846}
847
848bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
849 return impl->CancelSyncptInterrupt(syncpoint_id, value);
850} 490}
851 491
852u64 GPU::GetTicks() const { 492u64 GPU::GetTicks() const {
@@ -881,8 +521,8 @@ void GPU::ReleaseContext() {
881 impl->ReleaseContext(); 521 impl->ReleaseContext();
882} 522}
883 523
884void GPU::PushGPUEntries(Tegra::CommandList&& entries) { 524void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
885 impl->PushGPUEntries(std::move(entries)); 525 impl->PushGPUEntries(channel, std::move(entries));
886} 526}
887 527
888void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) { 528void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
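
gpu.cpp above swaps the single implicit DMA pusher for an explicit channel lifecycle: AllocateChannel creates a ChannelState and declares it to the scheduler, InitChannel wires it to the rasterizer, BindChannel selects it, and command lists are pushed per channel id. A stripped-down sketch of the registry bookkeeping behind that flow, using a bare ChannelState stand-in (the real one owns the engines and the DmaPusher):

    #include <cassert>
    #include <cstdio>
    #include <memory>
    #include <unordered_map>

    // Bare stand-in; the real Tegra::Control::ChannelState owns the engines and DmaPusher.
    struct ChannelState {
        explicit ChannelState(int id_) : id{id_} {}
        int id;
    };

    class ChannelRegistry {
    public:
        std::shared_ptr<ChannelState> Allocate() {
            auto state = std::make_shared<ChannelState>(new_channel_id++);
            channels.emplace(state->id, state);
            return state;
        }

        void Bind(int channel_id) {
            if (bound_channel == channel_id) {
                return;  // already bound, nothing to do
            }
            const auto it = channels.find(channel_id);
            assert(it != channels.end());
            bound_channel = channel_id;
            current = it->second.get();
        }

        ChannelState& Current() {
            assert(current != nullptr);
            return *current;
        }

    private:
        std::unordered_map<int, std::shared_ptr<ChannelState>> channels;
        ChannelState* current{nullptr};
        int bound_channel{-1};
        int new_channel_id{1};
    };

    int main() {
        ChannelRegistry registry;
        const auto channel = registry.Allocate();
        registry.Bind(channel->id);
        std::printf("bound channel %d\n", registry.Current().id);
        return 0;
    }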
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index b939ba315..0a4a8b14f 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -89,73 +89,58 @@ class Maxwell3D;
89class KeplerCompute; 89class KeplerCompute;
90} // namespace Engines 90} // namespace Engines
91 91
92enum class EngineID { 92namespace Control {
93 FERMI_TWOD_A = 0x902D, // 2D Engine 93struct ChannelState;
94 MAXWELL_B = 0xB197, // 3D Engine 94}
95 KEPLER_COMPUTE_B = 0xB1C0, 95
96 KEPLER_INLINE_TO_MEMORY_B = 0xA140, 96namespace Host1x {
97 MAXWELL_DMA_COPY_A = 0xB0B5, 97class Host1x;
98}; 98} // namespace Host1x
99 99
100class MemoryManager; 100class MemoryManager;
101 101
102class GPU final { 102class GPU final {
103public: 103public:
104 struct MethodCall {
105 u32 method{};
106 u32 argument{};
107 u32 subchannel{};
108 u32 method_count{};
109
110 explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
111 : method(method_), argument(argument_), subchannel(subchannel_),
112 method_count(method_count_) {}
113
114 [[nodiscard]] bool IsLastCall() const {
115 return method_count <= 1;
116 }
117 };
118
119 enum class FenceOperation : u32 {
120 Acquire = 0,
121 Increment = 1,
122 };
123
124 union FenceAction {
125 u32 raw;
126 BitField<0, 1, FenceOperation> op;
127 BitField<8, 24, u32> syncpoint_id;
128 };
129
130 explicit GPU(Core::System& system, bool is_async, bool use_nvdec); 104 explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
131 ~GPU(); 105 ~GPU();
132 106
133 /// Binds a renderer to the GPU. 107 /// Binds a renderer to the GPU.
134 void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer); 108 void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);
135 109
136 /// Calls a GPU method.
137 void CallMethod(const MethodCall& method_call);
138
139 /// Calls a GPU multivalue method.
140 void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
141 u32 methods_pending);
142
143 /// Flush all current written commands into the host GPU for execution. 110 /// Flush all current written commands into the host GPU for execution.
144 void FlushCommands(); 111 void FlushCommands();
145 /// Synchronizes CPU writes with Host GPU memory. 112 /// Synchronizes CPU writes with Host GPU memory.
146 void SyncGuestHost(); 113 void InvalidateGPUCache();
147 /// Signal the ending of command list. 114 /// Signal the ending of command list.
148 void OnCommandListEnd(); 115 void OnCommandListEnd();
149 116
117 std::shared_ptr<Control::ChannelState> AllocateChannel();
118
119 void InitChannel(Control::ChannelState& to_init);
120
121 void BindChannel(s32 channel_id);
122
123 void ReleaseChannel(Control::ChannelState& to_release);
124
125 void InitAddressSpace(Tegra::MemoryManager& memory_manager);
126
150 /// Request a host GPU memory flush from the CPU. 127 /// Request a host GPU memory flush from the CPU.
151 [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); 128 [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
152 129
153 /// Obtains current flush request fence id. 130 /// Obtains current flush request fence id.
154 [[nodiscard]] u64 CurrentFlushRequestFence() const; 131 [[nodiscard]] u64 CurrentSyncRequestFence() const;
132
133 void WaitForSyncOperation(u64 fence);
155 134
156 /// Tick pending requests within the GPU. 135 /// Tick pending requests within the GPU.
157 void TickWork(); 136 void TickWork();
158 137
138 /// Gets a mutable reference to the Host1x interface
139 [[nodiscard]] Host1x::Host1x& Host1x();
140
141 /// Gets an immutable reference to the Host1x interface.
142 [[nodiscard]] const Host1x::Host1x& Host1x() const;
143
159 /// Returns a reference to the Maxwell3D GPU engine. 144 /// Returns a reference to the Maxwell3D GPU engine.
160 [[nodiscard]] Engines::Maxwell3D& Maxwell3D(); 145 [[nodiscard]] Engines::Maxwell3D& Maxwell3D();
161 146
@@ -168,12 +153,6 @@ public:
168 /// Returns a reference to the KeplerCompute GPU engine. 153 /// Returns a reference to the KeplerCompute GPU engine.
169 [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const; 154 [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const;
170 155
171 /// Returns a reference to the GPU memory manager.
172 [[nodiscard]] Tegra::MemoryManager& MemoryManager();
173
174 /// Returns a const reference to the GPU memory manager.
175 [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const;
176
177 /// Returns a reference to the GPU DMA pusher. 156 /// Returns a reference to the GPU DMA pusher.
178 [[nodiscard]] Tegra::DmaPusher& DmaPusher(); 157 [[nodiscard]] Tegra::DmaPusher& DmaPusher();
179 158
@@ -192,17 +171,6 @@ public:
192 /// Returns a const reference to the shader notifier. 171 /// Returns a const reference to the shader notifier.
193 [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const; 172 [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
194 173
195 /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
196 void WaitFence(u32 syncpoint_id, u32 value);
197
198 void IncrementSyncPoint(u32 syncpoint_id);
199
200 [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
201
202 void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
203
204 [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
205
206 [[nodiscard]] u64 GetTicks() const; 174 [[nodiscard]] u64 GetTicks() const;
207 175
208 [[nodiscard]] bool IsAsync() const; 176 [[nodiscard]] bool IsAsync() const;
@@ -211,6 +179,9 @@ public:
211 179
212 void RendererFrameEndNotify(); 180 void RendererFrameEndNotify();
213 181
182 void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
183 std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences);
184
214 /// Performs any additional setup necessary in order to begin GPU emulation. 185 /// Performs any additional setup necessary in order to begin GPU emulation.
215 /// This can be used to launch any necessary threads and register any necessary 186 /// This can be used to launch any necessary threads and register any necessary
216 /// core timing events. 187 /// core timing events.
@@ -226,7 +197,7 @@ public:
226 void ReleaseContext(); 197 void ReleaseContext();
227 198
228 /// Push GPU command entries to be processed 199 /// Push GPU command entries to be processed
229 void PushGPUEntries(Tegra::CommandList&& entries); 200 void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
230 201
231 /// Push GPU command buffer entries to be processed 202 /// Push GPU command buffer entries to be processed
232 void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries); 203 void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +219,7 @@ public:
248 219
249private: 220private:
250 struct Impl; 221 struct Impl;
251 std::unique_ptr<Impl> impl; 222 mutable std::unique_ptr<Impl> impl;
252}; 223};
253 224
254} // namespace Tegra 225} // namespace Tegra
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index f0e48cfbd..1bd477011 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -8,6 +8,7 @@
8#include "common/thread.h" 8#include "common/thread.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/frontend/emu_window.h" 10#include "core/frontend/emu_window.h"
11#include "video_core/control/scheduler.h"
11#include "video_core/dma_pusher.h" 12#include "video_core/dma_pusher.h"
12#include "video_core/gpu.h" 13#include "video_core/gpu.h"
13#include "video_core/gpu_thread.h" 14#include "video_core/gpu_thread.h"
@@ -18,7 +19,7 @@ namespace VideoCommon::GPUThread {
18/// Runs the GPU thread 19/// Runs the GPU thread
19static void RunThread(std::stop_token stop_token, Core::System& system, 20static void RunThread(std::stop_token stop_token, Core::System& system,
20 VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, 21 VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
21 Tegra::DmaPusher& dma_pusher, SynchState& state) { 22 Tegra::Control::Scheduler& scheduler, SynchState& state) {
22 std::string name = "GPU"; 23 std::string name = "GPU";
23 MicroProfileOnThreadCreate(name.c_str()); 24 MicroProfileOnThreadCreate(name.c_str());
24 SCOPE_EXIT({ MicroProfileOnThreadExit(); }); 25 SCOPE_EXIT({ MicroProfileOnThreadExit(); });
@@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
36 break; 37 break;
37 } 38 }
38 if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) { 39 if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
39 dma_pusher.Push(std::move(submit_list->entries)); 40 scheduler.Push(submit_list->channel, std::move(submit_list->entries));
40 dma_pusher.DispatchCalls();
41 } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) { 41 } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
42 renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); 42 renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
43 } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) { 43 } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
@@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default;
68 68
69void ThreadManager::StartThread(VideoCore::RendererBase& renderer, 69void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
70 Core::Frontend::GraphicsContext& context, 70 Core::Frontend::GraphicsContext& context,
71 Tegra::DmaPusher& dma_pusher) { 71 Tegra::Control::Scheduler& scheduler) {
72 rasterizer = renderer.ReadRasterizer(); 72 rasterizer = renderer.ReadRasterizer();
73 thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context), 73 thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
74 std::ref(dma_pusher), std::ref(state)); 74 std::ref(scheduler), std::ref(state));
75} 75}
76 76
77void ThreadManager::SubmitList(Tegra::CommandList&& entries) { 77void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
78 PushCommand(SubmitListCommand(std::move(entries))); 78 PushCommand(SubmitListCommand(channel, std::move(entries)));
79} 79}
80 80
81void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { 81void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
@@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) {
93 } 93 }
94 auto& gpu = system.GPU(); 94 auto& gpu = system.GPU();
95 u64 fence = gpu.RequestFlush(addr, size); 95 u64 fence = gpu.RequestFlush(addr, size);
96 PushCommand(GPUTickCommand(), true); 96 TickGPU();
97 ASSERT(fence <= gpu.CurrentFlushRequestFence()); 97 gpu.WaitForSyncOperation(fence);
98}
99
100void ThreadManager::TickGPU() {
101 PushCommand(GPUTickCommand());
98} 102}
99 103
100void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { 104void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
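
In gpu_thread.cpp above, submissions are routed through Tegra::Control::Scheduler rather than a single DmaPusher, with each SubmitListCommand carrying its channel id. The scheduler itself is defined elsewhere in this change, so the following is only a toy sketch of per-channel routing under that assumption:

    #include <cstdio>
    #include <deque>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    using CommandList = std::vector<unsigned>;  // stand-in for Tegra::CommandList

    class ToyScheduler {
    public:
        void DeclareChannel(int channel_id) {
            queues.emplace(channel_id, std::deque<CommandList>{});
        }

        // Queue a command list on one channel, then dispatch that channel's work.
        void Push(int channel_id, CommandList&& entries) {
            auto& queue = queues.at(channel_id);
            queue.push_back(std::move(entries));
            Dispatch(channel_id);
        }

    private:
        void Dispatch(int channel_id) {
            auto& queue = queues.at(channel_id);
            while (!queue.empty()) {
                const CommandList list = std::move(queue.front());
                queue.pop_front();
                std::printf("channel %d: executing %zu entries\n", channel_id, list.size());
            }
        }

        std::unordered_map<int, std::deque<CommandList>> queues;
    };

    int main() {
        ToyScheduler scheduler;
        scheduler.DeclareChannel(1);
        scheduler.Push(1, CommandList{0xDEAD, 0xBEEF});
        return 0;
    }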
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 2f8210cb9..64628d3e3 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -15,7 +15,9 @@
15 15
16namespace Tegra { 16namespace Tegra {
17struct FramebufferConfig; 17struct FramebufferConfig;
18class DmaPusher; 18namespace Control {
19class Scheduler;
20}
19} // namespace Tegra 21} // namespace Tegra
20 22
21namespace Core { 23namespace Core {
@@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread {
34 36
35/// Command to signal to the GPU thread that a command list is ready for processing 37/// Command to signal to the GPU thread that a command list is ready for processing
36struct SubmitListCommand final { 38struct SubmitListCommand final {
37 explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {} 39 explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
40 : channel{channel_}, entries{std::move(entries_)} {}
38 41
42 s32 channel;
39 Tegra::CommandList entries; 43 Tegra::CommandList entries;
40}; 44};
41 45
@@ -112,10 +116,10 @@ public:
112 116
113 /// Creates and starts the GPU thread. 117 /// Creates and starts the GPU thread.
114 void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, 118 void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
115 Tegra::DmaPusher& dma_pusher); 119 Tegra::Control::Scheduler& scheduler);
116 120
117 /// Push GPU command entries to be processed 121 /// Push GPU command entries to be processed
118 void SubmitList(Tegra::CommandList&& entries); 122 void SubmitList(s32 channel, Tegra::CommandList&& entries);
119 123
120 /// Swap buffers (render frame) 124 /// Swap buffers (render frame)
121 void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); 125 void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
@@ -131,6 +135,8 @@ public:
131 135
132 void OnCommandListEnd(); 136 void OnCommandListEnd();
133 137
138 void TickGPU();
139
134private: 140private:
135 /// Pushes a command to be executed by the GPU thread 141 /// Pushes a command to be executed by the GPU thread
136 u64 PushCommand(CommandData&& command_data, bool block = false); 142 u64 PushCommand(CommandData&& command_data, bool block = false);
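
The gpu_thread.h hunks above extend SubmitListCommand with the submitting channel so the variant-based command queue can hand each list to the right channel on dispatch. A small sketch of such a payload moving through that kind of queue, with stand-in types for the parts not shown here:

    #include <cstdio>
    #include <queue>
    #include <variant>
    #include <vector>

    using CommandList = std::vector<unsigned>;  // stand-in for Tegra::CommandList

    struct SubmitListCommand {
        int channel;  // which channel the list belongs to
        CommandList entries;
    };

    struct SwapBuffersCommand {};

    using CommandData = std::variant<SubmitListCommand, SwapBuffersCommand>;

    int main() {
        std::queue<CommandData> commands;
        commands.push(SubmitListCommand{.channel = 1, .entries = {0x1234}});
        commands.push(SwapBuffersCommand{});

        while (!commands.empty()) {
            CommandData next = std::move(commands.front());
            commands.pop();
            if (const auto* submit = std::get_if<SubmitListCommand>(&next)) {
                std::printf("submit %zu entries on channel %d\n", submit->entries.size(),
                            submit->channel);
            } else {
                std::puts("swap buffers");
            }
        }
        return 0;
    }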
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp
index a5eb97b7f..42e7d6e4f 100644
--- a/src/video_core/command_classes/codecs/codec.cpp
+++ b/src/video_core/host1x/codecs/codec.cpp
@@ -6,11 +6,11 @@
6#include <vector> 6#include <vector>
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/settings.h" 8#include "common/settings.h"
9#include "video_core/command_classes/codecs/codec.h" 9#include "video_core/host1x/codecs/codec.h"
10#include "video_core/command_classes/codecs/h264.h" 10#include "video_core/host1x/codecs/h264.h"
11#include "video_core/command_classes/codecs/vp8.h" 11#include "video_core/host1x/codecs/vp8.h"
12#include "video_core/command_classes/codecs/vp9.h" 12#include "video_core/host1x/codecs/vp9.h"
13#include "video_core/gpu.h" 13#include "video_core/host1x/host1x.h"
14#include "video_core/memory_manager.h" 14#include "video_core/memory_manager.h"
15 15
16extern "C" { 16extern "C" {
@@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) {
73 av_frame_free(&ptr); 73 av_frame_free(&ptr);
74} 74}
75 75
76Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs) 76Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
77 : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)), 77 : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
78 vp8_decoder(std::make_unique<Decoder::VP8>(gpu)), 78 vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
79 vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {} 79 vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}
80 80
81Codec::~Codec() { 81Codec::~Codec() {
82 if (!initialized) { 82 if (!initialized) {
@@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() {
168void Codec::Initialize() { 168void Codec::Initialize() {
169 const AVCodecID codec = [&] { 169 const AVCodecID codec = [&] {
170 switch (current_codec) { 170 switch (current_codec) {
171 case NvdecCommon::VideoCodec::H264: 171 case Host1x::NvdecCommon::VideoCodec::H264:
172 return AV_CODEC_ID_H264; 172 return AV_CODEC_ID_H264;
173 case NvdecCommon::VideoCodec::VP8: 173 case Host1x::NvdecCommon::VideoCodec::VP8:
174 return AV_CODEC_ID_VP8; 174 return AV_CODEC_ID_VP8;
175 case NvdecCommon::VideoCodec::VP9: 175 case Host1x::NvdecCommon::VideoCodec::VP9:
176 return AV_CODEC_ID_VP9; 176 return AV_CODEC_ID_VP9;
177 default: 177 default:
178 UNIMPLEMENTED_MSG("Unknown codec {}", current_codec); 178 UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
@@ -197,7 +197,7 @@ void Codec::Initialize() {
197 initialized = true; 197 initialized = true;
198} 198}
199 199
200void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) { 200void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
201 if (current_codec != codec) { 201 if (current_codec != codec) {
202 current_codec = codec; 202 current_codec = codec;
203 LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName()); 203 LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
@@ -215,11 +215,11 @@ void Codec::Decode() {
215 bool vp9_hidden_frame = false; 215 bool vp9_hidden_frame = false;
216 const auto& frame_data = [&]() { 216 const auto& frame_data = [&]() {
217 switch (current_codec) { 217 switch (current_codec) {
218 case Tegra::NvdecCommon::VideoCodec::H264: 218 case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
219 return h264_decoder->ComposeFrame(state, is_first_frame); 219 return h264_decoder->ComposeFrame(state, is_first_frame);
220 case Tegra::NvdecCommon::VideoCodec::VP8: 220 case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
221 return vp8_decoder->ComposeFrame(state); 221 return vp8_decoder->ComposeFrame(state);
222 case Tegra::NvdecCommon::VideoCodec::VP9: 222 case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
223 vp9_decoder->ComposeFrame(state); 223 vp9_decoder->ComposeFrame(state);
224 vp9_hidden_frame = vp9_decoder->WasFrameHidden(); 224 vp9_hidden_frame = vp9_decoder->WasFrameHidden();
225 return vp9_decoder->GetFrameBytes(); 225 return vp9_decoder->GetFrameBytes();
@@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() {
287 return frame; 287 return frame;
288} 288}
289 289
290NvdecCommon::VideoCodec Codec::GetCurrentCodec() const { 290Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
291 return current_codec; 291 return current_codec;
292} 292}
293 293
294std::string_view Codec::GetCurrentCodecName() const { 294std::string_view Codec::GetCurrentCodecName() const {
295 switch (current_codec) { 295 switch (current_codec) {
296 case NvdecCommon::VideoCodec::None: 296 case Host1x::NvdecCommon::VideoCodec::None:
297 return "None"; 297 return "None";
298 case NvdecCommon::VideoCodec::H264: 298 case Host1x::NvdecCommon::VideoCodec::H264:
299 return "H264"; 299 return "H264";
300 case NvdecCommon::VideoCodec::VP8: 300 case Host1x::NvdecCommon::VideoCodec::VP8:
301 return "VP8"; 301 return "VP8";
302 case NvdecCommon::VideoCodec::H265: 302 case Host1x::NvdecCommon::VideoCodec::H265:
303 return "H265"; 303 return "H265";
304 case NvdecCommon::VideoCodec::VP9: 304 case Host1x::NvdecCommon::VideoCodec::VP9:
305 return "VP9"; 305 return "VP9";
306 default: 306 default:
307 return "Unknown"; 307 return "Unknown";
diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h
index 0c2405465..0d45fb7fe 100644
--- a/src/video_core/command_classes/codecs/codec.h
+++ b/src/video_core/host1x/codecs/codec.h
@@ -6,8 +6,8 @@
6#include <memory> 6#include <memory>
7#include <string_view> 7#include <string_view>
8#include <queue> 8#include <queue>
9 9#include "common/common_types.h"
10#include "video_core/command_classes/nvdec_common.h" 10#include "video_core/host1x/nvdec_common.h"
11 11
12extern "C" { 12extern "C" {
13#if defined(__GNUC__) || defined(__clang__) 13#if defined(__GNUC__) || defined(__clang__)
@@ -21,7 +21,6 @@ extern "C" {
21} 21}
22 22
23namespace Tegra { 23namespace Tegra {
24class GPU;
25 24
26void AVFrameDeleter(AVFrame* ptr); 25void AVFrameDeleter(AVFrame* ptr);
27using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>; 26using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
@@ -32,16 +31,20 @@ class VP8;
32class VP9; 31class VP9;
33} // namespace Decoder 32} // namespace Decoder
34 33
34namespace Host1x {
35class Host1x;
36} // namespace Host1x
37
35class Codec { 38class Codec {
36public: 39public:
37 explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs); 40 explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
38 ~Codec(); 41 ~Codec();
39 42
40 /// Initialize the codec, returning success or failure 43 /// Initialize the codec, returning success or failure
41 void Initialize(); 44 void Initialize();
42 45
43 /// Sets NVDEC video stream codec 46 /// Sets NVDEC video stream codec
44 void SetTargetCodec(NvdecCommon::VideoCodec codec); 47 void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);
45 48
46 /// Call decoders to construct headers, decode AVFrame with ffmpeg 49 /// Call decoders to construct headers, decode AVFrame with ffmpeg
47 void Decode(); 50 void Decode();
@@ -50,7 +53,7 @@ public:
50 [[nodiscard]] AVFramePtr GetCurrentFrame(); 53 [[nodiscard]] AVFramePtr GetCurrentFrame();
51 54
52 /// Returns the value of current_codec 55 /// Returns the value of current_codec
53 [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const; 56 [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;
54 57
55 /// Return name of the current codec 58 /// Return name of the current codec
56 [[nodiscard]] std::string_view GetCurrentCodecName() const; 59 [[nodiscard]] std::string_view GetCurrentCodecName() const;
@@ -63,14 +66,14 @@ private:
63 bool CreateGpuAvDevice(); 66 bool CreateGpuAvDevice();
64 67
65 bool initialized{}; 68 bool initialized{};
66 NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None}; 69 Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
67 70
68 const AVCodec* av_codec{nullptr}; 71 const AVCodec* av_codec{nullptr};
69 AVCodecContext* av_codec_ctx{nullptr}; 72 AVCodecContext* av_codec_ctx{nullptr};
70 AVBufferRef* av_gpu_decoder{nullptr}; 73 AVBufferRef* av_gpu_decoder{nullptr};
71 74
72 GPU& gpu; 75 Host1x::Host1x& host1x;
73 const NvdecCommon::NvdecRegisters& state; 76 const Host1x::NvdecCommon::NvdecRegisters& state;
74 std::unique_ptr<Decoder::H264> h264_decoder; 77 std::unique_ptr<Decoder::H264> h264_decoder;
75 std::unique_ptr<Decoder::VP8> vp8_decoder; 78 std::unique_ptr<Decoder::VP8> vp8_decoder;
76 std::unique_ptr<Decoder::VP9> vp9_decoder; 79 std::unique_ptr<Decoder::VP9> vp9_decoder;
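The reworked Codec interface above is now driven entirely through Host1x rather than GPU. A minimal, hypothetical call-order sketch (host1x and regs are assumed to be provided by the surrounding NVDEC plumbing; this is not code from the commit):

    // Illustrative only; host1x is a Tegra::Host1x::Host1x&, regs an NvdecRegisters block.
    Tegra::Codec codec(host1x, regs);
    codec.SetTargetCodec(Tegra::Host1x::NvdecCommon::VideoCodec::H264);
    codec.Initialize();                                  // set up the FFmpeg decoder context
    codec.Decode();                                      // compose a frame and submit it to FFmpeg
    Tegra::AVFramePtr frame = codec.GetCurrentFrame();   // pop the oldest decoded AVFrame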
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index e2acd54d4..e87bd65fa 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -5,8 +5,8 @@
5#include <bit> 5#include <bit>
6 6
7#include "common/settings.h" 7#include "common/settings.h"
8#include "video_core/command_classes/codecs/h264.h" 8#include "video_core/host1x/codecs/h264.h"
9#include "video_core/gpu.h" 9#include "video_core/host1x/host1x.h"
10#include "video_core/memory_manager.h" 10#include "video_core/memory_manager.h"
11 11
12namespace Tegra::Decoder { 12namespace Tegra::Decoder {
@@ -24,19 +24,20 @@ constexpr std::array<u8, 16> zig_zag_scan{
24}; 24};
25} // Anonymous namespace 25} // Anonymous namespace
26 26
27H264::H264(GPU& gpu_) : gpu(gpu_) {} 27H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}
28 28
29H264::~H264() = default; 29H264::~H264() = default;
30 30
31const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state, 31const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
32 bool is_first_frame) { 32 bool is_first_frame) {
33 H264DecoderContext context; 33 H264DecoderContext context;
34 gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); 34 host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
35 sizeof(H264DecoderContext));
35 36
36 const s64 frame_number = context.h264_parameter_set.frame_number.Value(); 37 const s64 frame_number = context.h264_parameter_set.frame_number.Value();
37 if (!is_first_frame && frame_number != 0) { 38 if (!is_first_frame && frame_number != 0) {
38 frame.resize(context.stream_len); 39 frame.resize(context.stream_len);
39 gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); 40 host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
40 return frame; 41 return frame;
41 } 42 }
42 43
@@ -155,8 +156,8 @@ const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& sta
155 frame.resize(encoded_header.size() + context.stream_len); 156 frame.resize(encoded_header.size() + context.stream_len);
156 std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); 157 std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
157 158
158 gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, 159 host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
159 frame.data() + encoded_header.size(), context.stream_len); 160 frame.data() + encoded_header.size(), context.stream_len);
160 161
161 return frame; 162 return frame;
162} 163}
diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h
index 261574364..5cc86454e 100644
--- a/src/video_core/command_classes/codecs/h264.h
+++ b/src/video_core/host1x/codecs/h264.h
@@ -8,10 +8,14 @@
8#include "common/bit_field.h" 8#include "common/bit_field.h"
9#include "common/common_funcs.h" 9#include "common/common_funcs.h"
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/command_classes/nvdec_common.h" 11#include "video_core/host1x/nvdec_common.h"
12 12
13namespace Tegra { 13namespace Tegra {
14class GPU; 14
15namespace Host1x {
16class Host1x;
17} // namespace Host1x
18
15namespace Decoder { 19namespace Decoder {
16 20
17class H264BitWriter { 21class H264BitWriter {
@@ -55,16 +59,16 @@ private:
55 59
56class H264 { 60class H264 {
57public: 61public:
58 explicit H264(GPU& gpu); 62 explicit H264(Host1x::Host1x& host1x);
59 ~H264(); 63 ~H264();
60 64
61 /// Compose the H264 frame for FFmpeg decoding 65 /// Compose the H264 frame for FFmpeg decoding
62 [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state, 66 [[nodiscard]] const std::vector<u8>& ComposeFrame(
63 bool is_first_frame = false); 67 const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false);
64 68
65private: 69private:
66 std::vector<u8> frame; 70 std::vector<u8> frame;
67 GPU& gpu; 71 Host1x::Host1x& host1x;
68 72
69 struct H264ParameterSet { 73 struct H264ParameterSet {
70 s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00 74 s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp
index c83b9bbc2..28fb12cb8 100644
--- a/src/video_core/command_classes/codecs/vp8.cpp
+++ b/src/video_core/host1x/codecs/vp8.cpp
@@ -3,18 +3,18 @@
3 3
4#include <vector> 4#include <vector>
5 5
6#include "video_core/command_classes/codecs/vp8.h" 6#include "video_core/host1x/codecs/vp8.h"
7#include "video_core/gpu.h" 7#include "video_core/host1x/host1x.h"
8#include "video_core/memory_manager.h" 8#include "video_core/memory_manager.h"
9 9
10namespace Tegra::Decoder { 10namespace Tegra::Decoder {
11VP8::VP8(GPU& gpu_) : gpu(gpu_) {} 11VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}
12 12
13VP8::~VP8() = default; 13VP8::~VP8() = default;
14 14
15const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { 15const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
16 VP8PictureInfo info; 16 VP8PictureInfo info;
17 gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); 17 host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
18 18
19 const bool is_key_frame = info.key_frame == 1u; 19 const bool is_key_frame = info.key_frame == 1u;
20 const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size); 20 const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
@@ -45,7 +45,7 @@ const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& stat
45 frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f)); 45 frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
46 } 46 }
47 const u64 bitstream_offset = state.frame_bitstream_offset; 47 const u64 bitstream_offset = state.frame_bitstream_offset;
48 gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); 48 host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
49 49
50 return frame; 50 return frame;
51} 51}
diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h
index 3357667b0..5bf07ecab 100644
--- a/src/video_core/command_classes/codecs/vp8.h
+++ b/src/video_core/host1x/codecs/vp8.h
@@ -8,23 +8,28 @@
8 8
9#include "common/common_funcs.h" 9#include "common/common_funcs.h"
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/command_classes/nvdec_common.h" 11#include "video_core/host1x/nvdec_common.h"
12 12
13namespace Tegra { 13namespace Tegra {
14class GPU; 14
15namespace Host1x {
16class Host1x;
17} // namespace Host1x
18
15namespace Decoder { 19namespace Decoder {
16 20
17class VP8 { 21class VP8 {
18public: 22public:
19 explicit VP8(GPU& gpu); 23 explicit VP8(Host1x::Host1x& host1x);
20 ~VP8(); 24 ~VP8();
21 25
22 /// Compose the VP8 frame for FFmpeg decoding 26 /// Compose the VP8 frame for FFmpeg decoding
23 [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state); 27 [[nodiscard]] const std::vector<u8>& ComposeFrame(
28 const Host1x::NvdecCommon::NvdecRegisters& state);
24 29
25private: 30private:
26 std::vector<u8> frame; 31 std::vector<u8> frame;
27 GPU& gpu; 32 Host1x::Host1x& host1x;
28 33
29 struct VP8PictureInfo { 34 struct VP8PictureInfo {
30 INSERT_PADDING_WORDS_NOINIT(14); 35 INSERT_PADDING_WORDS_NOINIT(14);
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp
index c01431441..cf40c9012 100644
--- a/src/video_core/command_classes/codecs/vp9.cpp
+++ b/src/video_core/host1x/codecs/vp9.cpp
@@ -4,8 +4,8 @@
4#include <algorithm> // for std::copy 4#include <algorithm> // for std::copy
5#include <numeric> 5#include <numeric>
6#include "common/assert.h" 6#include "common/assert.h"
7#include "video_core/command_classes/codecs/vp9.h" 7#include "video_core/host1x/codecs/vp9.h"
8#include "video_core/gpu.h" 8#include "video_core/host1x/host1x.h"
9#include "video_core/memory_manager.h" 9#include "video_core/memory_manager.h"
10 10
11namespace Tegra::Decoder { 11namespace Tegra::Decoder {
@@ -236,7 +236,7 @@ constexpr std::array<u8, 254> map_lut{
236} 236}
237} // Anonymous namespace 237} // Anonymous namespace
238 238
239VP9::VP9(GPU& gpu_) : gpu{gpu_} {} 239VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {}
240 240
241VP9::~VP9() = default; 241VP9::~VP9() = default;
242 242
@@ -355,9 +355,9 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_
355 } 355 }
356} 356}
357 357
358Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) { 358Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
359 PictureInfo picture_info; 359 PictureInfo picture_info;
360 gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); 360 host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
361 Vp9PictureInfo vp9_info = picture_info.Convert(); 361 Vp9PictureInfo vp9_info = picture_info.Convert();
362 362
363 InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy); 363 InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
@@ -372,18 +372,19 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state)
372 372
373void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { 373void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
374 EntropyProbs entropy; 374 EntropyProbs entropy;
375 gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); 375 host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
376 entropy.Convert(dst); 376 entropy.Convert(dst);
377} 377}
378 378
379Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) { 379Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
380 Vp9FrameContainer current_frame{}; 380 Vp9FrameContainer current_frame{};
381 { 381 {
382 gpu.SyncGuestHost(); 382 // gpu.SyncGuestHost(); epic, why?
383 current_frame.info = GetVp9PictureInfo(state); 383 current_frame.info = GetVp9PictureInfo(state);
384 current_frame.bit_stream.resize(current_frame.info.bitstream_size); 384 current_frame.bit_stream.resize(current_frame.info.bitstream_size);
385 gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), 385 host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
386 current_frame.info.bitstream_size); 386 current_frame.bit_stream.data(),
387 current_frame.info.bitstream_size);
387 } 388 }
388 if (!next_frame.bit_stream.empty()) { 389 if (!next_frame.bit_stream.empty()) {
389 Vp9FrameContainer temp{ 390 Vp9FrameContainer temp{
@@ -769,7 +770,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
769 return uncomp_writer; 770 return uncomp_writer;
770} 771}
771 772
772void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) { 773void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
773 std::vector<u8> bitstream; 774 std::vector<u8> bitstream;
774 { 775 {
775 Vp9FrameContainer curr_frame = GetCurrentFrame(state); 776 Vp9FrameContainer curr_frame = GetCurrentFrame(state);
diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h
index ecc40e8b1..d4083e8d3 100644
--- a/src/video_core/command_classes/codecs/vp9.h
+++ b/src/video_core/host1x/codecs/vp9.h
@@ -8,11 +8,15 @@
8 8
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/stream.h" 10#include "common/stream.h"
11#include "video_core/command_classes/codecs/vp9_types.h" 11#include "video_core/host1x/codecs/vp9_types.h"
12#include "video_core/command_classes/nvdec_common.h" 12#include "video_core/host1x/nvdec_common.h"
13 13
14namespace Tegra { 14namespace Tegra {
15class GPU; 15
16namespace Host1x {
17class Host1x;
18} // namespace Host1x
19
16namespace Decoder { 20namespace Decoder {
17 21
18/// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the 22/// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the
@@ -106,7 +110,7 @@ private:
106 110
107class VP9 { 111class VP9 {
108public: 112public:
109 explicit VP9(GPU& gpu_); 113 explicit VP9(Host1x::Host1x& host1x);
110 ~VP9(); 114 ~VP9();
111 115
112 VP9(const VP9&) = delete; 116 VP9(const VP9&) = delete;
@@ -117,7 +121,7 @@ public:
117 121
118 /// Composes the VP9 frame from the GPU state information. 122 /// Composes the VP9 frame from the GPU state information.
119 /// Based on the official VP9 spec documentation 123 /// Based on the official VP9 spec documentation
120 void ComposeFrame(const NvdecCommon::NvdecRegisters& state); 124 void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);
121 125
122 /// Returns true if the most recent frame was a hidden frame. 126 /// Returns true if the most recent frame was a hidden frame.
123 [[nodiscard]] bool WasFrameHidden() const { 127 [[nodiscard]] bool WasFrameHidden() const {
@@ -162,19 +166,21 @@ private:
162 void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob); 166 void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
163 167
164 /// Returns VP9 information from NVDEC provided offset and size 168 /// Returns VP9 information from NVDEC provided offset and size
165 [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state); 169 [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
170 const Host1x::NvdecCommon::NvdecRegisters& state);
166 171
167 /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct 172 /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
168 void InsertEntropy(u64 offset, Vp9EntropyProbs& dst); 173 void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
169 174
170 /// Returns frame to be decoded after buffering 175 /// Returns frame to be decoded after buffering
171 [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state); 176 [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
177 const Host1x::NvdecCommon::NvdecRegisters& state);
172 178
173 /// Use NVDEC provided information to compose the headers for the current frame 179 /// Use NVDEC provided information to compose the headers for the current frame
174 [[nodiscard]] std::vector<u8> ComposeCompressedHeader(); 180 [[nodiscard]] std::vector<u8> ComposeCompressedHeader();
175 [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader(); 181 [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();
176 182
177 GPU& gpu; 183 Host1x::Host1x& host1x;
178 std::vector<u8> frame; 184 std::vector<u8> frame;
179 185
180 std::array<s8, 4> loop_filter_ref_deltas{}; 186 std::array<s8, 4> loop_filter_ref_deltas{};
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h
index bb3d8df6e..adad8ed7e 100644
--- a/src/video_core/command_classes/codecs/vp9_types.h
+++ b/src/video_core/host1x/codecs/vp9_types.h
@@ -9,7 +9,6 @@
9#include "common/common_types.h" 9#include "common/common_types.h"
10 10
11namespace Tegra { 11namespace Tegra {
12class GPU;
13 12
14namespace Decoder { 13namespace Decoder {
15struct Vp9FrameDimensions { 14struct Vp9FrameDimensions {
diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp
new file mode 100644
index 000000000..dceefdb7f
--- /dev/null
+++ b/src/video_core/host1x/control.cpp
@@ -0,0 +1,33 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "common/assert.h"
5#include "video_core/host1x/control.h"
6#include "video_core/host1x/host1x.h"
7
8namespace Tegra::Host1x {
9
10Control::Control(Host1x& host1x_) : host1x(host1x_) {}
11
12Control::~Control() = default;
13
14void Control::ProcessMethod(Method method, u32 argument) {
15 switch (method) {
16 case Method::LoadSyncptPayload32:
17 syncpoint_value = argument;
18 break;
19 case Method::WaitSyncpt:
20 case Method::WaitSyncpt32:
21 Execute(argument);
22 break;
23 default:
24 UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
25 break;
26 }
27}
28
29void Control::Execute(u32 data) {
30 host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
31}
32
33} // namespace Tegra::Host1x
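The two methods above form a small two-step protocol: LoadSyncptPayload32 latches the threshold value, and a subsequent wait method invokes Execute, which blocks on the Host1x syncpoint manager. A hypothetical caller-side sketch (host1x is assumed to be constructed elsewhere; not part of this commit):

    Tegra::Host1x::Control control{host1x};
    // Latch the value syncpoint 3 must reach, then block until it does.
    control.ProcessMethod(Tegra::Host1x::Control::Method::LoadSyncptPayload32, 128);
    control.ProcessMethod(Tegra::Host1x::Control::Method::WaitSyncpt32, 3);
    // Equivalent to host1x.GetSyncpointManager().WaitHost(3, 128).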
diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h
index bb48a4381..e117888a3 100644
--- a/src/video_core/command_classes/host1x.h
+++ b/src/video_core/host1x/control.h
@@ -1,15 +1,19 @@
1// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
3 4
4#pragma once 5#pragma once
5 6
6#include "common/common_types.h" 7#include "common/common_types.h"
7 8
8namespace Tegra { 9namespace Tegra {
9class GPU; 10
11namespace Host1x {
12
13class Host1x;
10class Nvdec; 14class Nvdec;
11 15
12class Host1x { 16class Control {
13public: 17public:
14 enum class Method : u32 { 18 enum class Method : u32 {
15 WaitSyncpt = 0x8, 19 WaitSyncpt = 0x8,
@@ -17,8 +21,8 @@ public:
17 WaitSyncpt32 = 0x50, 21 WaitSyncpt32 = 0x50,
18 }; 22 };
19 23
20 explicit Host1x(GPU& gpu); 24 explicit Control(Host1x& host1x);
21 ~Host1x(); 25 ~Control();
22 26
23 /// Writes the method into the state, Invoke Execute() if encountered 27 /// Writes the method into the state, Invoke Execute() if encountered
24 void ProcessMethod(Method method, u32 argument); 28 void ProcessMethod(Method method, u32 argument);
@@ -28,7 +32,9 @@ private:
28 void Execute(u32 data); 32 void Execute(u32 data);
29 33
30 u32 syncpoint_value{}; 34 u32 syncpoint_value{};
31 GPU& gpu; 35 Host1x& host1x;
32}; 36};
33 37
38} // namespace Host1x
39
34} // namespace Tegra 40} // namespace Tegra
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp
new file mode 100644
index 000000000..7c317a85d
--- /dev/null
+++ b/src/video_core/host1x/host1x.cpp
@@ -0,0 +1,17 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "core/core.h"
5#include "video_core/host1x/host1x.h"
6
7namespace Tegra {
8
9namespace Host1x {
10
11Host1x::Host1x(Core::System& system_)
12 : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
13 allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}
14
15} // namespace Host1x
16
17} // namespace Tegra
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h
new file mode 100644
index 000000000..57082ae54
--- /dev/null
+++ b/src/video_core/host1x/host1x.h
@@ -0,0 +1,57 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include "common/common_types.h"
7
8#include "common/address_space.h"
9#include "video_core/host1x/syncpoint_manager.h"
10#include "video_core/memory_manager.h"
11
12namespace Core {
13class System;
14} // namespace Core
15
16namespace Tegra {
17
18namespace Host1x {
19
20class Host1x {
21public:
22 explicit Host1x(Core::System& system);
23
24 SyncpointManager& GetSyncpointManager() {
25 return syncpoint_manager;
26 }
27
28 const SyncpointManager& GetSyncpointManager() const {
29 return syncpoint_manager;
30 }
31
32 Tegra::MemoryManager& MemoryManager() {
33 return memory_manager;
34 }
35
36 const Tegra::MemoryManager& MemoryManager() const {
37 return memory_manager;
38 }
39
40 Common::FlatAllocator<u32, 0, 32>& Allocator() {
41 return *allocator;
42 }
43
44 const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
45 return *allocator;
46 }
47
48private:
49 Core::System& system;
50 SyncpointManager syncpoint_manager;
51 Tegra::MemoryManager memory_manager;
52 std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
53};
54
55} // namespace Host1x
56
57} // namespace Tegra
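Host1x, as declared above, is a plain container owning the pieces NVDEC and VIC need independently of the GPU: the syncpoint manager, a dedicated 32-bit MemoryManager (12-bit pages, per host1x.cpp above), and a flat address allocator. A short, hypothetical access sketch (system is the Core::System reference supplied by the emulator core):

    Tegra::Host1x::Host1x host1x{system};
    auto& syncpoints = host1x.GetSyncpointManager();  // guest/host syncpoint counters
    auto& memory     = host1x.MemoryManager();        // 32-bit address space, 4 KiB pages
    auto& allocator  = host1x.Allocator();            // Common::FlatAllocator<u32, 0, 32>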
diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp
index 4fbbe3da6..a4bd5b79f 100644
--- a/src/video_core/command_classes/nvdec.cpp
+++ b/src/video_core/host1x/nvdec.cpp
@@ -2,15 +2,16 @@
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "common/assert.h" 4#include "common/assert.h"
5#include "video_core/command_classes/nvdec.h" 5#include "video_core/host1x/host1x.h"
6#include "video_core/gpu.h" 6#include "video_core/host1x/nvdec.h"
7 7
8namespace Tegra { 8namespace Tegra::Host1x {
9 9
10#define NVDEC_REG_INDEX(field_name) \ 10#define NVDEC_REG_INDEX(field_name) \
11 (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64)) 11 (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))
12 12
13Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {} 13Nvdec::Nvdec(Host1x& host1x_)
14 : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}
14 15
15Nvdec::~Nvdec() = default; 16Nvdec::~Nvdec() = default;
16 17
@@ -44,4 +45,4 @@ void Nvdec::Execute() {
44 } 45 }
45} 46}
46 47
47} // namespace Tegra 48} // namespace Tegra::Host1x
diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h
index 488531fc6..3949d5181 100644
--- a/src/video_core/command_classes/nvdec.h
+++ b/src/video_core/host1x/nvdec.h
@@ -6,14 +6,17 @@
6#include <memory> 6#include <memory>
7#include <vector> 7#include <vector>
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "video_core/command_classes/codecs/codec.h" 9#include "video_core/host1x/codecs/codec.h"
10 10
11namespace Tegra { 11namespace Tegra {
12class GPU; 12
13namespace Host1x {
14
15class Host1x;
13 16
14class Nvdec { 17class Nvdec {
15public: 18public:
16 explicit Nvdec(GPU& gpu); 19 explicit Nvdec(Host1x& host1x);
17 ~Nvdec(); 20 ~Nvdec();
18 21
19 /// Writes the method into the state, Invoke Execute() if encountered 22 /// Writes the method into the state, Invoke Execute() if encountered
@@ -26,8 +29,11 @@ private:
26 /// Invoke codec to decode a frame 29 /// Invoke codec to decode a frame
27 void Execute(); 30 void Execute();
28 31
29 GPU& gpu; 32 Host1x& host1x;
30 NvdecCommon::NvdecRegisters state; 33 NvdecCommon::NvdecRegisters state;
31 std::unique_ptr<Codec> codec; 34 std::unique_ptr<Codec> codec;
32}; 35};
36
37} // namespace Host1x
38
33} // namespace Tegra 39} // namespace Tegra
diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h
index 521e5b52b..49d67ebbe 100644
--- a/src/video_core/command_classes/nvdec_common.h
+++ b/src/video_core/host1x/nvdec_common.h
@@ -7,7 +7,7 @@
7#include "common/common_funcs.h" 7#include "common/common_funcs.h"
8#include "common/common_types.h" 8#include "common/common_types.h"
9 9
10namespace Tegra::NvdecCommon { 10namespace Tegra::Host1x::NvdecCommon {
11 11
12enum class VideoCodec : u64 { 12enum class VideoCodec : u64 {
13 None = 0x0, 13 None = 0x0,
@@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);
94 94
95#undef ASSERT_REG_POSITION 95#undef ASSERT_REG_POSITION
96 96
97} // namespace Tegra::NvdecCommon 97} // namespace Tegra::Host1x::NvdecCommon
diff --git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp
index 67e58046f..5ef9ea217 100644
--- a/src/video_core/command_classes/sync_manager.cpp
+++ b/src/video_core/host1x/sync_manager.cpp
@@ -3,10 +3,13 @@
3 3
4#include <algorithm> 4#include <algorithm>
5#include "sync_manager.h" 5#include "sync_manager.h"
6#include "video_core/gpu.h" 6#include "video_core/host1x/host1x.h"
7#include "video_core/host1x/syncpoint_manager.h"
7 8
8namespace Tegra { 9namespace Tegra {
9SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {} 10namespace Host1x {
11
12SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
10SyncptIncrManager::~SyncptIncrManager() = default; 13SyncptIncrManager::~SyncptIncrManager() = default;
11 14
12void SyncptIncrManager::Increment(u32 id) { 15void SyncptIncrManager::Increment(u32 id) {
@@ -36,8 +39,12 @@ void SyncptIncrManager::IncrementAllDone() {
36 if (!increments[done_count].complete) { 39 if (!increments[done_count].complete) {
37 break; 40 break;
38 } 41 }
39 gpu.IncrementSyncPoint(increments[done_count].syncpt_id); 42 auto& syncpoint_manager = host1x.GetSyncpointManager();
43 syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
44 syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
40 } 45 }
41 increments.erase(increments.begin(), increments.begin() + done_count); 46 increments.erase(increments.begin(), increments.begin() + done_count);
42} 47}
48
49} // namespace Host1x
43} // namespace Tegra 50} // namespace Tegra
diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h
index 6dfaae080..7bb77fa27 100644
--- a/src/video_core/command_classes/sync_manager.h
+++ b/src/video_core/host1x/sync_manager.h
@@ -8,7 +8,11 @@
8#include "common/common_types.h" 8#include "common/common_types.h"
9 9
10namespace Tegra { 10namespace Tegra {
11class GPU; 11
12namespace Host1x {
13
14class Host1x;
15
12struct SyncptIncr { 16struct SyncptIncr {
13 u32 id; 17 u32 id;
14 u32 class_id; 18 u32 class_id;
@@ -21,7 +25,7 @@ struct SyncptIncr {
21 25
22class SyncptIncrManager { 26class SyncptIncrManager {
23public: 27public:
24 explicit SyncptIncrManager(GPU& gpu); 28 explicit SyncptIncrManager(Host1x& host1x);
25 ~SyncptIncrManager(); 29 ~SyncptIncrManager();
26 30
27 /// Add syncpoint id and increment all 31 /// Add syncpoint id and increment all
@@ -41,7 +45,9 @@ private:
41 std::mutex increment_lock; 45 std::mutex increment_lock;
42 u32 current_id{}; 46 u32 current_id{};
43 47
44 GPU& gpu; 48 Host1x& host1x;
45}; 49};
46 50
51} // namespace Host1x
52
47} // namespace Tegra 53} // namespace Tegra
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
new file mode 100644
index 000000000..326e8355a
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -0,0 +1,96 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "common/microprofile.h"
5#include "video_core/host1x/syncpoint_manager.h"
6
7namespace Tegra {
8
9namespace Host1x {
10
11MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
12
13SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
14 std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
15 std::function<void()>&& action) {
16 if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
17 action();
18 return {};
19 }
20
21 std::unique_lock lk(guard);
22 if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
23 action();
24 return {};
25 }
26 auto it = action_storage.begin();
27 while (it != action_storage.end()) {
28 if (it->expected_value >= expected_value) {
29 break;
30 }
31 ++it;
32 }
33 return action_storage.emplace(it, expected_value, std::move(action));
34}
35
36void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
37 ActionHandle& handle) {
38 std::unique_lock lk(guard);
39 action_storage.erase(handle);
40}
41
42void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
43 DeregisterAction(guest_action_storage[syncpoint_id], handle);
44}
45
46void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) {
47 DeregisterAction(host_action_storage[syncpoint_id], handle);
48}
49
50void SyncpointManager::IncrementGuest(u32 syncpoint_id) {
51 Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]);
52}
53
54void SyncpointManager::IncrementHost(u32 syncpoint_id) {
55 Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]);
56}
57
58void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
59 Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value);
60}
61
62void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
63 MICROPROFILE_SCOPE(GPU_wait);
64 Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
65}
66
67void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
68 std::list<RegisteredAction>& action_storage) {
69 auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};
70
71 std::unique_lock lk(guard);
72 auto it = action_storage.begin();
73 while (it != action_storage.end()) {
74 if (it->expected_value > new_value) {
75 break;
76 }
77 it->action();
78 it = action_storage.erase(it);
79 }
80 wait_cv.notify_all();
81}
82
83void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
84 u32 expected_value) {
85 const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; };
86 if (pred()) {
87 return;
88 }
89
90 std::unique_lock lk(guard);
91 wait_cv.wait(lk, pred);
92}
93
94} // namespace Host1x
95
96} // namespace Tegra
diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h
new file mode 100644
index 000000000..50a264e23
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.h
@@ -0,0 +1,98 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <array>
7#include <atomic>
8#include <condition_variable>
9#include <functional>
10#include <list>
11#include <mutex>
12
13#include "common/common_types.h"
14
15namespace Tegra {
16
17namespace Host1x {
18
19class SyncpointManager {
20public:
21 u32 GetGuestSyncpointValue(u32 id) const {
22 return syncpoints_guest[id].load(std::memory_order_acquire);
23 }
24
25 u32 GetHostSyncpointValue(u32 id) const {
26 return syncpoints_host[id].load(std::memory_order_acquire);
27 }
28
29 struct RegisteredAction {
30 explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
31 : expected_value{expected_value_}, action{std::move(action_)} {}
32 u32 expected_value;
33 std::function<void()> action;
34 };
35 using ActionHandle = std::list<RegisteredAction>::iterator;
36
37 template <typename Func>
38 ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
39 std::function<void()> func(action);
40 return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
41 expected_value, std::move(func));
42 }
43
44 template <typename Func>
45 ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
46 std::function<void()> func(action);
47 return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
48 expected_value, std::move(func));
49 }
50
51 void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
52
53 void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);
54
55 void IncrementGuest(u32 syncpoint_id);
56
57 void IncrementHost(u32 syncpoint_id);
58
59 void WaitGuest(u32 syncpoint_id, u32 expected_value);
60
61 void WaitHost(u32 syncpoint_id, u32 expected_value);
62
63 bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
64 return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
65 }
66
67 bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
68 return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
69 }
70
71private:
72 void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
73 std::list<RegisteredAction>& action_storage);
74
75 ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
76 std::list<RegisteredAction>& action_storage, u32 expected_value,
77 std::function<void()>&& action);
78
79 void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
80
81 void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);
82
83 static constexpr size_t NUM_MAX_SYNCPOINTS = 192;
84
85 std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
86 std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};
87
88 std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
89 std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;
90
91 std::mutex guard;
92 std::condition_variable wait_guest_cv;
93 std::condition_variable wait_host_cv;
94};
95
96} // namespace Host1x
97
98} // namespace Tegra
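SyncpointManager keeps independent guest and host counters per syncpoint; Register*Action callbacks fire once an Increment* crosses their expected value, and Wait* blocks on a condition variable until the threshold is reached. A minimal usage sketch based only on the interface above (illustrative, not part of the commit):

    Tegra::Host1x::SyncpointManager syncpoints;
    constexpr u32 id = 5;
    // Fires as soon as the host counter of syncpoint 5 reaches 1.
    auto handle = syncpoints.RegisterHostAction(id, 1, [] { /* signal a fence, etc. */ });
    syncpoints.IncrementHost(id);    // runs the registered action and notifies waiters
    syncpoints.WaitHost(id, 1);      // returns immediately, the value is already >= 1
    // syncpoints.IsReadyHost(id, 1) now returns true.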
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp
index 7c17df353..ac0b7d20e 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -18,14 +18,17 @@ extern "C" {
18#include "common/bit_field.h" 18#include "common/bit_field.h"
19#include "common/logging/log.h" 19#include "common/logging/log.h"
20 20
21#include "video_core/command_classes/nvdec.h"
22#include "video_core/command_classes/vic.h"
23#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
24#include "video_core/gpu.h" 22#include "video_core/host1x/host1x.h"
23#include "video_core/host1x/nvdec.h"
24#include "video_core/host1x/vic.h"
25#include "video_core/memory_manager.h" 25#include "video_core/memory_manager.h"
26#include "video_core/textures/decoders.h" 26#include "video_core/textures/decoders.h"
27 27
28namespace Tegra { 28namespace Tegra {
29
30namespace Host1x {
31
29namespace { 32namespace {
30enum class VideoPixelFormat : u64_le { 33enum class VideoPixelFormat : u64_le {
31 RGBA8 = 0x1f, 34 RGBA8 = 0x1f,
@@ -46,8 +49,8 @@ union VicConfig {
46 BitField<46, 14, u64_le> surface_height_minus1; 49 BitField<46, 14, u64_le> surface_height_minus1;
47}; 50};
48 51
49Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_) 52Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
50 : gpu(gpu_), 53 : host1x(host1x_),
51 nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {} 54 nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
52 55
53Vic::~Vic() = default; 56Vic::~Vic() = default;
@@ -78,7 +81,7 @@ void Vic::Execute() {
78 LOG_ERROR(Service_NVDRV, "VIC Luma address not set."); 81 LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
79 return; 82 return;
80 } 83 }
81 const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)}; 84 const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
82 const AVFramePtr frame_ptr = nvdec_processor->GetFrame(); 85 const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
83 const auto* frame = frame_ptr.get(); 86 const auto* frame = frame_ptr.get();
84 if (!frame) { 87 if (!frame) {
@@ -153,15 +156,16 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
153 const u32 block_height = static_cast<u32>(config.block_linear_height_log2); 156 const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
154 const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0); 157 const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
155 luma_buffer.resize(size); 158 luma_buffer.resize(size);
156 Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(), 159 std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
157 converted_frame_buf_addr, block_height, 0, 0); 160 Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
161 block_height, 0, width * 4);
158 162
159 gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); 163 host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
160 } else { 164 } else {
161 // send pitch linear frame 165 // send pitch linear frame
162 const size_t linear_size = width * height * 4; 166 const size_t linear_size = width * height * 4;
163 gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, 167 host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
164 linear_size); 168 linear_size);
165 } 169 }
166} 170}
167 171
@@ -189,8 +193,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
189 luma_buffer[dst + x] = luma_src[src + x]; 193 luma_buffer[dst + x] = luma_src[src + x];
190 } 194 }
191 } 195 }
192 gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), 196 host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
193 luma_buffer.size()); 197 luma_buffer.size());
194 198
195 // Chroma 199 // Chroma
196 const std::size_t half_height = frame_height / 2; 200 const std::size_t half_height = frame_height / 2;
@@ -231,8 +235,10 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
231 ASSERT(false); 235 ASSERT(false);
232 break; 236 break;
233 } 237 }
234 gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), 238 host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
235 chroma_buffer.size()); 239 chroma_buffer.size());
236} 240}
237 241
242} // namespace Host1x
243
238} // namespace Tegra 244} // namespace Tegra
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h
index 010daa6b6..2b78786e8 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/host1x/vic.h
@@ -10,7 +10,10 @@
10struct SwsContext; 10struct SwsContext;
11 11
12namespace Tegra { 12namespace Tegra {
13class GPU; 13
14namespace Host1x {
15
16class Host1x;
14class Nvdec; 17class Nvdec;
15union VicConfig; 18union VicConfig;
16 19
@@ -25,7 +28,7 @@ public:
25 SetOutputSurfaceChromaUnusedOffset = 0x1ca 28 SetOutputSurfaceChromaUnusedOffset = 0x1ca
26 }; 29 };
27 30
28 explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor); 31 explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor);
29 32
30 ~Vic(); 33 ~Vic();
31 34
@@ -39,8 +42,8 @@ private:
39 42
40 void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); 43 void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
41 44
42 GPU& gpu; 45 Host1x& host1x;
43 std::shared_ptr<Tegra::Nvdec> nvdec_processor; 46 std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
44 47
45 /// Avoid reallocation of the following buffers every frame, as their 48 /// Avoid reallocation of the following buffers every frame, as their
46 /// size does not change during a stream 49 /// size does not change during a stream
@@ -58,4 +61,6 @@ private:
58 s32 scaler_height{}; 61 s32 scaler_height{};
59}; 62};
60 63
64} // namespace Host1x
65
61} // namespace Tegra 66} // namespace Tegra
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
index 43f8b5904..f61d5998e 100644
--- a/src/video_core/macro/macro.cpp
+++ b/src/video_core/macro/macro.cpp
@@ -8,6 +8,7 @@
8 8
9#include <boost/container_hash/hash.hpp> 9#include <boost/container_hash/hash.hpp>
10 10
11#include <fstream>
11#include "common/assert.h" 12#include "common/assert.h"
12#include "common/fs/fs.h" 13#include "common/fs/fs.h"
13#include "common/fs/path_util.h" 14#include "common/fs/path_util.h"
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index bf9eb735d..cca401c74 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -7,6 +7,7 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/device_memory.h"
10#include "core/hle/kernel/k_page_table.h" 11#include "core/hle/kernel/k_page_table.h"
11#include "core/hle/kernel/k_process.h" 12#include "core/hle/kernel/k_process.h"
12#include "core/memory.h" 13#include "core/memory.h"
@@ -16,172 +17,198 @@
16 17
17namespace Tegra { 18namespace Tegra {
18 19
19MemoryManager::MemoryManager(Core::System& system_) 20std::atomic<size_t> MemoryManager::unique_identifier_generator{};
20 : system{system_}, page_table(page_table_size) {} 21
22MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
23 u64 page_bits_)
24 : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()},
25 address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
26 entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
27 page_bits != big_page_bits ? page_bits : 0},
28 unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} {
29 address_space_size = 1ULL << address_space_bits;
30 page_size = 1ULL << page_bits;
31 page_mask = page_size - 1ULL;
32 big_page_size = 1ULL << big_page_bits;
33 big_page_mask = big_page_size - 1ULL;
34 const u64 page_table_bits = address_space_bits - page_bits;
35 const u64 big_page_table_bits = address_space_bits - big_page_bits;
36 const u64 page_table_size = 1ULL << page_table_bits;
37 const u64 big_page_table_size = 1ULL << big_page_table_bits;
38 page_table_mask = page_table_size - 1;
39 big_page_table_mask = big_page_table_size - 1;
40
41 big_entries.resize(big_page_table_size / 32, 0);
42 big_page_table_cpu.resize(big_page_table_size);
43 big_page_continous.resize(big_page_table_size / continous_bits, 0);
44 entries.resize(page_table_size / 32, 0);
45}
21 46
22MemoryManager::~MemoryManager() = default; 47MemoryManager::~MemoryManager() = default;
23 48
24void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { 49template <bool is_big_page>
25 rasterizer = rasterizer_; 50MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
26} 51 if constexpr (is_big_page) {
27 52 position = position >> big_page_bits;
28GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { 53 const u64 entry_mask = big_entries[position / 32];
29 u64 remaining_size{size}; 54 const size_t sub_index = position % 32;
30 for (u64 offset{}; offset < size; offset += page_size) { 55 return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
31 if (remaining_size < page_size) { 56 } else {
32 SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size); 57 position = position >> page_bits;
33 } else { 58 const u64 entry_mask = entries[position / 32];
34 SetPageEntry(gpu_addr + offset, page_entry + offset); 59 const size_t sub_index = position % 32;
35 } 60 return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
36 remaining_size -= page_size;
37 } 61 }
38 return gpu_addr;
39} 62}
40 63
41GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { 64template <bool is_big_page>
42 const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); 65void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
43 if (it != map_ranges.end() && it->first == gpu_addr) { 66 if constexpr (is_big_page) {
44 it->second = size; 67 position = position >> big_page_bits;
68 const u64 entry_mask = big_entries[position / 32];
69 const size_t sub_index = position % 32;
70 big_entries[position / 32] =
71 (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
45 } else { 72 } else {
46 map_ranges.insert(it, MapRange{gpu_addr, size}); 73 position = position >> page_bits;
74 const u64 entry_mask = entries[position / 32];
75 const size_t sub_index = position % 32;
76 entries[position / 32] =
77 (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
47 } 78 }
48 return UpdateRange(gpu_addr, cpu_addr, size);
49} 79}
50 80
51GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { 81inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
52 return Map(cpu_addr, *FindFreeRange(size, align), size); 82 const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
83 const size_t sub_index = big_page_index % continous_bits;
84 return ((entry_mask >> sub_index) & 0x1ULL) != 0;
53} 85}
54 86
55GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) { 87inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
56 const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true); 88 const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
57 ASSERT(gpu_addr); 89 const size_t sub_index = big_page_index % continous_bits;
58 return Map(cpu_addr, *gpu_addr, size); 90 big_page_continous[big_page_index / continous_bits] =
91 (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
59} 92}
60 93
61void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { 94template <MemoryManager::EntryType entry_type>
62 if (size == 0) { 95GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
63 return; 96 size_t size) {
64 } 97 u64 remaining_size{size};
65 const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); 98 if constexpr (entry_type == EntryType::Mapped) {
66 if (it != map_ranges.end()) { 99 page_table.ReserveRange(gpu_addr, size);
67 ASSERT(it->first == gpu_addr);
68 map_ranges.erase(it);
69 } else {
70 ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
71 }
72 const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
73
74 for (const auto& [map_addr, map_size] : submapped_ranges) {
75 // Flush and invalidate through the GPU interface, to be asynchronous if possible.
76 const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
77 ASSERT(cpu_addr);
78
79 rasterizer->UnmapMemory(*cpu_addr, map_size);
80 } 100 }
81
82 UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
83}
84
85std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
86 for (u64 offset{}; offset < size; offset += page_size) { 101 for (u64 offset{}; offset < size; offset += page_size) {
87 if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) { 102 const GPUVAddr current_gpu_addr = gpu_addr + offset;
88 return std::nullopt; 103 [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
104 SetEntry<false>(current_gpu_addr, entry_type);
105 if (current_entry_type != entry_type) {
106 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
107 }
108 if constexpr (entry_type == EntryType::Mapped) {
109 const VAddr current_cpu_addr = cpu_addr + offset;
110 const auto index = PageEntryIndex<false>(current_gpu_addr);
111 const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
112 page_table[index] = sub_value;
89 } 113 }
114 remaining_size -= page_size;
90 } 115 }
91 116 return gpu_addr;
92 return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
93}
94
95GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
96 return *AllocateFixed(*FindFreeRange(size, align), size);
97} 117}
98 118
99void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) { 119template <MemoryManager::EntryType entry_type>
100 if (!page_entry.IsValid()) { 120GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
101 return; 121 size_t size) {
122 u64 remaining_size{size};
123 for (u64 offset{}; offset < size; offset += big_page_size) {
124 const GPUVAddr current_gpu_addr = gpu_addr + offset;
125 [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
126 SetEntry<true>(current_gpu_addr, entry_type);
127 if (current_entry_type != entry_type) {
128 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
129 }
130 if constexpr (entry_type == EntryType::Mapped) {
131 const VAddr current_cpu_addr = cpu_addr + offset;
132 const auto index = PageEntryIndex<true>(current_gpu_addr);
133 const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
134 big_page_table_cpu[index] = sub_value;
135 const bool is_continous = ([&] {
136 uintptr_t base_ptr{
137 reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
138 if (base_ptr == 0) {
139 return false;
140 }
141 for (VAddr start_cpu = current_cpu_addr + page_size;
142 start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
143 base_ptr += page_size;
144 auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu));
145 if (next_ptr == 0 || base_ptr != next_ptr) {
146 return false;
147 }
148 }
149 return true;
150 })();
151 SetBigPageContinous(index, is_continous);
152 }
153 remaining_size -= big_page_size;
102 } 154 }
103 155 return gpu_addr;
104 ASSERT(system.CurrentProcess()
105 ->PageTable()
106 .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
107 .IsSuccess());
108} 156}
109 157
110void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) { 158void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
111 if (!page_entry.IsValid()) { 159 rasterizer = rasterizer_;
112 return;
113 }
114
115 ASSERT(system.CurrentProcess()
116 ->PageTable()
117 .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
118 .IsSuccess());
119} 160}
120 161
121PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const { 162GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
122 return page_table[PageEntryIndex(gpu_addr)]; 163 bool is_big_pages) {
164 if (is_big_pages) [[likely]] {
165 return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
166 }
167 return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
123} 168}
124 169
125void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { 170GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
126 // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to 171 if (is_big_pages) [[likely]] {
127 // improper tracking, but should be fixed in the future. 172 return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
128
129 //// Unlock the old page
130 // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
131
132 //// Lock the new page
133 // TryLockPage(page_entry, size);
134 auto& current_page = page_table[PageEntryIndex(gpu_addr)];
135
136 if ((!current_page.IsValid() && page_entry.IsValid()) ||
137 current_page.ToAddress() != page_entry.ToAddress()) {
138 rasterizer->ModifyGPUMemory(gpu_addr, size);
139 } 173 }
140 174 return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
141 current_page = page_entry;
142} 175}
143 176
144std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align, 177void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
145 bool start_32bit_address) const { 178 if (size == 0) {
146 if (!align) { 179 return;
147 align = page_size;
148 } else {
149 align = Common::AlignUp(align, page_size);
150 } 180 }
181 const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
151 182
152 u64 available_size{}; 183 for (const auto& [map_addr, map_size] : submapped_ranges) {
153 GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start}; 184 // Flush and invalidate through the GPU interface, to be asynchronous if possible.
154 while (gpu_addr + available_size < address_space_size) { 185 const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
155 if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) { 186 ASSERT(cpu_addr);
156 available_size += page_size;
157
158 if (available_size >= size) {
159 return gpu_addr;
160 }
161 } else {
162 gpu_addr += available_size + page_size;
163 available_size = 0;
164 187
165 const auto remainder{gpu_addr % align}; 188 rasterizer->UnmapMemory(*cpu_addr, map_size);
166 if (remainder) {
167 gpu_addr = (gpu_addr - remainder) + align;
168 }
169 }
170 } 189 }
171 190
172 return std::nullopt; 191 BigPageTableOp<EntryType::Free>(gpu_addr, 0, size);
192 PageTableOp<EntryType::Free>(gpu_addr, 0, size);
173} 193}
174 194
175std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { 195std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
176 if (gpu_addr == 0) { 196 if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
177 return std::nullopt; 197 return std::nullopt;
178 } 198 }
179 const auto page_entry{GetPageEntry(gpu_addr)}; 199 if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] {
180 if (!page_entry.IsValid()) { 200 if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
181 return std::nullopt; 201 return std::nullopt;
202 }
203
204 const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
205 << cpu_page_bits;
206 return cpu_addr_base + (gpu_addr & page_mask);
182 } 207 }
183 208
184 return page_entry.ToAddress() + (gpu_addr & page_mask); 209 const VAddr cpu_addr_base =
210 static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
211 return cpu_addr_base + (gpu_addr & big_page_mask);
185} 212}
186 213
187std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { 214std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -189,7 +216,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s
189 const size_t page_last{(addr + size + page_size - 1) >> page_bits}; 216 const size_t page_last{(addr + size + page_size - 1) >> page_bits};
190 while (page_index < page_last) { 217 while (page_index < page_last) {
191 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 218 const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
192 if (page_addr && *page_addr != 0) { 219 if (page_addr) {
193 return page_addr; 220 return page_addr;
194 } 221 }
195 ++page_index; 222 ++page_index;
@@ -232,126 +259,298 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
232template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); 259template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
233 260
234u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { 261u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
235 if (!GetPageEntry(gpu_addr).IsValid()) {
236 return {};
237 }
238
239 const auto address{GpuToCpuAddress(gpu_addr)}; 262 const auto address{GpuToCpuAddress(gpu_addr)};
240 if (!address) { 263 if (!address) {
241 return {}; 264 return {};
242 } 265 }
243 266
244 return system.Memory().GetPointer(*address); 267 return memory.GetPointer(*address);
245} 268}
246 269
247const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { 270const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
248 if (!GetPageEntry(gpu_addr).IsValid()) {
249 return {};
250 }
251
252 const auto address{GpuToCpuAddress(gpu_addr)}; 271 const auto address{GpuToCpuAddress(gpu_addr)};
253 if (!address) { 272 if (!address) {
254 return {}; 273 return {};
255 } 274 }
256 275
257 return system.Memory().GetPointer(*address); 276 return memory.GetPointer(*address);
258} 277}
259 278
260size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept { 279#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
261 auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first); 280#pragma inline_recursion(on)
262 --it; 281#endif
263 return it->second - (gpu_addr - it->first); 282
264} 283template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
265 284inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
266void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, 285 FuncMapped&& func_mapped, FuncReserved&& func_reserved,
267 bool is_safe) const { 286 FuncUnmapped&& func_unmapped) const {
287 static constexpr bool BOOL_BREAK_MAPPED = std::is_same_v<FuncMapped, bool>;
288 static constexpr bool BOOL_BREAK_RESERVED = std::is_same_v<FuncReserved, bool>;
289 static constexpr bool BOOL_BREAK_UNMAPPED = std::is_same_v<FuncUnmapped, bool>;
290 u64 used_page_size;
291 u64 used_page_mask;
292 u64 used_page_bits;
293 if constexpr (is_big_pages) {
294 used_page_size = big_page_size;
295 used_page_mask = big_page_mask;
296 used_page_bits = big_page_bits;
297 } else {
298 used_page_size = page_size;
299 used_page_mask = page_mask;
300 used_page_bits = page_bits;
301 }
268 std::size_t remaining_size{size}; 302 std::size_t remaining_size{size};
269 std::size_t page_index{gpu_src_addr >> page_bits}; 303 std::size_t page_index{gpu_src_addr >> used_page_bits};
270 std::size_t page_offset{gpu_src_addr & page_mask}; 304 std::size_t page_offset{gpu_src_addr & used_page_mask};
305 GPUVAddr current_address = gpu_src_addr;
271 306
272 while (remaining_size > 0) { 307 while (remaining_size > 0) {
273 const std::size_t copy_amount{ 308 const std::size_t copy_amount{
274 std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; 309 std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
275 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 310 auto entry = GetEntry<is_big_pages>(current_address);
276 if (page_addr && *page_addr != 0) { 311 if (entry == EntryType::Mapped) [[likely]] {
277 const auto src_addr{*page_addr + page_offset}; 312 if constexpr (BOOL_BREAK_MAPPED) {
278 if (is_safe) { 313 if (func_mapped(page_index, page_offset, copy_amount)) {
279 // Flush must happen on the rasterizer interface, such that memory is always 314 return;
280 // synchronous when it is read (even when in asynchronous GPU mode). 315 }
281 // Fixes Dead Cells title menu. 316 } else {
282 rasterizer->FlushRegion(src_addr, copy_amount); 317 func_mapped(page_index, page_offset, copy_amount);
283 } 318 }
284 system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
285 } else {
286 std::memset(dest_buffer, 0, copy_amount);
287 }
288 319
320 } else if (entry == EntryType::Reserved) {
321 if constexpr (BOOL_BREAK_RESERVED) {
322 if (func_reserved(page_index, page_offset, copy_amount)) {
323 return;
324 }
325 } else {
326 func_reserved(page_index, page_offset, copy_amount);
327 }
328
329 } else [[unlikely]] {
330 if constexpr (BOOL_BREAK_UNMAPPED) {
331 if (func_unmapped(page_index, page_offset, copy_amount)) {
332 return;
333 }
334 } else {
335 func_unmapped(page_index, page_offset, copy_amount);
336 }
337 }
289 page_index++; 338 page_index++;
290 page_offset = 0; 339 page_offset = 0;
291 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
292 remaining_size -= copy_amount; 340 remaining_size -= copy_amount;
341 current_address += copy_amount;
293 } 342 }
294} 343}
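
MemoryOperation is the traversal primitive the rest of the file is built on: it splits a GPU range into page-sized chunks and routes each chunk to one of three callbacks according to the page entry (Mapped, Reserved or Free), and callbacks that return bool can stop the walk early. A simplified standalone sketch of that dispatch shape, without the early-exit machinery; PageState and ForEachPage are illustrative names, not yuzu API:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

enum class PageState { Free, Reserved, Mapped };

// Walk [addr, addr + size) one page-sized chunk at a time and route each chunk
// to the callback matching its page state. Callers size `states` to cover the range.
template <typename OnMapped, typename OnReserved, typename OnFree>
void ForEachPage(const std::vector<PageState>& states, std::uint64_t addr, std::size_t size,
                 std::uint64_t page_bits, OnMapped&& on_mapped, OnReserved&& on_reserved,
                 OnFree&& on_free) {
    const std::uint64_t page_size = std::uint64_t{1} << page_bits;
    const std::uint64_t page_mask = page_size - 1;
    std::size_t remaining = size;
    std::size_t page_index = static_cast<std::size_t>(addr >> page_bits);
    std::size_t offset = static_cast<std::size_t>(addr & page_mask);
    while (remaining > 0) {
        const std::size_t chunk =
            std::min(static_cast<std::size_t>(page_size) - offset, remaining);
        switch (states[page_index]) {
        case PageState::Mapped:
            on_mapped(page_index, offset, chunk);
            break;
        case PageState::Reserved:
            on_reserved(page_index, offset, chunk);
            break;
        case PageState::Free:
            on_free(page_index, offset, chunk);
            break;
        }
        ++page_index;
        offset = 0;  // only the first chunk can start mid-page
        remaining -= chunk;
    }
}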
295 344
345template <bool is_safe>
346void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
347 std::size_t size) const {
348 auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index,
349 [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
350 std::memset(dest_buffer, 0, copy_amount);
351 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
352 };
353 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
354 const VAddr cpu_addr_base =
355 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
356 if constexpr (is_safe) {
357 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
358 }
359 u8* physical = memory.GetPointer(cpu_addr_base);
360 std::memcpy(dest_buffer, physical, copy_amount);
361 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
362 };
363 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
364 const VAddr cpu_addr_base =
365 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
366 if constexpr (is_safe) {
367 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
368 }
369 if (!IsBigPageContinous(page_index)) [[unlikely]] {
370 memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
371 } else {
372 u8* physical = memory.GetPointer(cpu_addr_base);
373 std::memcpy(dest_buffer, physical, copy_amount);
374 }
375 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
376 };
377 auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
378 std::size_t copy_amount) {
379 GPUVAddr base = (page_index << big_page_bits) + offset;
380 MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero);
381 };
382 MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages);
383}
384
296void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { 385void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
297 ReadBlockImpl(gpu_src_addr, dest_buffer, size, true); 386 ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size);
298} 387}
299 388
300void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, 389void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
301 const std::size_t size) const { 390 const std::size_t size) const {
302 ReadBlockImpl(gpu_src_addr, dest_buffer, size, false); 391 ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size);
303} 392}
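
The two public wrappers differ only in synchronization: ReadBlock flushes the region through the rasterizer before copying so pending GPU-side writes are visible, while ReadBlockUnsafe copies directly. A hypothetical call site, assuming a bound MemoryManager and a valid mapped address:

#include <array>
#include "video_core/memory_manager.h"

// Both calls copy 256 bytes from gpu_addr into `staging`.
void ReadSample(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr) {
    std::array<u8, 256> staging{};
    gpu_memory.ReadBlock(gpu_addr, staging.data(), staging.size());        // flushes the range first
    gpu_memory.ReadBlockUnsafe(gpu_addr, staging.data(), staging.size());  // raw copy, no flush
}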
304 393
305void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, 394template <bool is_safe>
306 bool is_safe) { 395void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer,
307 std::size_t remaining_size{size}; 396 std::size_t size) {
308 std::size_t page_index{gpu_dest_addr >> page_bits}; 397 auto just_advance = [&]([[maybe_unused]] std::size_t page_index,
309 std::size_t page_offset{gpu_dest_addr & page_mask}; 398 [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
310 399 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
311 while (remaining_size > 0) { 400 };
312 const std::size_t copy_amount{ 401 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
313 std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; 402 const VAddr cpu_addr_base =
314 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 403 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
315 if (page_addr && *page_addr != 0) { 404 if constexpr (is_safe) {
316 const auto dest_addr{*page_addr + page_offset}; 405 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
317
318 if (is_safe) {
319 // Invalidate must happen on the rasterizer interface, such that memory is always
320 // synchronous when it is written (even when in asynchronous GPU mode).
321 rasterizer->InvalidateRegion(dest_addr, copy_amount);
322 }
323 system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
324 } 406 }
325 407 u8* physical = memory.GetPointer(cpu_addr_base);
326 page_index++; 408 std::memcpy(physical, src_buffer, copy_amount);
327 page_offset = 0;
328 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; 409 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
329 remaining_size -= copy_amount; 410 };
330 } 411 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
412 const VAddr cpu_addr_base =
413 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
414 if constexpr (is_safe) {
415 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
416 }
417 if (!IsBigPageContinous(page_index)) [[unlikely]] {
418 memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
419 } else {
420 u8* physical = memory.GetPointer(cpu_addr_base);
421 std::memcpy(physical, src_buffer, copy_amount);
422 }
423 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
424 };
425 auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
426 std::size_t copy_amount) {
427 GPUVAddr base = (page_index << big_page_bits) + offset;
428 MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance);
429 };
430 MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages);
331} 431}
332 432
333void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { 433void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
334 WriteBlockImpl(gpu_dest_addr, src_buffer, size, true); 434 WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size);
335} 435}
336 436
337void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, 437void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
338 std::size_t size) { 438 std::size_t size) {
339 WriteBlockImpl(gpu_dest_addr, src_buffer, size, false); 439 WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size);
340} 440}
341 441
342void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const { 442void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
343 size_t remaining_size{size}; 443 auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
344 size_t page_index{gpu_addr >> page_bits}; 444 [[maybe_unused]] std::size_t offset,
345 size_t page_offset{gpu_addr & page_mask}; 445 [[maybe_unused]] std::size_t copy_amount) {};
346 while (remaining_size > 0) { 446
347 const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; 447 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
348 if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { 448 const VAddr cpu_addr_base =
349 rasterizer->FlushRegion(*page_addr + page_offset, num_bytes); 449 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
450 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
451 };
452 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
453 const VAddr cpu_addr_base =
454 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
455 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
456 };
457 auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
458 std::size_t copy_amount) {
459 GPUVAddr base = (page_index << big_page_bits) + offset;
460 MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
461 };
462 MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, flush_short_pages);
463}
464
465bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const {
466 bool result = false;
467 auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
468 [[maybe_unused]] std::size_t offset,
469 [[maybe_unused]] std::size_t copy_amount) { return false; };
470
471 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
472 const VAddr cpu_addr_base =
473 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
474 result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
475 return result;
476 };
477 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
478 const VAddr cpu_addr_base =
479 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
480 result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
481 return result;
482 };
483 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
484 std::size_t copy_amount) {
485 GPUVAddr base = (page_index << big_page_bits) + offset;
486 MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
487 return result;
488 };
489 MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, check_short_pages);
490 return result;
491}
492
493size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
494 std::optional<VAddr> old_page_addr{};
495 size_t range_so_far = 0;
496 bool result{false};
497 auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
498 std::size_t copy_amount) {
499 result = true;
500 return true;
501 };
502 auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
503 const VAddr cpu_addr_base =
504 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
505 if (old_page_addr && *old_page_addr != cpu_addr_base) {
506 result = true;
507 return true;
350 } 508 }
351 ++page_index; 509 range_so_far += copy_amount;
352 page_offset = 0; 510 old_page_addr = {cpu_addr_base + copy_amount};
353 remaining_size -= num_bytes; 511 return false;
354 } 512 };
513 auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
514 const VAddr cpu_addr_base =
515 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
516 if (old_page_addr && *old_page_addr != cpu_addr_base) {
517 return true;
518 }
519 range_so_far += copy_amount;
520 old_page_addr = {cpu_addr_base + copy_amount};
521 return false;
522 };
523 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
524 std::size_t copy_amount) {
525 GPUVAddr base = (page_index << big_page_bits) + offset;
526 MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
527 return result;
528 };
529 MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
530 return range_so_far;
531}
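
MaxContinousRange reports how many bytes from the start of the range are backed by one contiguous CPU range, which is what callers need when deciding between a single large copy and a page-by-page path. A hypothetical helper built on it:

#include <cstddef>
#include "video_core/memory_manager.h"

// Hypothetical helper: true when the whole GPU range is backed by one
// contiguous CPU range, so a single host-side memcpy would be valid.
bool CanCopyDirectly(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, std::size_t size) {
    return gpu_memory.MaxContinousRange(gpu_addr, size) == size;
}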
532
533void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const {
534 auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
535 [[maybe_unused]] std::size_t offset,
536 [[maybe_unused]] std::size_t copy_amount) {};
537
538 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
539 const VAddr cpu_addr_base =
540 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
541 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
542 };
543 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
544 const VAddr cpu_addr_base =
545 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
546 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
547 };
548 auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset,
549 std::size_t copy_amount) {
550 GPUVAddr base = (page_index << big_page_bits) + offset;
551 MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
552 };
553 MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages);
355} 554}
356 555
357void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { 556void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
@@ -365,87 +564,134 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
365} 564}
366 565
367bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { 566bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
368 const auto cpu_addr{GpuToCpuAddress(gpu_addr)}; 567 if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
369 if (!cpu_addr) { 568 size_t page_index = gpu_addr >> big_page_bits;
569 if (IsBigPageContinous(page_index)) [[likely]] {
570            const std::size_t page{(gpu_addr & big_page_mask) + size};
571 return page <= big_page_size;
572 }
573 const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
574 return page <= Core::Memory::YUZU_PAGESIZE;
575 }
576 if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
370 return false; 577 return false;
371 } 578 }
372 const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; 579 const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
373 return page <= Core::Memory::YUZU_PAGESIZE; 580 return page <= Core::Memory::YUZU_PAGESIZE;
374} 581}
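
IsGranularRange only verifies that the access does not cross its containing page: the in-page offset plus the size must not exceed the page size (the big-page size when the big page is contiguous, the 4 KiB CPU page otherwise). A worked example, assuming 64 KiB (0x10000) big pages:

// Offset within the big page plus access size must stay inside the page.
static_assert(0x0F00 + 0x200 <= 0x10000); // fits in one big page  -> granular
static_assert(0xFF00 + 0x200 >  0x10000); // crosses into the next -> not granular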
375 582
376bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { 583bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
377 size_t page_index{gpu_addr >> page_bits};
378 const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
379 std::optional<VAddr> old_page_addr{}; 584 std::optional<VAddr> old_page_addr{};
380 while (page_index != page_last) { 585 bool result{true};
381 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 586 auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
382 if (!page_addr || *page_addr == 0) { 587 std::size_t copy_amount) {
383 return false; 588 result = false;
589 return true;
590 };
591 auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
592 const VAddr cpu_addr_base =
593 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
594 if (old_page_addr && *old_page_addr != cpu_addr_base) {
595 result = false;
596 return true;
384 } 597 }
385 if (old_page_addr) { 598 old_page_addr = {cpu_addr_base + copy_amount};
386 if (*old_page_addr + page_size != *page_addr) { 599 return false;
387 return false; 600 };
388 } 601 auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
602 const VAddr cpu_addr_base =
603 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
604 if (old_page_addr && *old_page_addr != cpu_addr_base) {
605 result = false;
606 return true;
389 } 607 }
390 old_page_addr = page_addr; 608 old_page_addr = {cpu_addr_base + copy_amount};
391 ++page_index; 609 return false;
392 } 610 };
393 return true; 611 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
612 std::size_t copy_amount) {
613 GPUVAddr base = (page_index << big_page_bits) + offset;
614 MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
615 return !result;
616 };
617 MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
618 return result;
394} 619}
395 620
396bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { 621bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
397 size_t page_index{gpu_addr >> page_bits}; 622 bool result{true};
398 const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; 623 auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
399 while (page_index < page_last) { 624 [[maybe_unused]] std::size_t copy_amount) {
400 if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) { 625 result = false;
401 return false; 626 return true;
402 } 627 };
403 ++page_index; 628 auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
404 } 629 [[maybe_unused]] std::size_t copy_amount) { return false; };
405 return true; 630 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
631 std::size_t copy_amount) {
632 GPUVAddr base = (page_index << big_page_bits) + offset;
633 MemoryOperation<false>(base, copy_amount, pass, pass, fail);
634 return !result;
635 };
636 MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages);
637 return result;
406} 638}
407 639
408std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange( 640std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
409 GPUVAddr gpu_addr, std::size_t size) const { 641 GPUVAddr gpu_addr, std::size_t size) const {
410 std::vector<std::pair<GPUVAddr, std::size_t>> result{}; 642 std::vector<std::pair<GPUVAddr, std::size_t>> result{};
411 size_t page_index{gpu_addr >> page_bits};
412 size_t remaining_size{size};
413 size_t page_offset{gpu_addr & page_mask};
414 std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{}; 643 std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
415 std::optional<VAddr> old_page_addr{}; 644 std::optional<VAddr> old_page_addr{};
416 const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) { 645 const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index,
417 if (!last_segment) { 646 [[maybe_unused]] std::size_t offset,
418 const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset; 647 [[maybe_unused]] std::size_t copy_amount) {
419 last_segment = {new_base_addr, bytes};
420 } else {
421 last_segment->second += bytes;
422 }
423 };
424 const auto split = [&last_segment, &result] {
425 if (last_segment) { 648 if (last_segment) {
426 result.push_back(*last_segment); 649 result.push_back(*last_segment);
427 last_segment = std::nullopt; 650 last_segment = std::nullopt;
428 } 651 }
429 }; 652 };
430 while (remaining_size > 0) { 653 const auto extend_size_big = [this, &split, &old_page_addr,
431 const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; 654 &last_segment](std::size_t page_index, std::size_t offset,
432 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 655 std::size_t copy_amount) {
433 if (!page_addr || *page_addr == 0) { 656 const VAddr cpu_addr_base =
434 split(); 657 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
435 } else if (old_page_addr) { 658 if (old_page_addr) {
436 if (*old_page_addr + page_size != *page_addr) { 659 if (*old_page_addr != cpu_addr_base) {
437 split(); 660 split(0, 0, 0);
661 }
662 }
663 old_page_addr = {cpu_addr_base + copy_amount};
664 if (!last_segment) {
665 const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset;
666 last_segment = {new_base_addr, copy_amount};
667 } else {
668 last_segment->second += copy_amount;
669 }
670 };
671 const auto extend_size_short = [this, &split, &old_page_addr,
672 &last_segment](std::size_t page_index, std::size_t offset,
673 std::size_t copy_amount) {
674 const VAddr cpu_addr_base =
675 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
676 if (old_page_addr) {
677 if (*old_page_addr != cpu_addr_base) {
678 split(0, 0, 0);
438 } 679 }
439 extend_size(num_bytes); 680 }
681 old_page_addr = {cpu_addr_base + copy_amount};
682 if (!last_segment) {
683 const GPUVAddr new_base_addr = (page_index << page_bits) + offset;
684 last_segment = {new_base_addr, copy_amount};
440 } else { 685 } else {
441 extend_size(num_bytes); 686 last_segment->second += copy_amount;
442 } 687 }
443 ++page_index; 688 };
444 page_offset = 0; 689 auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
445 remaining_size -= num_bytes; 690 GPUVAddr base = (page_index << big_page_bits) + offset;
446 old_page_addr = page_addr; 691 MemoryOperation<false>(base, copy_amount, extend_size_short, split, split);
447 } 692 };
448 split(); 693 MemoryOperation<true>(gpu_addr, size, extend_size_big, split, do_short_pages);
694 split(0, 0, 0);
449 return result; 695 return result;
450} 696}
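
GetSubmappedRange splits a possibly sparse GPU range into (address, size) segments that are mapped and contiguous on the CPU side. A hypothetical consumer that visits only those segments:

#include <cstddef>
#include "video_core/memory_manager.h"

// Walk only the mapped pieces of a sparse range; the per-segment FlushRegion
// call is just an example of work a cache might do for each piece.
void TouchMappedSegments(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, std::size_t size) {
    for (const auto& [segment_addr, segment_size] : gpu_memory.GetSubmappedRange(gpu_addr, size)) {
        gpu_memory.FlushRegion(segment_addr, segment_size);
    }
}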
451 697
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 74f9ce175..f992e29f3 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -3,73 +3,39 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <atomic>
6#include <map> 7#include <map>
7#include <optional> 8#include <optional>
8#include <vector> 9#include <vector>
9 10
10#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/multi_level_page_table.h"
13#include "common/virtual_buffer.h"
11 14
12namespace VideoCore { 15namespace VideoCore {
13class RasterizerInterface; 16class RasterizerInterface;
14} 17}
15 18
16namespace Core { 19namespace Core {
20class DeviceMemory;
21namespace Memory {
22class Memory;
23} // namespace Memory
17class System; 24class System;
18} 25} // namespace Core
19 26
20namespace Tegra { 27namespace Tegra {
21 28
22class PageEntry final {
23public:
24 enum class State : u32 {
25 Unmapped = static_cast<u32>(-1),
26 Allocated = static_cast<u32>(-2),
27 };
28
29 constexpr PageEntry() = default;
30 constexpr PageEntry(State state_) : state{state_} {}
31 constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
32
33 [[nodiscard]] constexpr bool IsUnmapped() const {
34 return state == State::Unmapped;
35 }
36
37 [[nodiscard]] constexpr bool IsAllocated() const {
38 return state == State::Allocated;
39 }
40
41 [[nodiscard]] constexpr bool IsValid() const {
42 return !IsUnmapped() && !IsAllocated();
43 }
44
45 [[nodiscard]] constexpr VAddr ToAddress() const {
46 if (!IsValid()) {
47 return {};
48 }
49
50 return static_cast<VAddr>(state) << ShiftBits;
51 }
52
53 [[nodiscard]] constexpr PageEntry operator+(u64 offset) const {
54 // If this is a reserved value, offsets do not apply
55 if (!IsValid()) {
56 return *this;
57 }
58 return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
59 }
60
61private:
62 static constexpr std::size_t ShiftBits{12};
63
64 State state{State::Unmapped};
65};
66static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
67
68class MemoryManager final { 29class MemoryManager final {
69public: 30public:
70 explicit MemoryManager(Core::System& system_); 31 explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
32 u64 big_page_bits_ = 16, u64 page_bits_ = 12);
71 ~MemoryManager(); 33 ~MemoryManager();
72 34
35 size_t GetID() const {
36 return unique_identifier;
37 }
38
73 /// Binds a renderer to the memory manager. 39 /// Binds a renderer to the memory manager.
74 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); 40 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
75 41
@@ -86,9 +52,6 @@ public:
86 [[nodiscard]] u8* GetPointer(GPUVAddr addr); 52 [[nodiscard]] u8* GetPointer(GPUVAddr addr);
87 [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; 53 [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
88 54
89 /// Returns the number of bytes until the end of the memory map containing the given GPU address
90 [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
91
92 /** 55 /**
93 * ReadBlock and WriteBlock are full read and write operations over virtual 56 * ReadBlock and WriteBlock are full read and write operations over virtual
94 * GPU Memory. It's important to use these when GPU memory may not be continuous 57 * GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -135,54 +98,95 @@ public:
135 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, 98 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
136 std::size_t size) const; 99 std::size_t size) const;
137 100
138 [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size); 101 GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true);
139 [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); 102 GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
140 [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size);
141 [[nodiscard]] std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size);
142 [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
143 void Unmap(GPUVAddr gpu_addr, std::size_t size); 103 void Unmap(GPUVAddr gpu_addr, std::size_t size);
144 104
145 void FlushRegion(GPUVAddr gpu_addr, size_t size) const; 105 void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
146 106
107 void InvalidateRegion(GPUVAddr gpu_addr, size_t size) const;
108
109 bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const;
110
111 size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
112
113 bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
114 return gpu_addr < address_space_size;
115 }
116
147private: 117private:
148 [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const; 118 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
149 void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); 119 inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
150 GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size); 120 FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const;
151 [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align, 121
152 bool start_32bit_address = false) const; 122 template <bool is_safe>
153 123 void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const;
154 void TryLockPage(PageEntry page_entry, std::size_t size); 124
155 void TryUnlockPage(PageEntry page_entry, std::size_t size); 125 template <bool is_safe>
156 126 void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
157 void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, 127
158 bool is_safe) const; 128 template <bool is_big_page>
159 void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, 129 [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
160 bool is_safe); 130 if constexpr (is_big_page) {
161 131 return (gpu_addr >> big_page_bits) & big_page_table_mask;
162 [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) { 132 } else {
163 return (gpu_addr >> page_bits) & page_table_mask; 133 return (gpu_addr >> page_bits) & page_table_mask;
134 }
164 } 135 }
165 136
166 static constexpr u64 address_space_size = 1ULL << 40; 137 inline bool IsBigPageContinous(size_t big_page_index) const;
167 static constexpr u64 address_space_start = 1ULL << 32; 138 inline void SetBigPageContinous(size_t big_page_index, bool value);
168 static constexpr u64 address_space_start_low = 1ULL << 16;
169 static constexpr u64 page_bits{16};
170 static constexpr u64 page_size{1 << page_bits};
171 static constexpr u64 page_mask{page_size - 1};
172 static constexpr u64 page_table_bits{24};
173 static constexpr u64 page_table_size{1 << page_table_bits};
174 static constexpr u64 page_table_mask{page_table_size - 1};
175 139
176 Core::System& system; 140 Core::System& system;
141 Core::Memory::Memory& memory;
142 Core::DeviceMemory& device_memory;
143
144 const u64 address_space_bits;
145 const u64 page_bits;
146 u64 address_space_size;
147 u64 page_size;
148 u64 page_mask;
149 u64 page_table_mask;
150 static constexpr u64 cpu_page_bits{12};
151
152 const u64 big_page_bits;
153 u64 big_page_size;
154 u64 big_page_mask;
155 u64 big_page_table_mask;
177 156
178 VideoCore::RasterizerInterface* rasterizer = nullptr; 157 VideoCore::RasterizerInterface* rasterizer = nullptr;
179 158
180 std::vector<PageEntry> page_table; 159 enum class EntryType : u64 {
160 Free = 0,
161 Reserved = 1,
162 Mapped = 2,
163 };
164
165 std::vector<u64> entries;
166 std::vector<u64> big_entries;
167
168 template <EntryType entry_type>
169 GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
170
171 template <EntryType entry_type>
172 GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
173
174 template <bool is_big_page>
175 inline EntryType GetEntry(size_t position) const;
176
177 template <bool is_big_page>
178 inline void SetEntry(size_t position, EntryType entry);
179
180 Common::MultiLevelPageTable<u32> page_table;
181 Common::VirtualBuffer<u32> big_page_table_cpu;
182
183 std::vector<u64> big_page_continous;
184
185 constexpr static size_t continous_bits = 64;
181 186
182 using MapRange = std::pair<GPUVAddr, size_t>; 187 const size_t unique_identifier;
183 std::vector<MapRange> map_ranges;
184 188
185 std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue; 189 static std::atomic<size_t> unique_identifier_generator;
186}; 190};
187 191
188} // namespace Tegra 192} // namespace Tegra
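
The new header keeps a side bitset, big_page_continous, so that a 64 KiB big page whose sixteen CPU pages happen to be physically contiguous can be copied with a single memcpy instead of going through Core::Memory page by page. A sketch of the bit packing implied by continous_bits = 64; the free functions below only mirror what IsBigPageContinous/SetBigPageContinous are declared to do and are not the actual private members:

#include <cstddef>
#include <cstdint>
#include <vector>

// One bit per big page, packed 64 to a word.
inline bool TestBigPageContinuous(const std::vector<std::uint64_t>& bits, std::size_t big_page) {
    return ((bits[big_page / 64] >> (big_page % 64)) & 1ULL) != 0;
}

inline void SetBigPageContinuousBit(std::vector<std::uint64_t>& bits, std::size_t big_page, bool value) {
    const std::uint64_t mask = 1ULL << (big_page % 64);
    auto& word = bits[big_page / 64];
    word = value ? (word | mask) : (word & ~mask);
}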
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 889b606b3..b0ebe71b7 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -17,6 +17,7 @@
17 17
18#include "common/assert.h" 18#include "common/assert.h"
19#include "common/settings.h" 19#include "common/settings.h"
20#include "video_core/control/channel_state_cache.h"
20#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
21#include "video_core/memory_manager.h" 22#include "video_core/memory_manager.h"
22#include "video_core/rasterizer_interface.h" 23#include "video_core/rasterizer_interface.h"
@@ -90,13 +91,10 @@ private:
90}; 91};
91 92
92template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter> 93template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
93class QueryCacheBase { 94class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
94public: 95public:
95 explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, 96 explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_)
96 Tegra::Engines::Maxwell3D& maxwell3d_, 97 : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
97 Tegra::MemoryManager& gpu_memory_)
98 : rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
99 gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
100 VideoCore::QueryType::SamplesPassed}}} {} 98 VideoCore::QueryType::SamplesPassed}}} {}
101 99
102 void InvalidateRegion(VAddr addr, std::size_t size) { 100 void InvalidateRegion(VAddr addr, std::size_t size) {
@@ -117,13 +115,13 @@ public:
117 */ 115 */
118 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) { 116 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
119 std::unique_lock lock{mutex}; 117 std::unique_lock lock{mutex};
120 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 118 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
121 ASSERT(cpu_addr); 119 ASSERT(cpu_addr);
122 120
123 CachedQuery* query = TryGet(*cpu_addr); 121 CachedQuery* query = TryGet(*cpu_addr);
124 if (!query) { 122 if (!query) {
125 ASSERT_OR_EXECUTE(cpu_addr, return;); 123 ASSERT_OR_EXECUTE(cpu_addr, return;);
126 u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); 124 u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);
127 125
128 query = Register(type, *cpu_addr, host_ptr, timestamp.has_value()); 126 query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
129 } 127 }
@@ -137,8 +135,10 @@ public:
137 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. 135 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
138 void UpdateCounters() { 136 void UpdateCounters() {
139 std::unique_lock lock{mutex}; 137 std::unique_lock lock{mutex};
140 const auto& regs = maxwell3d.regs; 138 if (maxwell3d) {
141 Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); 139 const auto& regs = maxwell3d->regs;
140 Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable);
141 }
142 } 142 }
143 143
144 /// Resets a counter to zero. It doesn't disable the query after resetting. 144 /// Resets a counter to zero. It doesn't disable the query after resetting.
@@ -264,8 +264,6 @@ private:
264 static constexpr unsigned YUZU_PAGEBITS = 12; 264 static constexpr unsigned YUZU_PAGEBITS = 12;
265 265
266 VideoCore::RasterizerInterface& rasterizer; 266 VideoCore::RasterizerInterface& rasterizer;
267 Tegra::Engines::Maxwell3D& maxwell3d;
268 Tegra::MemoryManager& gpu_memory;
269 267
270 std::recursive_mutex mutex; 268 std::recursive_mutex mutex;
271 269
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index a04a76481..d2d40884c 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -16,6 +16,9 @@ class MemoryManager;
16namespace Engines { 16namespace Engines {
17class AccelerateDMAInterface; 17class AccelerateDMAInterface;
18} 18}
19namespace Control {
20struct ChannelState;
21}
19} // namespace Tegra 22} // namespace Tegra
20 23
21namespace VideoCore { 24namespace VideoCore {
@@ -59,7 +62,10 @@ public:
59 virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0; 62 virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0;
60 63
61 /// Signal a GPU based semaphore as a fence 64 /// Signal a GPU based semaphore as a fence
62 virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0; 65 virtual void SignalFence(std::function<void()>&& func) = 0;
66
67 /// Send an operation to be done after a certain amount of flushes.
68 virtual void SyncOperation(std::function<void()>&& func) = 0;
63 69
64 /// Signal a GPU based syncpoint as a fence 70 /// Signal a GPU based syncpoint as a fence
65 virtual void SignalSyncPoint(u32 value) = 0; 71 virtual void SignalSyncPoint(u32 value) = 0;
@@ -86,13 +92,13 @@ public:
86 virtual void OnCPUWrite(VAddr addr, u64 size) = 0; 92 virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
87 93
88 /// Sync memory between guest and host. 94 /// Sync memory between guest and host.
89 virtual void SyncGuestHost() = 0; 95 virtual void InvalidateGPUCache() = 0;
90 96
91 /// Unmap memory range 97 /// Unmap memory range
92 virtual void UnmapMemory(VAddr addr, u64 size) = 0; 98 virtual void UnmapMemory(VAddr addr, u64 size) = 0;
93 99
94 /// Remap GPU memory range. This means underneath backing memory changed 100 /// Remap GPU memory range. This means underneath backing memory changed
95 virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0; 101 virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0;
96 102
97 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory 103 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
98 /// and invalidated 104 /// and invalidated
@@ -123,7 +129,7 @@ public:
123 [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; 129 [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0;
124 130
125 virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 131 virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
126 std::span<u8> memory) = 0; 132 std::span<const u8> memory) = 0;
127 133
128 /// Attempt to use a faster method to display the framebuffer to screen 134 /// Attempt to use a faster method to display the framebuffer to screen
129 [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, 135 [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
@@ -137,5 +143,11 @@ public:
137 /// Initialize disk cached resources for the game being emulated 143 /// Initialize disk cached resources for the game being emulated
138 virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 144 virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
139 const DiskResourceLoadCallback& callback) {} 145 const DiskResourceLoadCallback& callback) {}
146
147 virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {}
148
149 virtual void BindChannel(Tegra::Control::ChannelState& channel) {}
150
151 virtual void ReleaseChannel(s32 channel_id) {}
140}; 152};
141} // namespace VideoCore 153} // namespace VideoCore
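
The three new channel hooks give a rasterizer a per-channel cache lifecycle: create state when a channel appears, switch to it before work is submitted, and drop it when the channel dies. A standalone sketch of that shape, with placeholder types rather than the real yuzu caches:

#include <memory>
#include <unordered_map>

struct FakeChannelCaches { /* stand-in for per-channel texture/buffer/shader state */ };

class ChannelAwareBackend {
public:
    void InitializeChannel(int channel_id) {
        caches.emplace(channel_id, std::make_unique<FakeChannelCaches>());
    }
    void BindChannel(int channel_id) {
        bound = caches.at(channel_id).get();  // subsequent work uses this channel's caches
    }
    void ReleaseChannel(int channel_id) {
        if (bound == caches.at(channel_id).get()) {
            bound = nullptr;
        }
        caches.erase(channel_id);
    }

private:
    std::unordered_map<int, std::unique_ptr<FakeChannelCaches>> caches;
    FakeChannelCaches* bound = nullptr;
};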
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
index 1f0f156ed..26d066004 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
@@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep
28} 28}
29 29
30ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_, 30ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_,
31 BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, 31 BufferCache& buffer_cache_, ProgramManager& program_manager_,
32 Tegra::Engines::KeplerCompute& kepler_compute_, 32 const Shader::Info& info_, std::string code,
33 ProgramManager& program_manager_, const Shader::Info& info_, 33 std::vector<u32> code_v)
34 std::string code, std::vector<u32> code_v) 34 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
35 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_}, 35 program_manager{program_manager_}, info{info_} {
36 kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} {
37 switch (device.GetShaderBackend()) { 36 switch (device.GetShaderBackend()) {
38 case Settings::ShaderBackend::GLSL: 37 case Settings::ShaderBackend::GLSL:
39 source_program = CreateProgram(code, GL_COMPUTE_SHADER); 38 source_program = CreateProgram(code, GL_COMPUTE_SHADER);
@@ -86,7 +85,7 @@ void ComputePipeline::Configure() {
86 GLsizei texture_binding{}; 85 GLsizei texture_binding{};
87 GLsizei image_binding{}; 86 GLsizei image_binding{};
88 87
89 const auto& qmd{kepler_compute.launch_description}; 88 const auto& qmd{kepler_compute->launch_description};
90 const auto& cbufs{qmd.const_buffer_config}; 89 const auto& cbufs{qmd.const_buffer_config};
91 const bool via_header_index{qmd.linked_tsc != 0}; 90 const bool via_header_index{qmd.linked_tsc != 0};
92 const auto read_handle{[&](const auto& desc, u32 index) { 91 const auto read_handle{[&](const auto& desc, u32 index) {
@@ -101,12 +100,13 @@ void ComputePipeline::Configure() {
101 const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; 100 const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
102 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + 101 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
103 secondary_offset}; 102 secondary_offset};
104 const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; 103 const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
105 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; 104 const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
105 << desc.secondary_shift_left};
106 return TexturePair(lhs_raw | rhs_raw, via_header_index); 106 return TexturePair(lhs_raw | rhs_raw, via_header_index);
107 } 107 }
108 } 108 }
109 return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); 109 return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
110 }}; 110 }};
111 const auto add_image{[&](const auto& desc, bool blacklist) { 111 const auto add_image{[&](const auto& desc, bool blacklist) {
112 for (u32 index = 0; index < desc.count; ++index) { 112 for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h
index 723f27f11..6534dec32 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h
@@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineKey>);
49class ComputePipeline { 49class ComputePipeline {
50public: 50public:
51 explicit ComputePipeline(const Device& device, TextureCache& texture_cache_, 51 explicit ComputePipeline(const Device& device, TextureCache& texture_cache_,
52 BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, 52 BufferCache& buffer_cache_, ProgramManager& program_manager_,
53 Tegra::Engines::KeplerCompute& kepler_compute_, 53 const Shader::Info& info_, std::string code, std::vector<u32> code_v);
54 ProgramManager& program_manager_, const Shader::Info& info_,
55 std::string code, std::vector<u32> code_v);
56 54
57 void Configure(); 55 void Configure();
58 56
@@ -60,11 +58,17 @@ public:
60 return writes_global_memory; 58 return writes_global_memory;
61 } 59 }
62 60
61 void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_,
62 Tegra::MemoryManager* gpu_memory_) {
63 kepler_compute = kepler_compute_;
64 gpu_memory = gpu_memory_;
65 }
66
63private: 67private:
64 TextureCache& texture_cache; 68 TextureCache& texture_cache;
65 BufferCache& buffer_cache; 69 BufferCache& buffer_cache;
66 Tegra::MemoryManager& gpu_memory; 70 Tegra::MemoryManager* gpu_memory;
67 Tegra::Engines::KeplerCompute& kepler_compute; 71 Tegra::Engines::KeplerCompute* kepler_compute;
68 ProgramManager& program_manager; 72 ProgramManager& program_manager;
69 73
70 Shader::Info info; 74 Shader::Info info;
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
index 6e82c2e28..91463f854 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -10,10 +10,7 @@
10 10
11namespace OpenGL { 11namespace OpenGL {
12 12
13GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {} 13GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {}
14
15GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_)
16 : FenceBase{address_, payload_, is_stubbed_} {}
17 14
18GLInnerFence::~GLInnerFence() = default; 15GLInnerFence::~GLInnerFence() = default;
19 16
@@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize
48 BufferCache& buffer_cache_, QueryCache& query_cache_) 45 BufferCache& buffer_cache_, QueryCache& query_cache_)
49 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {} 46 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {}
50 47
51Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { 48Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) {
52 return std::make_shared<GLInnerFence>(value, is_stubbed); 49 return std::make_shared<GLInnerFence>(is_stubbed);
53}
54
55Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
56 return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
57} 50}
58 51
59void FenceManagerOpenGL::QueueFence(Fence& fence) { 52void FenceManagerOpenGL::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
index 14ff00db2..f1446e732 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.h
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -16,8 +16,7 @@ namespace OpenGL {
16 16
17class GLInnerFence : public VideoCommon::FenceBase { 17class GLInnerFence : public VideoCommon::FenceBase {
18public: 18public:
19 explicit GLInnerFence(u32 payload_, bool is_stubbed_); 19 explicit GLInnerFence(bool is_stubbed_);
20 explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_);
21 ~GLInnerFence(); 20 ~GLInnerFence();
22 21
23 void Queue(); 22 void Queue();
@@ -40,8 +39,7 @@ public:
40 QueryCache& query_cache); 39 QueryCache& query_cache);
41 40
42protected: 41protected:
43 Fence CreateFence(u32 value, bool is_stubbed) override; 42 Fence CreateFence(bool is_stubbed) override;
44 Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
45 void QueueFence(Fence& fence) override; 43 void QueueFence(Fence& fence) override;
46 bool IsFenceSignaled(Fence& fence) const override; 44 bool IsFenceSignaled(Fence& fence) const override;
47 void WaitFence(Fence& fence) override; 45 void WaitFence(Fence& fence) override;
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index 67eae369d..41493a7da 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 ena
169} 169}
170} // Anonymous namespace 170} // Anonymous namespace
171 171
172GraphicsPipeline::GraphicsPipeline( 172GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
173 const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_, 173 BufferCache& buffer_cache_, ProgramManager& program_manager_,
174 Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, 174 StateTracker& state_tracker_, ShaderWorker* thread_worker,
175 ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker, 175 VideoCore::ShaderNotify* shader_notify,
176 VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources, 176 std::array<std::string, 5> sources,
177 std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos, 177 std::array<std::vector<u32>, 5> sources_spirv,
178 const GraphicsPipelineKey& key_) 178 const std::array<const Shader::Info*, 5>& infos,
179 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, 179 const GraphicsPipelineKey& key_)
180 gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_}, 180 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
181 state_tracker{state_tracker_}, key{key_} { 181 state_tracker{state_tracker_}, key{key_} {
182 if (shader_notify) { 182 if (shader_notify) {
183 shader_notify->MarkShaderBuilding(); 183 shader_notify->MarkShaderBuilding();
@@ -285,7 +285,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
285 buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings); 285 buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings);
286 buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers); 286 buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers);
287 287
288 const auto& regs{maxwell3d.regs}; 288 const auto& regs{maxwell3d->regs};
289 const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; 289 const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
290 const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { 290 const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
291 const Shader::Info& info{stage_infos[stage]}; 291 const Shader::Info& info{stage_infos[stage]};
@@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
299 ++ssbo_index; 299 ++ssbo_index;
300 } 300 }
301 } 301 }
302 const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; 302 const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
303 const auto read_handle{[&](const auto& desc, u32 index) { 303 const auto read_handle{[&](const auto& desc, u32 index) {
304 ASSERT(cbufs[desc.cbuf_index].enabled); 304 ASSERT(cbufs[desc.cbuf_index].enabled);
305 const u32 index_offset{index << desc.size_shift}; 305 const u32 index_offset{index << desc.size_shift};
@@ -312,13 +312,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
312 const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; 312 const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
313 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + 313 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
314 second_offset}; 314 second_offset};
315 const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; 315 const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
316 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; 316 const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
317 << desc.secondary_shift_left};
317 const u32 raw{lhs_raw | rhs_raw}; 318 const u32 raw{lhs_raw | rhs_raw};
318 return TexturePair(raw, via_header_index); 319 return TexturePair(raw, via_header_index);
319 } 320 }
320 } 321 }
321 return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); 322 return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
322 }}; 323 }};
323 const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { 324 const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
324 for (u32 index = 0; index < desc.count; ++index) { 325 for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
index 4ec15b966..a0f0e63cb 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
@@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>);
71class GraphicsPipeline { 71class GraphicsPipeline {
72public: 72public:
73 explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_, 73 explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
74 BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, 74 BufferCache& buffer_cache_, ProgramManager& program_manager_,
75 Tegra::Engines::Maxwell3D& maxwell3d_, 75 StateTracker& state_tracker_, ShaderWorker* thread_worker,
76 ProgramManager& program_manager_, StateTracker& state_tracker_, 76 VideoCore::ShaderNotify* shader_notify,
77 ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify,
78 std::array<std::string, 5> sources, 77 std::array<std::string, 5> sources,
79 std::array<std::vector<u32>, 5> sources_spirv, 78 std::array<std::vector<u32>, 5> sources_spirv,
80 const std::array<const Shader::Info*, 5>& infos, 79 const std::array<const Shader::Info*, 5>& infos,
@@ -107,6 +106,11 @@ public:
107 }; 106 };
108 } 107 }
109 108
109 void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
110 maxwell3d = maxwell3d_;
111 gpu_memory = gpu_memory_;
112 }
113
110private: 114private:
111 template <typename Spec> 115 template <typename Spec>
112 void ConfigureImpl(bool is_indexed); 116 void ConfigureImpl(bool is_indexed);
@@ -119,8 +123,8 @@ private:
119 123
120 TextureCache& texture_cache; 124 TextureCache& texture_cache;
121 BufferCache& buffer_cache; 125 BufferCache& buffer_cache;
122 Tegra::MemoryManager& gpu_memory; 126 Tegra::MemoryManager* gpu_memory;
123 Tegra::Engines::Maxwell3D& maxwell3d; 127 Tegra::Engines::Maxwell3D* maxwell3d;
124 ProgramManager& program_manager; 128 ProgramManager& program_manager;
125 StateTracker& state_tracker; 129 StateTracker& state_tracker;
126 const GraphicsPipelineKey key; 130 const GraphicsPipelineKey key;
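
With gpu_memory and maxwell3d now plain pointers, a pipeline is no longer tied to a single channel; the caller is expected to point it at the active channel's engine and memory manager via SetEngine before each configure. A hypothetical dispatch-time sequence (the free function and its parameters are illustrative):

#include "video_core/renderer_opengl/gl_graphics_pipeline.h"

// Rebind the pipeline to the currently bound channel, then configure the draw.
void DrawWithChannel(OpenGL::GraphicsPipeline& pipeline, Tegra::Engines::Maxwell3D* maxwell3d,
                     Tegra::MemoryManager* gpu_memory, bool is_indexed) {
    pipeline.SetEngine(maxwell3d, gpu_memory);
    pipeline.Configure(is_indexed);
}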
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index ed40f5791..5070db441 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
26 26
27} // Anonymous namespace 27} // Anonymous namespace
28 28
29QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, 29QueryCache::QueryCache(RasterizerOpenGL& rasterizer_)
30 Tegra::MemoryManager& gpu_memory_) 30 : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {}
31 : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {}
32 31
33QueryCache::~QueryCache() = default; 32QueryCache::~QueryCache() = default;
34 33
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 8a49f1ef0..14ce59990 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
28class QueryCache final 28class QueryCache final
29 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { 29 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
30public: 30public:
31 explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, 31 explicit QueryCache(RasterizerOpenGL& rasterizer_);
32 Tegra::MemoryManager& gpu_memory_);
33 ~QueryCache(); 32 ~QueryCache();
34 33
35 OGLQuery AllocateQuery(VideoCore::QueryType type); 34 OGLQuery AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index a0d048b0b..c2d80605d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -16,7 +16,7 @@
16#include "common/microprofile.h" 16#include "common/microprofile.h"
17#include "common/scope_exit.h" 17#include "common/scope_exit.h"
18#include "common/settings.h" 18#include "common/settings.h"
19 19#include "video_core/control/channel_state.h"
20#include "video_core/engines/kepler_compute.h" 20#include "video_core/engines/kepler_compute.h"
21#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
22#include "video_core/memory_manager.h" 22#include "video_core/memory_manager.h"
@@ -56,22 +56,20 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
56 Core::Memory::Memory& cpu_memory_, const Device& device_, 56 Core::Memory::Memory& cpu_memory_, const Device& device_,
57 ScreenInfo& screen_info_, ProgramManager& program_manager_, 57 ScreenInfo& screen_info_, ProgramManager& program_manager_,
58 StateTracker& state_tracker_) 58 StateTracker& state_tracker_)
59 : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()), 59 : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_),
60 kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), 60 program_manager(program_manager_), state_tracker(state_tracker_),
61 screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_),
62 texture_cache_runtime(device, program_manager, state_tracker), 61 texture_cache_runtime(device, program_manager, state_tracker),
63 texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), 62 texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device),
64 buffer_cache_runtime(device), 63 buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
65 buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), 64 shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
66 shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache, 65 state_tracker, gpu.ShaderNotify()),
67 buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()), 66 query_cache(*this), accelerate_dma(buffer_cache),
68 query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache),
69 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {} 67 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {}
70 68
71RasterizerOpenGL::~RasterizerOpenGL() = default; 69RasterizerOpenGL::~RasterizerOpenGL() = default;
72 70
73void RasterizerOpenGL::SyncVertexFormats() { 71void RasterizerOpenGL::SyncVertexFormats() {
74 auto& flags = maxwell3d.dirty.flags; 72 auto& flags = maxwell3d->dirty.flags;
75 if (!flags[Dirty::VertexFormats]) { 73 if (!flags[Dirty::VertexFormats]) {
76 return; 74 return;
77 } 75 }
@@ -89,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() {
89 } 87 }
90 flags[Dirty::VertexFormat0 + index] = false; 88 flags[Dirty::VertexFormat0 + index] = false;
91 89
92 const auto attrib = maxwell3d.regs.vertex_attrib_format[index]; 90 const auto attrib = maxwell3d->regs.vertex_attrib_format[index];
93 const auto gl_index = static_cast<GLuint>(index); 91 const auto gl_index = static_cast<GLuint>(index);
94 92
95 // Disable constant attributes. 93 // Disable constant attributes.
@@ -113,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() {
113} 111}
114 112
115void RasterizerOpenGL::SyncVertexInstances() { 113void RasterizerOpenGL::SyncVertexInstances() {
116 auto& flags = maxwell3d.dirty.flags; 114 auto& flags = maxwell3d->dirty.flags;
117 if (!flags[Dirty::VertexInstances]) { 115 if (!flags[Dirty::VertexInstances]) {
118 return; 116 return;
119 } 117 }
120 flags[Dirty::VertexInstances] = false; 118 flags[Dirty::VertexInstances] = false;
121 119
122 const auto& regs = maxwell3d.regs; 120 const auto& regs = maxwell3d->regs;
123 for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { 121 for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) {
124 if (!flags[Dirty::VertexInstance0 + index]) { 122 if (!flags[Dirty::VertexInstance0 + index]) {
125 continue; 123 continue;
@@ -140,11 +138,11 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load
140 138
141void RasterizerOpenGL::Clear() { 139void RasterizerOpenGL::Clear() {
142 MICROPROFILE_SCOPE(OpenGL_Clears); 140 MICROPROFILE_SCOPE(OpenGL_Clears);
143 if (!maxwell3d.ShouldExecute()) { 141 if (!maxwell3d->ShouldExecute()) {
144 return; 142 return;
145 } 143 }
146 144
147 const auto& regs = maxwell3d.regs; 145 const auto& regs = maxwell3d->regs;
148 bool use_color{}; 146 bool use_color{};
149 bool use_depth{}; 147 bool use_depth{};
150 bool use_stencil{}; 148 bool use_stencil{};
@@ -217,22 +215,26 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
217 if (!pipeline) { 215 if (!pipeline) {
218 return; 216 return;
219 } 217 }
218
219 gpu.TickWork();
220
220 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 221 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
222 pipeline->SetEngine(maxwell3d, gpu_memory);
221 pipeline->Configure(is_indexed); 223 pipeline->Configure(is_indexed);
222 224
223 SyncState(); 225 SyncState();
224 226
225 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology); 227 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology);
226 BeginTransformFeedback(pipeline, primitive_mode); 228 BeginTransformFeedback(pipeline, primitive_mode);
227 229
228 const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance); 230 const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.vb_base_instance);
229 const GLsizei num_instances = 231 const GLsizei num_instances =
230 static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1); 232 static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1);
231 if (is_indexed) { 233 if (is_indexed) {
232 const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base); 234 const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vb_element_base);
233 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count); 235 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_array.count);
234 const GLvoid* const offset = buffer_cache_runtime.IndexOffset(); 236 const GLvoid* const offset = buffer_cache_runtime.IndexOffset();
235 const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format); 237 const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_array.format);
236 if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { 238 if (num_instances == 1 && base_instance == 0 && base_vertex == 0) {
237 glDrawElements(primitive_mode, num_vertices, format, offset); 239 glDrawElements(primitive_mode, num_vertices, format, offset);
238 } else if (num_instances == 1 && base_instance == 0) { 240 } else if (num_instances == 1 && base_instance == 0) {
@@ -251,8 +253,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
251 base_instance); 253 base_instance);
252 } 254 }
253 } else { 255 } else {
254 const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first); 256 const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vertex_buffer.first);
255 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count); 257 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.vertex_buffer.count);
256 if (num_instances == 1 && base_instance == 0) { 258 if (num_instances == 1 && base_instance == 0) {
257 glDrawArrays(primitive_mode, base_vertex, num_vertices); 259 glDrawArrays(primitive_mode, base_vertex, num_vertices);
258 } else if (base_instance == 0) { 260 } else if (base_instance == 0) {
@@ -273,8 +275,9 @@ void RasterizerOpenGL::DispatchCompute() {
273 if (!pipeline) { 275 if (!pipeline) {
274 return; 276 return;
275 } 277 }
278 pipeline->SetEngine(kepler_compute, gpu_memory);
276 pipeline->Configure(); 279 pipeline->Configure();
277 const auto& qmd{kepler_compute.launch_description}; 280 const auto& qmd{kepler_compute->launch_description};
278 glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); 281 glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z);
279 ++num_queued_commands; 282 ++num_queued_commands;
280 has_written_global_memory |= pipeline->WritesGlobalMemory(); 283 has_written_global_memory |= pipeline->WritesGlobalMemory();
@@ -359,7 +362,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
359 } 362 }
360} 363}
361 364
362void RasterizerOpenGL::SyncGuestHost() { 365void RasterizerOpenGL::InvalidateGPUCache() {
363 MICROPROFILE_SCOPE(OpenGL_CacheManagement); 366 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
364 shader_cache.SyncGuestHost(); 367 shader_cache.SyncGuestHost();
365 { 368 {
@@ -380,40 +383,30 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) {
380 shader_cache.OnCPUWrite(addr, size); 383 shader_cache.OnCPUWrite(addr, size);
381} 384}
382 385
383void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) { 386void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
384 { 387 {
385 std::scoped_lock lock{texture_cache.mutex}; 388 std::scoped_lock lock{texture_cache.mutex};
386 texture_cache.UnmapGPUMemory(addr, size); 389 texture_cache.UnmapGPUMemory(as_id, addr, size);
387 } 390 }
388} 391}
389 392
390void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { 393void RasterizerOpenGL::SignalFence(std::function<void()>&& func) {
391 if (!gpu.IsAsync()) { 394 fence_manager.SignalFence(std::move(func));
392 gpu_memory.Write<u32>(addr, value); 395}
393 return; 396
394 } 397void RasterizerOpenGL::SyncOperation(std::function<void()>&& func) {
395 fence_manager.SignalSemaphore(addr, value); 398 fence_manager.SyncOperation(std::move(func));
396} 399}
397 400
398void RasterizerOpenGL::SignalSyncPoint(u32 value) { 401void RasterizerOpenGL::SignalSyncPoint(u32 value) {
399 if (!gpu.IsAsync()) {
400 gpu.IncrementSyncPoint(value);
401 return;
402 }
403 fence_manager.SignalSyncPoint(value); 402 fence_manager.SignalSyncPoint(value);
404} 403}
405 404
406void RasterizerOpenGL::SignalReference() { 405void RasterizerOpenGL::SignalReference() {
407 if (!gpu.IsAsync()) {
408 return;
409 }
410 fence_manager.SignalOrdering(); 406 fence_manager.SignalOrdering();
411} 407}
412 408
413void RasterizerOpenGL::ReleaseFences() { 409void RasterizerOpenGL::ReleaseFences() {
414 if (!gpu.IsAsync()) {
415 return;
416 }
417 fence_manager.WaitPendingFences(); 410 fence_manager.WaitPendingFences();
418} 411}
419 412
@@ -430,6 +423,7 @@ void RasterizerOpenGL::WaitForIdle() {
430} 423}
431 424
432void RasterizerOpenGL::FragmentBarrier() { 425void RasterizerOpenGL::FragmentBarrier() {
426 glTextureBarrier();
433 glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT); 427 glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);
434} 428}
435 429
@@ -482,13 +476,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA()
482} 476}
483 477
484void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 478void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
485 std::span<u8> memory) { 479 std::span<const u8> memory) {
486 auto cpu_addr = gpu_memory.GpuToCpuAddress(address); 480 auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
487 if (!cpu_addr) [[unlikely]] { 481 if (!cpu_addr) [[unlikely]] {
488 gpu_memory.WriteBlock(address, memory.data(), copy_size); 482 gpu_memory->WriteBlock(address, memory.data(), copy_size);
489 return; 483 return;
490 } 484 }
491 gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); 485 gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
492 { 486 {
493 std::unique_lock<std::mutex> lock{buffer_cache.mutex}; 487 std::unique_lock<std::mutex> lock{buffer_cache.mutex};
494 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { 488 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -551,8 +545,8 @@ void RasterizerOpenGL::SyncState() {
551} 545}
552 546
553void RasterizerOpenGL::SyncViewport() { 547void RasterizerOpenGL::SyncViewport() {
554 auto& flags = maxwell3d.dirty.flags; 548 auto& flags = maxwell3d->dirty.flags;
555 const auto& regs = maxwell3d.regs; 549 const auto& regs = maxwell3d->regs;
556 550
557 const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports]; 551 const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports];
558 const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports; 552 const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports;
@@ -657,23 +651,23 @@ void RasterizerOpenGL::SyncViewport() {
657} 651}
658 652
659void RasterizerOpenGL::SyncDepthClamp() { 653void RasterizerOpenGL::SyncDepthClamp() {
660 auto& flags = maxwell3d.dirty.flags; 654 auto& flags = maxwell3d->dirty.flags;
661 if (!flags[Dirty::DepthClampEnabled]) { 655 if (!flags[Dirty::DepthClampEnabled]) {
662 return; 656 return;
663 } 657 }
664 flags[Dirty::DepthClampEnabled] = false; 658 flags[Dirty::DepthClampEnabled] = false;
665 659
666 oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0); 660 oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.view_volume_clip_control.depth_clamp_disabled == 0);
667} 661}
668 662
669void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { 663void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
670 auto& flags = maxwell3d.dirty.flags; 664 auto& flags = maxwell3d->dirty.flags;
671 if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) { 665 if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) {
672 return; 666 return;
673 } 667 }
674 flags[Dirty::ClipDistances] = false; 668 flags[Dirty::ClipDistances] = false;
675 669
676 clip_mask &= maxwell3d.regs.clip_distance_enabled; 670 clip_mask &= maxwell3d->regs.clip_distance_enabled;
677 if (clip_mask == last_clip_distance_mask) { 671 if (clip_mask == last_clip_distance_mask) {
678 return; 672 return;
679 } 673 }
@@ -689,8 +683,8 @@ void RasterizerOpenGL::SyncClipCoef() {
689} 683}
690 684
691void RasterizerOpenGL::SyncCullMode() { 685void RasterizerOpenGL::SyncCullMode() {
692 auto& flags = maxwell3d.dirty.flags; 686 auto& flags = maxwell3d->dirty.flags;
693 const auto& regs = maxwell3d.regs; 687 const auto& regs = maxwell3d->regs;
694 688
695 if (flags[Dirty::CullTest]) { 689 if (flags[Dirty::CullTest]) {
696 flags[Dirty::CullTest] = false; 690 flags[Dirty::CullTest] = false;
@@ -705,23 +699,23 @@ void RasterizerOpenGL::SyncCullMode() {
705} 699}
706 700
707void RasterizerOpenGL::SyncPrimitiveRestart() { 701void RasterizerOpenGL::SyncPrimitiveRestart() {
708 auto& flags = maxwell3d.dirty.flags; 702 auto& flags = maxwell3d->dirty.flags;
709 if (!flags[Dirty::PrimitiveRestart]) { 703 if (!flags[Dirty::PrimitiveRestart]) {
710 return; 704 return;
711 } 705 }
712 flags[Dirty::PrimitiveRestart] = false; 706 flags[Dirty::PrimitiveRestart] = false;
713 707
714 if (maxwell3d.regs.primitive_restart.enabled) { 708 if (maxwell3d->regs.primitive_restart.enabled) {
715 glEnable(GL_PRIMITIVE_RESTART); 709 glEnable(GL_PRIMITIVE_RESTART);
716 glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index); 710 glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index);
717 } else { 711 } else {
718 glDisable(GL_PRIMITIVE_RESTART); 712 glDisable(GL_PRIMITIVE_RESTART);
719 } 713 }
720} 714}
721 715
722void RasterizerOpenGL::SyncDepthTestState() { 716void RasterizerOpenGL::SyncDepthTestState() {
723 auto& flags = maxwell3d.dirty.flags; 717 auto& flags = maxwell3d->dirty.flags;
724 const auto& regs = maxwell3d.regs; 718 const auto& regs = maxwell3d->regs;
725 719
726 if (flags[Dirty::DepthMask]) { 720 if (flags[Dirty::DepthMask]) {
727 flags[Dirty::DepthMask] = false; 721 flags[Dirty::DepthMask] = false;
@@ -740,13 +734,13 @@ void RasterizerOpenGL::SyncDepthTestState() {
740} 734}
741 735
742void RasterizerOpenGL::SyncStencilTestState() { 736void RasterizerOpenGL::SyncStencilTestState() {
743 auto& flags = maxwell3d.dirty.flags; 737 auto& flags = maxwell3d->dirty.flags;
744 if (!flags[Dirty::StencilTest]) { 738 if (!flags[Dirty::StencilTest]) {
745 return; 739 return;
746 } 740 }
747 flags[Dirty::StencilTest] = false; 741 flags[Dirty::StencilTest] = false;
748 742
749 const auto& regs = maxwell3d.regs; 743 const auto& regs = maxwell3d->regs;
750 oglEnable(GL_STENCIL_TEST, regs.stencil_enable); 744 oglEnable(GL_STENCIL_TEST, regs.stencil_enable);
751 745
752 glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), 746 glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func),
@@ -771,23 +765,23 @@ void RasterizerOpenGL::SyncStencilTestState() {
771} 765}
772 766
773void RasterizerOpenGL::SyncRasterizeEnable() { 767void RasterizerOpenGL::SyncRasterizeEnable() {
774 auto& flags = maxwell3d.dirty.flags; 768 auto& flags = maxwell3d->dirty.flags;
775 if (!flags[Dirty::RasterizeEnable]) { 769 if (!flags[Dirty::RasterizeEnable]) {
776 return; 770 return;
777 } 771 }
778 flags[Dirty::RasterizeEnable] = false; 772 flags[Dirty::RasterizeEnable] = false;
779 773
780 oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0); 774 oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0);
781} 775}
782 776
783void RasterizerOpenGL::SyncPolygonModes() { 777void RasterizerOpenGL::SyncPolygonModes() {
784 auto& flags = maxwell3d.dirty.flags; 778 auto& flags = maxwell3d->dirty.flags;
785 if (!flags[Dirty::PolygonModes]) { 779 if (!flags[Dirty::PolygonModes]) {
786 return; 780 return;
787 } 781 }
788 flags[Dirty::PolygonModes] = false; 782 flags[Dirty::PolygonModes] = false;
789 783
790 const auto& regs = maxwell3d.regs; 784 const auto& regs = maxwell3d->regs;
791 if (regs.fill_rectangle) { 785 if (regs.fill_rectangle) {
792 if (!GLAD_GL_NV_fill_rectangle) { 786 if (!GLAD_GL_NV_fill_rectangle) {
793 LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); 787 LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported");
@@ -820,7 +814,7 @@ void RasterizerOpenGL::SyncPolygonModes() {
820} 814}
821 815
822void RasterizerOpenGL::SyncColorMask() { 816void RasterizerOpenGL::SyncColorMask() {
823 auto& flags = maxwell3d.dirty.flags; 817 auto& flags = maxwell3d->dirty.flags;
824 if (!flags[Dirty::ColorMasks]) { 818 if (!flags[Dirty::ColorMasks]) {
825 return; 819 return;
826 } 820 }
@@ -829,7 +823,7 @@ void RasterizerOpenGL::SyncColorMask() {
829 const bool force = flags[Dirty::ColorMaskCommon]; 823 const bool force = flags[Dirty::ColorMaskCommon];
830 flags[Dirty::ColorMaskCommon] = false; 824 flags[Dirty::ColorMaskCommon] = false;
831 825
832 const auto& regs = maxwell3d.regs; 826 const auto& regs = maxwell3d->regs;
833 if (regs.color_mask_common) { 827 if (regs.color_mask_common) {
834 if (!force && !flags[Dirty::ColorMask0]) { 828 if (!force && !flags[Dirty::ColorMask0]) {
835 return; 829 return;
@@ -854,30 +848,30 @@ void RasterizerOpenGL::SyncColorMask() {
854} 848}
855 849
856void RasterizerOpenGL::SyncMultiSampleState() { 850void RasterizerOpenGL::SyncMultiSampleState() {
857 auto& flags = maxwell3d.dirty.flags; 851 auto& flags = maxwell3d->dirty.flags;
858 if (!flags[Dirty::MultisampleControl]) { 852 if (!flags[Dirty::MultisampleControl]) {
859 return; 853 return;
860 } 854 }
861 flags[Dirty::MultisampleControl] = false; 855 flags[Dirty::MultisampleControl] = false;
862 856
863 const auto& regs = maxwell3d.regs; 857 const auto& regs = maxwell3d->regs;
864 oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); 858 oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage);
865 oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one); 859 oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one);
866} 860}
867 861
868void RasterizerOpenGL::SyncFragmentColorClampState() { 862void RasterizerOpenGL::SyncFragmentColorClampState() {
869 auto& flags = maxwell3d.dirty.flags; 863 auto& flags = maxwell3d->dirty.flags;
870 if (!flags[Dirty::FragmentClampColor]) { 864 if (!flags[Dirty::FragmentClampColor]) {
871 return; 865 return;
872 } 866 }
873 flags[Dirty::FragmentClampColor] = false; 867 flags[Dirty::FragmentClampColor] = false;
874 868
875 glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE); 869 glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d->regs.frag_color_clamp ? GL_TRUE : GL_FALSE);
876} 870}
877 871
878void RasterizerOpenGL::SyncBlendState() { 872void RasterizerOpenGL::SyncBlendState() {
879 auto& flags = maxwell3d.dirty.flags; 873 auto& flags = maxwell3d->dirty.flags;
880 const auto& regs = maxwell3d.regs; 874 const auto& regs = maxwell3d->regs;
881 875
882 if (flags[Dirty::BlendColor]) { 876 if (flags[Dirty::BlendColor]) {
883 flags[Dirty::BlendColor] = false; 877 flags[Dirty::BlendColor] = false;
@@ -934,13 +928,13 @@ void RasterizerOpenGL::SyncBlendState() {
934} 928}
935 929
936void RasterizerOpenGL::SyncLogicOpState() { 930void RasterizerOpenGL::SyncLogicOpState() {
937 auto& flags = maxwell3d.dirty.flags; 931 auto& flags = maxwell3d->dirty.flags;
938 if (!flags[Dirty::LogicOp]) { 932 if (!flags[Dirty::LogicOp]) {
939 return; 933 return;
940 } 934 }
941 flags[Dirty::LogicOp] = false; 935 flags[Dirty::LogicOp] = false;
942 936
943 const auto& regs = maxwell3d.regs; 937 const auto& regs = maxwell3d->regs;
944 if (regs.logic_op.enable) { 938 if (regs.logic_op.enable) {
945 glEnable(GL_COLOR_LOGIC_OP); 939 glEnable(GL_COLOR_LOGIC_OP);
946 glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation)); 940 glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation));
@@ -950,7 +944,7 @@ void RasterizerOpenGL::SyncLogicOpState() {
950} 944}
951 945
952void RasterizerOpenGL::SyncScissorTest() { 946void RasterizerOpenGL::SyncScissorTest() {
953 auto& flags = maxwell3d.dirty.flags; 947 auto& flags = maxwell3d->dirty.flags;
954 if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) { 948 if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) {
955 return; 949 return;
956 } 950 }
@@ -959,7 +953,7 @@ void RasterizerOpenGL::SyncScissorTest() {
959 const bool force = flags[VideoCommon::Dirty::RescaleScissors]; 953 const bool force = flags[VideoCommon::Dirty::RescaleScissors];
960 flags[VideoCommon::Dirty::RescaleScissors] = false; 954 flags[VideoCommon::Dirty::RescaleScissors] = false;
961 955
962 const auto& regs = maxwell3d.regs; 956 const auto& regs = maxwell3d->regs;
963 957
964 const auto& resolution = Settings::values.resolution_info; 958 const auto& resolution = Settings::values.resolution_info;
965 const bool is_rescaling{texture_cache.IsRescaling()}; 959 const bool is_rescaling{texture_cache.IsRescaling()};
@@ -995,39 +989,39 @@ void RasterizerOpenGL::SyncScissorTest() {
995} 989}
996 990
997void RasterizerOpenGL::SyncPointState() { 991void RasterizerOpenGL::SyncPointState() {
998 auto& flags = maxwell3d.dirty.flags; 992 auto& flags = maxwell3d->dirty.flags;
999 if (!flags[Dirty::PointSize]) { 993 if (!flags[Dirty::PointSize]) {
1000 return; 994 return;
1001 } 995 }
1002 flags[Dirty::PointSize] = false; 996 flags[Dirty::PointSize] = false;
1003 997
1004 oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable); 998 oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable);
1005 oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable); 999 oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.vp_point_size.enable);
1006 const bool is_rescaling{texture_cache.IsRescaling()}; 1000 const bool is_rescaling{texture_cache.IsRescaling()};
1007 const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; 1001 const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f;
1008 glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale)); 1002 glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale));
1009} 1003}
1010 1004
1011void RasterizerOpenGL::SyncLineState() { 1005void RasterizerOpenGL::SyncLineState() {
1012 auto& flags = maxwell3d.dirty.flags; 1006 auto& flags = maxwell3d->dirty.flags;
1013 if (!flags[Dirty::LineWidth]) { 1007 if (!flags[Dirty::LineWidth]) {
1014 return; 1008 return;
1015 } 1009 }
1016 flags[Dirty::LineWidth] = false; 1010 flags[Dirty::LineWidth] = false;
1017 1011
1018 const auto& regs = maxwell3d.regs; 1012 const auto& regs = maxwell3d->regs;
1019 oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable); 1013 oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable);
1020 glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased); 1014 glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased);
1021} 1015}
1022 1016
1023void RasterizerOpenGL::SyncPolygonOffset() { 1017void RasterizerOpenGL::SyncPolygonOffset() {
1024 auto& flags = maxwell3d.dirty.flags; 1018 auto& flags = maxwell3d->dirty.flags;
1025 if (!flags[Dirty::PolygonOffset]) { 1019 if (!flags[Dirty::PolygonOffset]) {
1026 return; 1020 return;
1027 } 1021 }
1028 flags[Dirty::PolygonOffset] = false; 1022 flags[Dirty::PolygonOffset] = false;
1029 1023
1030 const auto& regs = maxwell3d.regs; 1024 const auto& regs = maxwell3d->regs;
1031 oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable); 1025 oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable);
1032 oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable); 1026 oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable);
1033 oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable); 1027 oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable);
@@ -1041,13 +1035,13 @@ void RasterizerOpenGL::SyncPolygonOffset() {
1041} 1035}
1042 1036
1043void RasterizerOpenGL::SyncAlphaTest() { 1037void RasterizerOpenGL::SyncAlphaTest() {
1044 auto& flags = maxwell3d.dirty.flags; 1038 auto& flags = maxwell3d->dirty.flags;
1045 if (!flags[Dirty::AlphaTest]) { 1039 if (!flags[Dirty::AlphaTest]) {
1046 return; 1040 return;
1047 } 1041 }
1048 flags[Dirty::AlphaTest] = false; 1042 flags[Dirty::AlphaTest] = false;
1049 1043
1050 const auto& regs = maxwell3d.regs; 1044 const auto& regs = maxwell3d->regs;
1051 if (regs.alpha_test_enabled) { 1045 if (regs.alpha_test_enabled) {
1052 glEnable(GL_ALPHA_TEST); 1046 glEnable(GL_ALPHA_TEST);
1053 glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref); 1047 glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref);
@@ -1057,17 +1051,17 @@ void RasterizerOpenGL::SyncAlphaTest() {
1057} 1051}
1058 1052
1059void RasterizerOpenGL::SyncFramebufferSRGB() { 1053void RasterizerOpenGL::SyncFramebufferSRGB() {
1060 auto& flags = maxwell3d.dirty.flags; 1054 auto& flags = maxwell3d->dirty.flags;
1061 if (!flags[Dirty::FramebufferSRGB]) { 1055 if (!flags[Dirty::FramebufferSRGB]) {
1062 return; 1056 return;
1063 } 1057 }
1064 flags[Dirty::FramebufferSRGB] = false; 1058 flags[Dirty::FramebufferSRGB] = false;
1065 1059
1066 oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb); 1060 oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb);
1067} 1061}
1068 1062
1069void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) { 1063void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) {
1070 const auto& regs = maxwell3d.regs; 1064 const auto& regs = maxwell3d->regs;
1071 if (regs.tfb_enabled == 0) { 1065 if (regs.tfb_enabled == 0) {
1072 return; 1066 return;
1073 } 1067 }
@@ -1086,11 +1080,48 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum
1086} 1080}
1087 1081
1088void RasterizerOpenGL::EndTransformFeedback() { 1082void RasterizerOpenGL::EndTransformFeedback() {
1089 if (maxwell3d.regs.tfb_enabled != 0) { 1083 if (maxwell3d->regs.tfb_enabled != 0) {
1090 glEndTransformFeedback(); 1084 glEndTransformFeedback();
1091 } 1085 }
1092} 1086}
1093 1087
1088void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) {
1089 CreateChannel(channel);
1090 {
1091 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1092 texture_cache.CreateChannel(channel);
1093 buffer_cache.CreateChannel(channel);
1094 }
1095 shader_cache.CreateChannel(channel);
1096 query_cache.CreateChannel(channel);
1097 state_tracker.SetupTables(channel);
1098}
1099
1100void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) {
1101 const s32 channel_id = channel.bind_id;
1102 BindToChannel(channel_id);
1103 {
1104 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1105 texture_cache.BindToChannel(channel_id);
1106 buffer_cache.BindToChannel(channel_id);
1107 }
1108 shader_cache.BindToChannel(channel_id);
1109 query_cache.BindToChannel(channel_id);
1110 state_tracker.ChangeChannel(channel);
1111 state_tracker.InvalidateState();
1112}
1113
1114void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
1115 EraseChannel(channel_id);
1116 {
1117 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1118 texture_cache.EraseChannel(channel_id);
1119 buffer_cache.EraseChannel(channel_id);
1120 }
1121 shader_cache.EraseChannel(channel_id);
1122 query_cache.EraseChannel(channel_id);
1123}
1124
1094AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} 1125AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
1095 1126
1096bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { 1127bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
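The new InitializeChannel / BindChannel / ReleaseChannel hooks added above fan the channel lifecycle out to every per-channel cache. A compact sketch of that fan-out, with a simplified ChannelAwareCache stand-in and the mutex locking and state-tracker hookup omitted:

// Sketch only: ChannelState, ChannelAwareCache and Rasterizer are simplified stand-ins.
#include <cstdint>
#include <unordered_map>
#include <vector>

struct ChannelState { std::int32_t bind_id = 0; };

class ChannelAwareCache {
public:
    void CreateChannel(const ChannelState& state) { storage[state.bind_id]; }
    void BindToChannel(std::int32_t id) { bound = id; }
    void EraseChannel(std::int32_t id) { storage.erase(id); }
private:
    std::unordered_map<std::int32_t, std::vector<int>> storage; // per-channel data
    std::int32_t bound = -1;
};

class Rasterizer {
public:
    void InitializeChannel(const ChannelState& channel) {
        for (auto* cache : caches) cache->CreateChannel(channel);
    }
    void BindChannel(const ChannelState& channel) {
        for (auto* cache : caches) cache->BindToChannel(channel.bind_id);
    }
    void ReleaseChannel(std::int32_t channel_id) {
        for (auto* cache : caches) cache->EraseChannel(channel_id);
    }
    ChannelAwareCache texture_cache, buffer_cache, shader_cache, query_cache;
private:
    std::vector<ChannelAwareCache*> caches{&texture_cache, &buffer_cache,
                                           &shader_cache, &query_cache};
};

int main() {
    Rasterizer rasterizer;
    ChannelState channel{.bind_id = 1};
    rasterizer.InitializeChannel(channel);
    rasterizer.BindChannel(channel);
    rasterizer.ReleaseChannel(channel.bind_id);
    return 0;
}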
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 31a16fcba..45131b785 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -12,6 +12,7 @@
12#include <glad/glad.h> 12#include <glad/glad.h>
13 13
14#include "common/common_types.h" 14#include "common/common_types.h"
15#include "video_core/control/channel_state_cache.h"
15#include "video_core/engines/maxwell_dma.h" 16#include "video_core/engines/maxwell_dma.h"
16#include "video_core/rasterizer_accelerated.h" 17#include "video_core/rasterizer_accelerated.h"
17#include "video_core/rasterizer_interface.h" 18#include "video_core/rasterizer_interface.h"
@@ -58,7 +59,8 @@ private:
58 BufferCache& buffer_cache; 59 BufferCache& buffer_cache;
59}; 60};
60 61
61class RasterizerOpenGL : public VideoCore::RasterizerAccelerated { 62class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
63 protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
62public: 64public:
63 explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 65 explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
64 Core::Memory::Memory& cpu_memory_, const Device& device_, 66 Core::Memory::Memory& cpu_memory_, const Device& device_,
@@ -78,10 +80,11 @@ public:
78 bool MustFlushRegion(VAddr addr, u64 size) override; 80 bool MustFlushRegion(VAddr addr, u64 size) override;
79 void InvalidateRegion(VAddr addr, u64 size) override; 81 void InvalidateRegion(VAddr addr, u64 size) override;
80 void OnCPUWrite(VAddr addr, u64 size) override; 82 void OnCPUWrite(VAddr addr, u64 size) override;
81 void SyncGuestHost() override; 83 void InvalidateGPUCache() override;
82 void UnmapMemory(VAddr addr, u64 size) override; 84 void UnmapMemory(VAddr addr, u64 size) override;
83 void ModifyGPUMemory(GPUVAddr addr, u64 size) override; 85 void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
84 void SignalSemaphore(GPUVAddr addr, u32 value) override; 86 void SignalFence(std::function<void()>&& func) override;
87 void SyncOperation(std::function<void()>&& func) override;
85 void SignalSyncPoint(u32 value) override; 88 void SignalSyncPoint(u32 value) override;
86 void SignalReference() override; 89 void SignalReference() override;
87 void ReleaseFences() override; 90 void ReleaseFences() override;
@@ -96,7 +99,7 @@ public:
96 const Tegra::Engines::Fermi2D::Config& copy_config) override; 99 const Tegra::Engines::Fermi2D::Config& copy_config) override;
97 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; 100 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
98 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 101 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
99 std::span<u8> memory) override; 102 std::span<const u8> memory) override;
100 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, 103 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
101 u32 pixel_stride) override; 104 u32 pixel_stride) override;
102 void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 105 void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
@@ -107,6 +110,12 @@ public:
107 return num_queued_commands > 0; 110 return num_queued_commands > 0;
108 } 111 }
109 112
113 void InitializeChannel(Tegra::Control::ChannelState& channel) override;
114
115 void BindChannel(Tegra::Control::ChannelState& channel) override;
116
117 void ReleaseChannel(s32 channel_id) override;
118
110private: 119private:
111 static constexpr size_t MAX_TEXTURES = 192; 120 static constexpr size_t MAX_TEXTURES = 192;
112 static constexpr size_t MAX_IMAGES = 48; 121 static constexpr size_t MAX_IMAGES = 48;
@@ -191,9 +200,6 @@ private:
191 void EndTransformFeedback(); 200 void EndTransformFeedback();
192 201
193 Tegra::GPU& gpu; 202 Tegra::GPU& gpu;
194 Tegra::Engines::Maxwell3D& maxwell3d;
195 Tegra::Engines::KeplerCompute& kepler_compute;
196 Tegra::MemoryManager& gpu_memory;
197 203
198 const Device& device; 204 const Device& device;
199 ScreenInfo& screen_info; 205 ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 0b8d8ec92..5a29a41d2 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -151,16 +151,13 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs
151} // Anonymous namespace 151} // Anonymous namespace
152 152
153ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, 153ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
154 Tegra::Engines::Maxwell3D& maxwell3d_, 154 const Device& device_, TextureCache& texture_cache_,
155 Tegra::Engines::KeplerCompute& kepler_compute_, 155 BufferCache& buffer_cache_, ProgramManager& program_manager_,
156 Tegra::MemoryManager& gpu_memory_, const Device& device_, 156 StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_)
157 TextureCache& texture_cache_, BufferCache& buffer_cache_, 157 : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_},
158 ProgramManager& program_manager_, StateTracker& state_tracker_, 158 texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
159 VideoCore::ShaderNotify& shader_notify_) 159 state_tracker{state_tracker_}, shader_notify{shader_notify_},
160 : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, 160 use_asynchronous_shaders{device.UseAsynchronousShaders()},
161 emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_},
162 buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_},
163 shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()},
164 profile{ 161 profile{
165 .supported_spirv = 0x00010000, 162 .supported_spirv = 0x00010000,
166 163
@@ -310,7 +307,7 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() {
310 current_pipeline = nullptr; 307 current_pipeline = nullptr;
311 return nullptr; 308 return nullptr;
312 } 309 }
313 const auto& regs{maxwell3d.regs}; 310 const auto& regs{maxwell3d->regs};
314 graphics_key.raw = 0; 311 graphics_key.raw = 0;
315 graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0); 312 graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
316 graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0 313 graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0
@@ -351,13 +348,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n
351 } 348 }
352 // If something is using depth, we can assume that games are not rendering anything which 349 // If something is using depth, we can assume that games are not rendering anything which
353 // will be used one time. 350 // will be used one time.
354 if (maxwell3d.regs.zeta_enable) { 351 if (maxwell3d->regs.zeta_enable) {
355 return nullptr; 352 return nullptr;
356 } 353 }
357 // If games are using a small index count, we can assume these are full screen quads. 354 // If games are using a small index count, we can assume these are full screen quads.
358 // Usually these shaders are only used once for building textures so we can assume they 355 // Usually these shaders are only used once for building textures so we can assume they
359 // can't be built async 356 // can't be built async
360 if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { 357 if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
361 return pipeline; 358 return pipeline;
362 } 359 }
363 return nullptr; 360 return nullptr;
@@ -368,7 +365,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() {
368 if (!shader) { 365 if (!shader) {
369 return nullptr; 366 return nullptr;
370 } 367 }
371 const auto& qmd{kepler_compute.launch_description}; 368 const auto& qmd{kepler_compute->launch_description};
372 const ComputePipelineKey key{ 369 const ComputePipelineKey key{
373 .unique_hash = shader->unique_hash, 370 .unique_hash = shader->unique_hash,
374 .shared_memory_size = qmd.shared_alloc, 371 .shared_memory_size = qmd.shared_alloc,
@@ -480,9 +477,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
480 previous_program = &program; 477 previous_program = &program;
481 } 478 }
482 auto* const thread_worker{build_in_parallel ? workers.get() : nullptr}; 479 auto* const thread_worker{build_in_parallel ? workers.get() : nullptr};
483 return std::make_unique<GraphicsPipeline>( 480 return std::make_unique<GraphicsPipeline>(device, texture_cache, buffer_cache, program_manager,
484 device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker, 481 state_tracker, thread_worker, &shader_notify, sources,
485 thread_worker, &shader_notify, sources, sources_spirv, infos, key); 482 sources_spirv, infos, key);
486 483
487} catch (Shader::Exception& exception) { 484} catch (Shader::Exception& exception) {
488 LOG_ERROR(Render_OpenGL, "{}", exception.what()); 485 LOG_ERROR(Render_OpenGL, "{}", exception.what());
@@ -491,9 +488,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
491 488
492std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline( 489std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
493 const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) { 490 const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) {
494 const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; 491 const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
495 const auto& qmd{kepler_compute.launch_description}; 492 const auto& qmd{kepler_compute->launch_description};
496 ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; 493 ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
497 env.SetCachedSize(shader->size_bytes); 494 env.SetCachedSize(shader->size_bytes);
498 495
499 main_pools.ReleaseContents(); 496 main_pools.ReleaseContents();
@@ -536,9 +533,8 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
536 break; 533 break;
537 } 534 }
538 535
539 return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, gpu_memory, 536 return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, program_manager,
540 kepler_compute, program_manager, program.info, code, 537 program.info, code, code_spirv);
541 code_spirv);
542} catch (Shader::Exception& exception) { 538} catch (Shader::Exception& exception) {
543 LOG_ERROR(Render_OpenGL, "{}", exception.what()); 539 LOG_ERROR(Render_OpenGL, "{}", exception.what());
544 return nullptr; 540 return nullptr;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index a14269dea..89f181fe3 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
30class ShaderCache : public VideoCommon::ShaderCache { 30class ShaderCache : public VideoCommon::ShaderCache {
31public: 31public:
32 explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, 32 explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
33 Tegra::Engines::Maxwell3D& maxwell3d_, 33 const Device& device_, TextureCache& texture_cache_,
34 Tegra::Engines::KeplerCompute& kepler_compute_, 34 BufferCache& buffer_cache_, ProgramManager& program_manager_,
35 Tegra::MemoryManager& gpu_memory_, const Device& device_, 35 StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_);
36 TextureCache& texture_cache_, BufferCache& buffer_cache_,
37 ProgramManager& program_manager_, StateTracker& state_tracker_,
38 VideoCore::ShaderNotify& shader_notify_);
39 ~ShaderCache(); 36 ~ShaderCache();
40 37
41 void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 38 void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index 912725ef7..a8f3a0f57 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -7,8 +7,8 @@
7 7
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "video_core/control/channel_state.h"
10#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
11#include "video_core/gpu.h"
12#include "video_core/renderer_opengl/gl_state_tracker.h" 12#include "video_core/renderer_opengl/gl_state_tracker.h"
13 13
14#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) 14#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -202,9 +202,8 @@ void SetupDirtyMisc(Tables& tables) {
202 202
203} // Anonymous namespace 203} // Anonymous namespace
204 204
205StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} { 205void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
206 auto& dirty = gpu.Maxwell3D().dirty; 206 auto& tables{channel_state.maxwell_3d->dirty.tables};
207 auto& tables = dirty.tables;
208 SetupDirtyFlags(tables); 207 SetupDirtyFlags(tables);
209 SetupDirtyColorMasks(tables); 208 SetupDirtyColorMasks(tables);
210 SetupDirtyViewports(tables); 209 SetupDirtyViewports(tables);
@@ -230,4 +229,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags}
230 SetupDirtyMisc(tables); 229 SetupDirtyMisc(tables);
231} 230}
232 231
232void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
233 flags = &channel_state.maxwell_3d->dirty.flags;
234}
235
236void StateTracker::InvalidateState() {
237 flags->set();
238}
239
240StateTracker::StateTracker() : flags{&default_flags} {}
241
233} // namespace OpenGL 242} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h
index 04e024f08..19bcf3f35 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.h
+++ b/src/video_core/renderer_opengl/gl_state_tracker.h
@@ -12,8 +12,10 @@
12#include "video_core/engines/maxwell_3d.h" 12#include "video_core/engines/maxwell_3d.h"
13 13
14namespace Tegra { 14namespace Tegra {
15class GPU; 15namespace Control {
16struct ChannelState;
16} 17}
18} // namespace Tegra
17 19
18namespace OpenGL { 20namespace OpenGL {
19 21
@@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits<u8>::max());
83 85
84class StateTracker { 86class StateTracker {
85public: 87public:
86 explicit StateTracker(Tegra::GPU& gpu); 88 explicit StateTracker();
87 89
88 void BindIndexBuffer(GLuint new_index_buffer) { 90 void BindIndexBuffer(GLuint new_index_buffer) {
89 if (index_buffer == new_index_buffer) { 91 if (index_buffer == new_index_buffer) {
@@ -121,94 +123,107 @@ public:
121 } 123 }
122 124
123 void NotifyScreenDrawVertexArray() { 125 void NotifyScreenDrawVertexArray() {
124 flags[OpenGL::Dirty::VertexFormats] = true; 126 (*flags)[OpenGL::Dirty::VertexFormats] = true;
125 flags[OpenGL::Dirty::VertexFormat0 + 0] = true; 127 (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true;
126 flags[OpenGL::Dirty::VertexFormat0 + 1] = true; 128 (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true;
127 129
128 flags[VideoCommon::Dirty::VertexBuffers] = true; 130 (*flags)[VideoCommon::Dirty::VertexBuffers] = true;
129 flags[VideoCommon::Dirty::VertexBuffer0] = true; 131 (*flags)[VideoCommon::Dirty::VertexBuffer0] = true;
130 132
131 flags[OpenGL::Dirty::VertexInstances] = true; 133 (*flags)[OpenGL::Dirty::VertexInstances] = true;
132 flags[OpenGL::Dirty::VertexInstance0 + 0] = true; 134 (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true;
133 flags[OpenGL::Dirty::VertexInstance0 + 1] = true; 135 (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true;
134 } 136 }
135 137
136 void NotifyPolygonModes() { 138 void NotifyPolygonModes() {
137 flags[OpenGL::Dirty::PolygonModes] = true; 139 (*flags)[OpenGL::Dirty::PolygonModes] = true;
138 flags[OpenGL::Dirty::PolygonModeFront] = true; 140 (*flags)[OpenGL::Dirty::PolygonModeFront] = true;
139 flags[OpenGL::Dirty::PolygonModeBack] = true; 141 (*flags)[OpenGL::Dirty::PolygonModeBack] = true;
140 } 142 }
141 143
142 void NotifyViewport0() { 144 void NotifyViewport0() {
143 flags[OpenGL::Dirty::Viewports] = true; 145 (*flags)[OpenGL::Dirty::Viewports] = true;
144 flags[OpenGL::Dirty::Viewport0] = true; 146 (*flags)[OpenGL::Dirty::Viewport0] = true;
145 } 147 }
146 148
147 void NotifyScissor0() { 149 void NotifyScissor0() {
148 flags[OpenGL::Dirty::Scissors] = true; 150 (*flags)[OpenGL::Dirty::Scissors] = true;
149 flags[OpenGL::Dirty::Scissor0] = true; 151 (*flags)[OpenGL::Dirty::Scissor0] = true;
150 } 152 }
151 153
152 void NotifyColorMask(size_t index) { 154 void NotifyColorMask(size_t index) {
153 flags[OpenGL::Dirty::ColorMasks] = true; 155 (*flags)[OpenGL::Dirty::ColorMasks] = true;
154 flags[OpenGL::Dirty::ColorMask0 + index] = true; 156 (*flags)[OpenGL::Dirty::ColorMask0 + index] = true;
155 } 157 }
156 158
157 void NotifyBlend0() { 159 void NotifyBlend0() {
158 flags[OpenGL::Dirty::BlendStates] = true; 160 (*flags)[OpenGL::Dirty::BlendStates] = true;
159 flags[OpenGL::Dirty::BlendState0] = true; 161 (*flags)[OpenGL::Dirty::BlendState0] = true;
160 } 162 }
161 163
162 void NotifyFramebuffer() { 164 void NotifyFramebuffer() {
163 flags[VideoCommon::Dirty::RenderTargets] = true; 165 (*flags)[VideoCommon::Dirty::RenderTargets] = true;
164 } 166 }
165 167
166 void NotifyFrontFace() { 168 void NotifyFrontFace() {
167 flags[OpenGL::Dirty::FrontFace] = true; 169 (*flags)[OpenGL::Dirty::FrontFace] = true;
168 } 170 }
169 171
170 void NotifyCullTest() { 172 void NotifyCullTest() {
171 flags[OpenGL::Dirty::CullTest] = true; 173 (*flags)[OpenGL::Dirty::CullTest] = true;
172 } 174 }
173 175
174 void NotifyDepthMask() { 176 void NotifyDepthMask() {
175 flags[OpenGL::Dirty::DepthMask] = true; 177 (*flags)[OpenGL::Dirty::DepthMask] = true;
176 } 178 }
177 179
178 void NotifyDepthTest() { 180 void NotifyDepthTest() {
179 flags[OpenGL::Dirty::DepthTest] = true; 181 (*flags)[OpenGL::Dirty::DepthTest] = true;
180 } 182 }
181 183
182 void NotifyStencilTest() { 184 void NotifyStencilTest() {
183 flags[OpenGL::Dirty::StencilTest] = true; 185 (*flags)[OpenGL::Dirty::StencilTest] = true;
184 } 186 }
185 187
186 void NotifyPolygonOffset() { 188 void NotifyPolygonOffset() {
187 flags[OpenGL::Dirty::PolygonOffset] = true; 189 (*flags)[OpenGL::Dirty::PolygonOffset] = true;
188 } 190 }
189 191
190 void NotifyRasterizeEnable() { 192 void NotifyRasterizeEnable() {
191 flags[OpenGL::Dirty::RasterizeEnable] = true; 193 (*flags)[OpenGL::Dirty::RasterizeEnable] = true;
192 } 194 }
193 195
194 void NotifyFramebufferSRGB() { 196 void NotifyFramebufferSRGB() {
195 flags[OpenGL::Dirty::FramebufferSRGB] = true; 197 (*flags)[OpenGL::Dirty::FramebufferSRGB] = true;
196 } 198 }
197 199
198 void NotifyLogicOp() { 200 void NotifyLogicOp() {
199 flags[OpenGL::Dirty::LogicOp] = true; 201 (*flags)[OpenGL::Dirty::LogicOp] = true;
200 } 202 }
201 203
202 void NotifyClipControl() { 204 void NotifyClipControl() {
203 flags[OpenGL::Dirty::ClipControl] = true; 205 (*flags)[OpenGL::Dirty::ClipControl] = true;
204 } 206 }
205 207
206 void NotifyAlphaTest() { 208 void NotifyAlphaTest() {
207 flags[OpenGL::Dirty::AlphaTest] = true; 209 (*flags)[OpenGL::Dirty::AlphaTest] = true;
208 } 210 }
209 211
212 void NotifyRange(u8 start, u8 end) {
213 for (auto flag = start; flag <= end; flag++) {
214 (*flags)[flag] = true;
215 }
216 }
217
218 void SetupTables(Tegra::Control::ChannelState& channel_state);
219
220 void ChangeChannel(Tegra::Control::ChannelState& channel_state);
221
222 void InvalidateState();
223
210private: 224private:
211 Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; 225 Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
226 Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{};
212 227
213 GLuint framebuffer = 0; 228 GLuint framebuffer = 0;
214 GLuint index_buffer = 0; 229 GLuint index_buffer = 0;
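With the GPU argument gone from its constructor, StateTracker now starts out pointing flags at its own default_flags bitset and only retargets the pointer when a channel is bound, which is why every Notify helper above dereferences it. A small sketch of that pointer-swap pattern, using a generic bitset in place of Maxwell3D::DirtyState::Flags:

// Sketch only: DirtyFlags stands in for Maxwell3D::DirtyState::Flags.
#include <bitset>

using DirtyFlags = std::bitset<64>;

class StateTracker {
public:
    StateTracker() : flags{&default_flags} {} // safe target before any channel exists

    void ChangeChannel(DirtyFlags& channel_flags) { flags = &channel_flags; }
    void InvalidateState() { flags->set(); }  // mark everything dirty on a channel switch
    void NotifyViewport0() { (*flags)[0] = true; }

private:
    DirtyFlags* flags;          // previously DirtyFlags& flags;
    DirtyFlags default_flags{}; // fallback so the pointer never dangles
};

int main() {
    StateTracker tracker;
    tracker.NotifyViewport0(); // writes into default_flags

    DirtyFlags channel_flags;
    tracker.ChangeChannel(channel_flags);
    tracker.InvalidateState(); // now writes into the bound channel's flags
    return 0;
}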
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index dfe7f26ca..004421236 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -87,7 +87,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB
87 {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB 87 {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB
88 {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB 88 {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB
89 {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM 89 {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM
90 {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R4G4_UNORM 90 {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // G4R4_UNORM
91 {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB 91 {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
92 {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB 92 {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
93 {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB 93 {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 34f3f7a67..8bd5eba7e 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
131 Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, 131 Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
132 std::unique_ptr<Core::Frontend::GraphicsContext> context_) 132 std::unique_ptr<Core::Frontend::GraphicsContext> context_)
133 : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, 133 : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
134 emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu}, 134 emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{},
135 program_manager{device}, 135 program_manager{device},
136 rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) { 136 rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
137 if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) { 137 if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 6703b8e68..e7104d377 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -184,7 +184,7 @@ struct FormatTuple {
184 {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB 184 {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB
185 {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB 185 {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB
186 {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM 186 {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM
187 {VK_FORMAT_R4G4_UNORM_PACK8}, // R4G4_UNORM 187 {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM
188 {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB 188 {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB
189 {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB 189 {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB
190 {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB 190 {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 7c78d0299..d8131232a 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -102,13 +102,13 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
102 debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr), 102 debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr),
103 surface(CreateSurface(instance, render_window)), 103 surface(CreateSurface(instance, render_window)),
104 device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false), 104 device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false),
105 state_tracker(gpu), scheduler(device, state_tracker), 105 state_tracker(), scheduler(device, state_tracker),
106 swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width, 106 swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width,
107 render_window.GetFramebufferLayout().height, false), 107 render_window.GetFramebufferLayout().height, false),
108 blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler, 108 blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler,
109 screen_info), 109 screen_info),
110 rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device, 110 rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator,
111 memory_allocator, state_tracker, scheduler) { 111 state_tracker, scheduler) {
112 Report(); 112 Report();
113} catch (const vk::Exception& exception) { 113} catch (const vk::Exception& exception) {
114 LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); 114 LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what());
@@ -142,7 +142,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
142 const auto recreate_swapchain = [&] { 142 const auto recreate_swapchain = [&] {
143 if (!has_been_recreated) { 143 if (!has_been_recreated) {
144 has_been_recreated = true; 144 has_been_recreated = true;
145 scheduler.WaitWorker(); 145 scheduler.Finish();
146 } 146 }
147 const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout(); 147 const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
148 swapchain.Create(layout.width, layout.height, is_srgb); 148 swapchain.Create(layout.width, layout.height, is_srgb);
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 444c29f68..cb7fa2078 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
145 // Finish any pending renderpass 145 // Finish any pending renderpass
146 scheduler.RequestOutsideRenderPassOperationContext(); 146 scheduler.RequestOutsideRenderPassOperationContext();
147 147
148 if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) {
149 image_count = swapchain_images;
150 Recreate();
151 }
152
148 const std::size_t image_index = swapchain.GetImageIndex(); 153 const std::size_t image_index = swapchain.GetImageIndex();
149 154
150 scheduler.Wait(resource_ticks[image_index]); 155 scheduler.Wait(resource_ticks[image_index]);
@@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE
448 453
449void BlitScreen::CreateStaticResources() { 454void BlitScreen::CreateStaticResources() {
450 CreateShaders(); 455 CreateShaders();
456 CreateSampler();
457}
458
459void BlitScreen::CreateDynamicResources() {
451 CreateSemaphores(); 460 CreateSemaphores();
452 CreateDescriptorPool(); 461 CreateDescriptorPool();
453 CreateDescriptorSetLayout(); 462 CreateDescriptorSetLayout();
454 CreateDescriptorSets(); 463 CreateDescriptorSets();
455 CreatePipelineLayout(); 464 CreatePipelineLayout();
456 CreateSampler();
457}
458
459void BlitScreen::CreateDynamicResources() {
460 CreateRenderPass(); 465 CreateRenderPass();
461 CreateFramebuffers(); 466 CreateFramebuffers();
462 CreateGraphicsPipeline(); 467 CreateGraphicsPipeline();
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index b8c67bef0..29e2ea925 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -109,7 +109,7 @@ private:
109 MemoryAllocator& memory_allocator; 109 MemoryAllocator& memory_allocator;
110 Swapchain& swapchain; 110 Swapchain& swapchain;
111 Scheduler& scheduler; 111 Scheduler& scheduler;
112 const std::size_t image_count; 112 std::size_t image_count;
113 const ScreenInfo& screen_info; 113 const ScreenInfo& screen_info;
114 114
115 vk::ShaderModule vertex_shader; 115 vk::ShaderModule vertex_shader;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index f17a5ccd6..241d7573e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -26,8 +26,6 @@
26 26
27namespace Vulkan { 27namespace Vulkan {
28 28
29using Tegra::Texture::SWIZZLE_TABLE;
30
31namespace { 29namespace {
32 30
33constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0; 31constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 6447210e2..7906e11a8 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
126 const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; 126 const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
127 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + 127 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
128 secondary_offset}; 128 secondary_offset};
129 const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; 129 const u32 lhs_raw{gpu_memory.Read<u32>(addr) << desc.shift_left};
130 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; 130 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr) << desc.secondary_shift_left};
131 return TexturePair(lhs_raw | rhs_raw, via_header_index); 131 return TexturePair(lhs_raw | rhs_raw, via_header_index);
132 } 132 }
133 } 133 }
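For context on the hunk above: the texture-handle read now applies a per-descriptor left shift to each constant-buffer word before the two halves are OR'd together. A minimal sketch of that combination step, using hypothetical names (HandleDesc, CombineHandle) that are not part of yuzu:

    #include <cstdint>

    struct HandleDesc {
        uint32_t shift_left;            // shift for the word read at the primary cbuf address
        uint32_t secondary_shift_left;  // shift for the word read at the secondary cbuf address
    };

    // Merge the two raw words into a single texture handle after shifting each half.
    constexpr uint32_t CombineHandle(uint32_t lhs_raw, uint32_t rhs_raw, HandleDesc desc) {
        return (lhs_raw << desc.shift_left) | (rhs_raw << desc.secondary_shift_left);
    }

    // Example: a 16-bit upper half shifted into place and OR'd with the lower half.
    static_assert(CombineHandle(0x0012u, 0x0034u, HandleDesc{16u, 0u}) == 0x120034u);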
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index c249b34d4..0214b103a 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -11,11 +11,8 @@
11 11
12namespace Vulkan { 12namespace Vulkan {
13 13
14InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_) 14InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_)
15 : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {} 15 : FenceBase{is_stubbed_}, scheduler{scheduler_} {}
16
17InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
18 : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}
19 16
20InnerFence::~InnerFence() = default; 17InnerFence::~InnerFence() = default;
21 18
@@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G
48 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_}, 45 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
49 scheduler{scheduler_} {} 46 scheduler{scheduler_} {}
50 47
51Fence FenceManager::CreateFence(u32 value, bool is_stubbed) { 48Fence FenceManager::CreateFence(bool is_stubbed) {
52 return std::make_shared<InnerFence>(scheduler, value, is_stubbed); 49 return std::make_shared<InnerFence>(scheduler, is_stubbed);
53}
54
55Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
56 return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
57} 50}
58 51
59void FenceManager::QueueFence(Fence& fence) { 52void FenceManager::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 7c0bbd80a..7fe2afcd9 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -25,8 +25,7 @@ class Scheduler;
25 25
26class InnerFence : public VideoCommon::FenceBase { 26class InnerFence : public VideoCommon::FenceBase {
27public: 27public:
28 explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_); 28 explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_);
29 explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
30 ~InnerFence(); 29 ~InnerFence();
31 30
32 void Queue(); 31 void Queue();
@@ -50,8 +49,7 @@ public:
50 QueryCache& query_cache, const Device& device, Scheduler& scheduler); 49 QueryCache& query_cache, const Device& device, Scheduler& scheduler);
51 50
52protected: 51protected:
53 Fence CreateFence(u32 value, bool is_stubbed) override; 52 Fence CreateFence(bool is_stubbed) override;
54 Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
55 void QueueFence(Fence& fence) override; 53 void QueueFence(Fence& fence) override;
56 bool IsFenceSignaled(Fence& fence) const override; 54 bool IsFenceSignaled(Fence& fence) const override;
57 void WaitFence(Fence& fence) override; 55 void WaitFence(Fence& fence) override;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 5aca8f038..f47786f48 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -215,15 +215,14 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
215} // Anonymous namespace 215} // Anonymous namespace
216 216
217GraphicsPipeline::GraphicsPipeline( 217GraphicsPipeline::GraphicsPipeline(
218 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_, 218 Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
219 BufferCache& buffer_cache_, TextureCache& texture_cache_,
220 VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool, 219 VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
221 UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread, 220 UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
222 PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache, 221 PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
223 const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages, 222 const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages,
224 const std::array<const Shader::Info*, NUM_STAGES>& infos) 223 const std::array<const Shader::Info*, NUM_STAGES>& infos)
225 : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_}, 224 : key{key_}, device{device_}, texture_cache{texture_cache_},
226 texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_}, 225 buffer_cache{buffer_cache_}, scheduler{scheduler_},
227 update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} { 226 update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
228 if (shader_notify) { 227 if (shader_notify) {
229 shader_notify->MarkShaderBuilding(); 228 shader_notify->MarkShaderBuilding();
@@ -288,7 +287,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
288 287
289 buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes); 288 buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes);
290 289
291 const auto& regs{maxwell3d.regs}; 290 const auto& regs{maxwell3d->regs};
292 const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; 291 const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
293 const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { 292 const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
294 const Shader::Info& info{stage_infos[stage]}; 293 const Shader::Info& info{stage_infos[stage]};
@@ -302,7 +301,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
302 ++ssbo_index; 301 ++ssbo_index;
303 } 302 }
304 } 303 }
305 const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; 304 const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
306 const auto read_handle{[&](const auto& desc, u32 index) { 305 const auto read_handle{[&](const auto& desc, u32 index) {
307 ASSERT(cbufs[desc.cbuf_index].enabled); 306 ASSERT(cbufs[desc.cbuf_index].enabled);
308 const u32 index_offset{index << desc.size_shift}; 307 const u32 index_offset{index << desc.size_shift};
@@ -315,13 +314,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
315 const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; 314 const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
316 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + 315 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
317 second_offset}; 316 second_offset};
318 const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; 317 const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
319 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; 318 const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
319 << desc.secondary_shift_left};
320 const u32 raw{lhs_raw | rhs_raw}; 320 const u32 raw{lhs_raw | rhs_raw};
321 return TexturePair(raw, via_header_index); 321 return TexturePair(raw, via_header_index);
322 } 322 }
323 } 323 }
324 return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); 324 return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
325 }}; 325 }};
326 const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { 326 const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
327 for (u32 index = 0; index < desc.count; ++index) { 327 for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index e8949a9ab..85602592b 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -69,15 +69,16 @@ class GraphicsPipeline {
69 static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage; 69 static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
70 70
71public: 71public:
72 explicit GraphicsPipeline( 72 explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache,
73 Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, 73 TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify,
74 Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache, 74 const Device& device, DescriptorPool& descriptor_pool,
75 VideoCore::ShaderNotify* shader_notify, const Device& device, 75 UpdateDescriptorQueue& update_descriptor_queue,
76 DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue, 76 Common::ThreadWorker* worker_thread,
77 Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics, 77 PipelineStatistics* pipeline_statistics,
78 RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key, 78 RenderPassCache& render_pass_cache,
79 std::array<vk::ShaderModule, NUM_STAGES> stages, 79 const GraphicsPipelineCacheKey& key,
80 const std::array<const Shader::Info*, NUM_STAGES>& infos); 80 std::array<vk::ShaderModule, NUM_STAGES> stages,
81 const std::array<const Shader::Info*, NUM_STAGES>& infos);
81 82
82 GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete; 83 GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
83 GraphicsPipeline(GraphicsPipeline&&) noexcept = delete; 84 GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
@@ -109,6 +110,11 @@ public:
109 return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); }; 110 return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
110 } 111 }
111 112
113 void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
114 maxwell3d = maxwell3d_;
115 gpu_memory = gpu_memory_;
116 }
117
112private: 118private:
113 template <typename Spec> 119 template <typename Spec>
114 void ConfigureImpl(bool is_indexed); 120 void ConfigureImpl(bool is_indexed);
@@ -120,8 +126,8 @@ private:
120 void Validate(); 126 void Validate();
121 127
122 const GraphicsPipelineCacheKey key; 128 const GraphicsPipelineCacheKey key;
123 Tegra::Engines::Maxwell3D& maxwell3d; 129 Tegra::Engines::Maxwell3D* maxwell3d;
124 Tegra::MemoryManager& gpu_memory; 130 Tegra::MemoryManager* gpu_memory;
125 const Device& device; 131 const Device& device;
126 TextureCache& texture_cache; 132 TextureCache& texture_cache;
127 BufferCache& buffer_cache; 133 BufferCache& buffer_cache;
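The header change above swaps the pipeline's engine references for raw pointers that are rebound per channel through SetEngine(). A compact sketch of that rebinding pattern under stand-in types (Engine, MemoryManager, Pipeline are illustrative, not the real yuzu classes):

    #include <cassert>

    struct Engine {};        // stands in for Tegra::Engines::Maxwell3D
    struct MemoryManager {}; // stands in for Tegra::MemoryManager

    class Pipeline {
    public:
        void SetEngine(Engine* engine_, MemoryManager* memory_) {
            engine = engine_;
            memory = memory_;
        }

        void Configure() const {
            // The pointers must have been rebound for the active channel first.
            assert(engine != nullptr && memory != nullptr);
            // ... read draw state through *engine and *memory ...
        }

    private:
        Engine* engine = nullptr;
        MemoryManager* memory = nullptr;
    };

The caller rebinds before every configure, e.g. pipeline.SetEngine(&channel_engine, &channel_memory); pipeline.Configure(); so a pipeline built for one channel can safely serve another.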
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index accbfc8e1..732e7b6f2 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -259,17 +259,15 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
259 return std::memcmp(&rhs, this, Size()) == 0; 259 return std::memcmp(&rhs, this, Size()) == 0;
260} 260}
261 261
262PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, 262PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_,
263 Tegra::Engines::KeplerCompute& kepler_compute_,
264 Tegra::MemoryManager& gpu_memory_, const Device& device_,
265 Scheduler& scheduler_, DescriptorPool& descriptor_pool_, 263 Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
266 UpdateDescriptorQueue& update_descriptor_queue_, 264 UpdateDescriptorQueue& update_descriptor_queue_,
267 RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, 265 RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
268 TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) 266 TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
269 : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_}, 267 : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_},
270 device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, 268 descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_},
271 update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_}, 269 render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_},
272 buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_}, 270 texture_cache{texture_cache_}, shader_notify{shader_notify_},
273 use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()}, 271 use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
274 workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"), 272 workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"),
275 serialization_thread(1, "VkPipelineSerialization") { 273 serialization_thread(1, "VkPipelineSerialization") {
@@ -337,7 +335,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
337 current_pipeline = nullptr; 335 current_pipeline = nullptr;
338 return nullptr; 336 return nullptr;
339 } 337 }
340 graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(), 338 graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(),
341 device.IsExtVertexInputDynamicStateSupported()); 339 device.IsExtVertexInputDynamicStateSupported());
342 340
343 if (current_pipeline) { 341 if (current_pipeline) {
@@ -357,7 +355,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() {
357 if (!shader) { 355 if (!shader) {
358 return nullptr; 356 return nullptr;
359 } 357 }
360 const auto& qmd{kepler_compute.launch_description}; 358 const auto& qmd{kepler_compute->launch_description};
361 const ComputePipelineCacheKey key{ 359 const ComputePipelineCacheKey key{
362 .unique_hash = shader->unique_hash, 360 .unique_hash = shader->unique_hash,
363 .shared_memory_size = qmd.shared_alloc, 361 .shared_memory_size = qmd.shared_alloc,
@@ -486,13 +484,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const
486 } 484 }
487 // If something is using depth, we can assume that games are not rendering anything which 485 // If something is using depth, we can assume that games are not rendering anything which
488 // will be used one time. 486 // will be used one time.
489 if (maxwell3d.regs.zeta_enable) { 487 if (maxwell3d->regs.zeta_enable) {
490 return nullptr; 488 return nullptr;
491 } 489 }
492 // If games are using a small index count, we can assume these are full screen quads. 490 // If games are using a small index count, we can assume these are full screen quads.
493 // Usually these shaders are only used once for building textures so we can assume they 491 // Usually these shaders are only used once for building textures so we can assume they
494 // can't be built async 492 // can't be built async
495 if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) { 493 if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
496 return pipeline; 494 return pipeline;
497 } 495 }
498 return nullptr; 496 return nullptr;
@@ -557,10 +555,10 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
557 previous_stage = &program; 555 previous_stage = &program;
558 } 556 }
559 Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr}; 557 Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
560 return std::make_unique<GraphicsPipeline>( 558 return std::make_unique<GraphicsPipeline>(scheduler, buffer_cache, texture_cache,
561 maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device, 559 &shader_notify, device, descriptor_pool,
562 descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key, 560 update_descriptor_queue, thread_worker, statistics,
563 std::move(modules), infos); 561 render_pass_cache, key, std::move(modules), infos);
564 562
565} catch (const Shader::Exception& exception) { 563} catch (const Shader::Exception& exception) {
566 LOG_ERROR(Render_Vulkan, "{}", exception.what()); 564 LOG_ERROR(Render_Vulkan, "{}", exception.what());
@@ -592,9 +590,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
592 590
593std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline( 591std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
594 const ComputePipelineCacheKey& key, const ShaderInfo* shader) { 592 const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
595 const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; 593 const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
596 const auto& qmd{kepler_compute.launch_description}; 594 const auto& qmd{kepler_compute->launch_description};
597 ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; 595 ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
598 env.SetCachedSize(shader->size_bytes); 596 env.SetCachedSize(shader->size_bytes);
599 597
600 main_pools.ReleaseContents(); 598 main_pools.ReleaseContents();
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 127957dbf..61f9e9366 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -100,10 +100,8 @@ struct ShaderPools {
100 100
101class PipelineCache : public VideoCommon::ShaderCache { 101class PipelineCache : public VideoCommon::ShaderCache {
102public: 102public:
103 explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, 103 explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler,
104 Tegra::Engines::KeplerCompute& kepler_compute, 104 DescriptorPool& descriptor_pool,
105 Tegra::MemoryManager& gpu_memory, const Device& device,
106 Scheduler& scheduler, DescriptorPool& descriptor_pool,
107 UpdateDescriptorQueue& update_descriptor_queue, 105 UpdateDescriptorQueue& update_descriptor_queue,
108 RenderPassCache& render_pass_cache, BufferCache& buffer_cache, 106 RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
109 TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); 107 TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 2b859c6b8..7cb02631c 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -65,10 +65,9 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
65 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; 65 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
66} 66}
67 67
68QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, 68QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
69 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, 69 Scheduler& scheduler_)
70 const Device& device_, Scheduler& scheduler_) 70 : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_},
71 : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_},
72 query_pools{ 71 query_pools{
73 QueryPool{device_, scheduler_, QueryType::SamplesPassed}, 72 QueryPool{device_, scheduler_, QueryType::SamplesPassed},
74 } {} 73 } {}
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b0d86c4f8..26762ee09 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -52,9 +52,8 @@ private:
52class QueryCache final 52class QueryCache final
53 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { 53 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
54public: 54public:
55 explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, 55 explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
56 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, 56 Scheduler& scheduler_);
57 const Device& device_, Scheduler& scheduler_);
58 ~QueryCache(); 57 ~QueryCache();
59 58
60 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); 59 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 7e40c2df1..acfd5da7d 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -11,6 +11,7 @@
11#include "common/microprofile.h" 11#include "common/microprofile.h"
12#include "common/scope_exit.h" 12#include "common/scope_exit.h"
13#include "common/settings.h" 13#include "common/settings.h"
14#include "video_core/control/channel_state.h"
14#include "video_core/engines/kepler_compute.h" 15#include "video_core/engines/kepler_compute.h"
15#include "video_core/engines/maxwell_3d.h" 16#include "video_core/engines/maxwell_3d.h"
16#include "video_core/renderer_vulkan/blit_image.h" 17#include "video_core/renderer_vulkan/blit_image.h"
@@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
148} // Anonymous namespace 149} // Anonymous namespace
149 150
150RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 151RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
151 Tegra::MemoryManager& gpu_memory_,
152 Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, 152 Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
153 const Device& device_, MemoryAllocator& memory_allocator_, 153 const Device& device_, MemoryAllocator& memory_allocator_,
154 StateTracker& state_tracker_, Scheduler& scheduler_) 154 StateTracker& state_tracker_, Scheduler& scheduler_)
155 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, 155 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_},
156 gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, 156 memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_},
157 screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_},
158 state_tracker{state_tracker_}, scheduler{scheduler_},
159 staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), 157 staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
160 update_descriptor_queue(device, scheduler), 158 update_descriptor_queue(device, scheduler),
161 blit_image(device, scheduler, state_tracker, descriptor_pool), 159 blit_image(device, scheduler, state_tracker, descriptor_pool),
@@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
165 memory_allocator, staging_pool, 163 memory_allocator, staging_pool,
166 blit_image, astc_decoder_pass, 164 blit_image, astc_decoder_pass,
167 render_pass_cache}, 165 render_pass_cache},
168 texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), 166 texture_cache(texture_cache_runtime, *this),
169 buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, 167 buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
170 update_descriptor_queue, descriptor_pool), 168 update_descriptor_queue, descriptor_pool),
171 buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), 169 buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
172 pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler, 170 pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
173 descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache, 171 render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
174 texture_cache, gpu.ShaderNotify()), 172 query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache},
175 query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
176 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), 173 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
177 wfi_event(device.GetLogical().CreateEvent()) { 174 wfi_event(device.GetLogical().CreateEvent()) {
178 scheduler.SetQueryCache(query_cache); 175 scheduler.SetQueryCache(query_cache);
@@ -193,14 +190,16 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
193 return; 190 return;
194 } 191 }
195 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 192 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
193 // Update the engine, as the channel may be different.
194 pipeline->SetEngine(maxwell3d, gpu_memory);
196 pipeline->Configure(is_indexed); 195 pipeline->Configure(is_indexed);
197 196
198 BeginTransformFeedback(); 197 BeginTransformFeedback();
199 198
200 UpdateDynamicStates(); 199 UpdateDynamicStates();
201 200
202 const auto& regs{maxwell3d.regs}; 201 const auto& regs{maxwell3d->regs};
203 const u32 num_instances{maxwell3d.mme_draw.instance_count}; 202 const u32 num_instances{maxwell3d->mme_draw.instance_count};
204 const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; 203 const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
205 scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { 204 scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
206 if (draw_params.is_indexed) { 205 if (draw_params.is_indexed) {
@@ -218,14 +217,14 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
218void RasterizerVulkan::Clear() { 217void RasterizerVulkan::Clear() {
219 MICROPROFILE_SCOPE(Vulkan_Clearing); 218 MICROPROFILE_SCOPE(Vulkan_Clearing);
220 219
221 if (!maxwell3d.ShouldExecute()) { 220 if (!maxwell3d->ShouldExecute()) {
222 return; 221 return;
223 } 222 }
224 FlushWork(); 223 FlushWork();
225 224
226 query_cache.UpdateCounters(); 225 query_cache.UpdateCounters();
227 226
228 auto& regs = maxwell3d.regs; 227 auto& regs = maxwell3d->regs;
229 const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || 228 const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
230 regs.clear_buffers.A; 229 regs.clear_buffers.A;
231 const bool use_depth = regs.clear_buffers.Z; 230 const bool use_depth = regs.clear_buffers.Z;
@@ -248,8 +247,15 @@ void RasterizerVulkan::Clear() {
248 } 247 }
249 UpdateViewportsState(regs); 248 UpdateViewportsState(regs);
250 249
250 VkRect2D default_scissor;
251 default_scissor.offset.x = 0;
252 default_scissor.offset.y = 0;
253 default_scissor.extent.width = std::numeric_limits<s32>::max();
254 default_scissor.extent.height = std::numeric_limits<s32>::max();
255
251 VkClearRect clear_rect{ 256 VkClearRect clear_rect{
252 .rect = GetScissorState(regs, 0, up_scale, down_shift), 257 .rect = regs.clear_flags.scissor ? GetScissorState(regs, 0, up_scale, down_shift)
258 : default_scissor,
253 .baseArrayLayer = regs.clear_buffers.layer, 259 .baseArrayLayer = regs.clear_buffers.layer,
254 .layerCount = 1, 260 .layerCount = 1,
255 }; 261 };
@@ -339,9 +345,9 @@ void RasterizerVulkan::DispatchCompute() {
339 return; 345 return;
340 } 346 }
341 std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; 347 std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
342 pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache); 348 pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache);
343 349
344 const auto& qmd{kepler_compute.launch_description}; 350 const auto& qmd{kepler_compute->launch_description};
345 const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; 351 const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
346 scheduler.RequestOutsideRenderPassOperationContext(); 352 scheduler.RequestOutsideRenderPassOperationContext();
347 scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); 353 scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
@@ -422,7 +428,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
422 } 428 }
423} 429}
424 430
425void RasterizerVulkan::SyncGuestHost() { 431void RasterizerVulkan::InvalidateGPUCache() {
426 pipeline_cache.SyncGuestHost(); 432 pipeline_cache.SyncGuestHost();
427 { 433 {
428 std::scoped_lock lock{buffer_cache.mutex}; 434 std::scoped_lock lock{buffer_cache.mutex};
@@ -442,40 +448,30 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) {
442 pipeline_cache.OnCPUWrite(addr, size); 448 pipeline_cache.OnCPUWrite(addr, size);
443} 449}
444 450
445void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { 451void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
446 { 452 {
447 std::scoped_lock lock{texture_cache.mutex}; 453 std::scoped_lock lock{texture_cache.mutex};
448 texture_cache.UnmapGPUMemory(addr, size); 454 texture_cache.UnmapGPUMemory(as_id, addr, size);
449 } 455 }
450} 456}
451 457
452void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { 458void RasterizerVulkan::SignalFence(std::function<void()>&& func) {
453 if (!gpu.IsAsync()) { 459 fence_manager.SignalFence(std::move(func));
454 gpu_memory.Write<u32>(addr, value); 460}
455 return; 461
456 } 462void RasterizerVulkan::SyncOperation(std::function<void()>&& func) {
457 fence_manager.SignalSemaphore(addr, value); 463 fence_manager.SyncOperation(std::move(func));
458} 464}
459 465
460void RasterizerVulkan::SignalSyncPoint(u32 value) { 466void RasterizerVulkan::SignalSyncPoint(u32 value) {
461 if (!gpu.IsAsync()) {
462 gpu.IncrementSyncPoint(value);
463 return;
464 }
465 fence_manager.SignalSyncPoint(value); 467 fence_manager.SignalSyncPoint(value);
466} 468}
467 469
468void RasterizerVulkan::SignalReference() { 470void RasterizerVulkan::SignalReference() {
469 if (!gpu.IsAsync()) {
470 return;
471 }
472 fence_manager.SignalOrdering(); 471 fence_manager.SignalOrdering();
473} 472}
474 473
475void RasterizerVulkan::ReleaseFences() { 474void RasterizerVulkan::ReleaseFences() {
476 if (!gpu.IsAsync()) {
477 return;
478 }
479 fence_manager.WaitPendingFences(); 475 fence_manager.WaitPendingFences();
480} 476}
481 477
@@ -552,13 +548,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA()
552} 548}
553 549
554void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 550void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
555 std::span<u8> memory) { 551 std::span<const u8> memory) {
556 auto cpu_addr = gpu_memory.GpuToCpuAddress(address); 552 auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
557 if (!cpu_addr) [[unlikely]] { 553 if (!cpu_addr) [[unlikely]] {
558 gpu_memory.WriteBlock(address, memory.data(), copy_size); 554 gpu_memory->WriteBlock(address, memory.data(), copy_size);
559 return; 555 return;
560 } 556 }
561 gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); 557 gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
562 { 558 {
563 std::unique_lock<std::mutex> lock{buffer_cache.mutex}; 559 std::unique_lock<std::mutex> lock{buffer_cache.mutex};
564 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { 560 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -627,7 +623,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
627} 623}
628 624
629void RasterizerVulkan::UpdateDynamicStates() { 625void RasterizerVulkan::UpdateDynamicStates() {
630 auto& regs = maxwell3d.regs; 626 auto& regs = maxwell3d->regs;
631 UpdateViewportsState(regs); 627 UpdateViewportsState(regs);
632 UpdateScissorsState(regs); 628 UpdateScissorsState(regs);
633 UpdateDepthBias(regs); 629 UpdateDepthBias(regs);
@@ -651,7 +647,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
651} 647}
652 648
653void RasterizerVulkan::BeginTransformFeedback() { 649void RasterizerVulkan::BeginTransformFeedback() {
654 const auto& regs = maxwell3d.regs; 650 const auto& regs = maxwell3d->regs;
655 if (regs.tfb_enabled == 0) { 651 if (regs.tfb_enabled == 0) {
656 return; 652 return;
657 } 653 }
@@ -667,7 +663,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
667} 663}
668 664
669void RasterizerVulkan::EndTransformFeedback() { 665void RasterizerVulkan::EndTransformFeedback() {
670 const auto& regs = maxwell3d.regs; 666 const auto& regs = maxwell3d->regs;
671 if (regs.tfb_enabled == 0) { 667 if (regs.tfb_enabled == 0) {
672 return; 668 return;
673 } 669 }
@@ -917,7 +913,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
917} 913}
918 914
919void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { 915void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
920 auto& dirty{maxwell3d.dirty.flags}; 916 auto& dirty{maxwell3d->dirty.flags};
921 if (!dirty[Dirty::VertexInput]) { 917 if (!dirty[Dirty::VertexInput]) {
922 return; 918 return;
923 } 919 }
@@ -974,4 +970,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
974 }); 970 });
975} 971}
976 972
973void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
974 CreateChannel(channel);
975 {
976 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
977 texture_cache.CreateChannel(channel);
978 buffer_cache.CreateChannel(channel);
979 }
980 pipeline_cache.CreateChannel(channel);
981 query_cache.CreateChannel(channel);
982 state_tracker.SetupTables(channel);
983}
984
985void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
986 const s32 channel_id = channel.bind_id;
987 BindToChannel(channel_id);
988 {
989 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
990 texture_cache.BindToChannel(channel_id);
991 buffer_cache.BindToChannel(channel_id);
992 }
993 pipeline_cache.BindToChannel(channel_id);
994 query_cache.BindToChannel(channel_id);
995 state_tracker.ChangeChannel(channel);
996 state_tracker.InvalidateState();
997}
998
999void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
1000 EraseChannel(channel_id);
1001 {
1002 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1003 texture_cache.EraseChannel(channel_id);
1004 buffer_cache.EraseChannel(channel_id);
1005 }
1006 pipeline_cache.EraseChannel(channel_id);
1007 query_cache.EraseChannel(channel_id);
1008}
1009
977} // namespace Vulkan 1010} // namespace Vulkan
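The three new overrides above keep every per-channel cache in lockstep. A stripped-down sketch of that lifecycle, with an illustrative ChannelCache type in place of yuzu's texture/buffer/pipeline/query caches:

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    struct ChannelCache {
        void CreateChannel(int32_t id) { state.emplace(id, 0); }
        void BindToChannel(int32_t id) { bound = id; }
        void EraseChannel(int32_t id) { state.erase(id); }

        std::unordered_map<int32_t, int> state; // per-channel payload (placeholder)
        int32_t bound = -1;
    };

    struct RasterizerSketch {
        void InitializeChannel(int32_t id) {
            for (ChannelCache* cache : caches) {
                cache->CreateChannel(id); // every cache learns about the new channel
            }
        }
        void BindChannel(int32_t id) {
            for (ChannelCache* cache : caches) {
                cache->BindToChannel(id); // switch all caches to the same channel
            }
        }
        void ReleaseChannel(int32_t id) {
            for (ChannelCache* cache : caches) {
                cache->EraseChannel(id); // drop per-channel state everywhere
            }
        }
        std::vector<ChannelCache*> caches;
    };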
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0370ea39b..4cde3c983 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -8,6 +8,7 @@
8#include <boost/container/static_vector.hpp> 8#include <boost/container/static_vector.hpp>
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/control/channel_state_cache.h"
11#include "video_core/engines/maxwell_dma.h" 12#include "video_core/engines/maxwell_dma.h"
12#include "video_core/rasterizer_accelerated.h" 13#include "video_core/rasterizer_accelerated.h"
13#include "video_core/rasterizer_interface.h" 14#include "video_core/rasterizer_interface.h"
@@ -54,13 +55,13 @@ private:
54 BufferCache& buffer_cache; 55 BufferCache& buffer_cache;
55}; 56};
56 57
57class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { 58class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
59 protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
58public: 60public:
59 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 61 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
60 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, 62 Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
61 ScreenInfo& screen_info_, const Device& device_, 63 const Device& device_, MemoryAllocator& memory_allocator_,
62 MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, 64 StateTracker& state_tracker_, Scheduler& scheduler_);
63 Scheduler& scheduler_);
64 ~RasterizerVulkan() override; 65 ~RasterizerVulkan() override;
65 66
66 void Draw(bool is_indexed, bool is_instanced) override; 67 void Draw(bool is_indexed, bool is_instanced) override;
@@ -75,10 +76,11 @@ public:
75 bool MustFlushRegion(VAddr addr, u64 size) override; 76 bool MustFlushRegion(VAddr addr, u64 size) override;
76 void InvalidateRegion(VAddr addr, u64 size) override; 77 void InvalidateRegion(VAddr addr, u64 size) override;
77 void OnCPUWrite(VAddr addr, u64 size) override; 78 void OnCPUWrite(VAddr addr, u64 size) override;
78 void SyncGuestHost() override; 79 void InvalidateGPUCache() override;
79 void UnmapMemory(VAddr addr, u64 size) override; 80 void UnmapMemory(VAddr addr, u64 size) override;
80 void ModifyGPUMemory(GPUVAddr addr, u64 size) override; 81 void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
81 void SignalSemaphore(GPUVAddr addr, u32 value) override; 82 void SignalFence(std::function<void()>&& func) override;
83 void SyncOperation(std::function<void()>&& func) override;
82 void SignalSyncPoint(u32 value) override; 84 void SignalSyncPoint(u32 value) override;
83 void SignalReference() override; 85 void SignalReference() override;
84 void ReleaseFences() override; 86 void ReleaseFences() override;
@@ -93,12 +95,18 @@ public:
93 const Tegra::Engines::Fermi2D::Config& copy_config) override; 95 const Tegra::Engines::Fermi2D::Config& copy_config) override;
94 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; 96 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
95 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 97 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
96 std::span<u8> memory) override; 98 std::span<const u8> memory) override;
97 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, 99 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
98 u32 pixel_stride) override; 100 u32 pixel_stride) override;
99 void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 101 void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
100 const VideoCore::DiskResourceLoadCallback& callback) override; 102 const VideoCore::DiskResourceLoadCallback& callback) override;
101 103
104 void InitializeChannel(Tegra::Control::ChannelState& channel) override;
105
106 void BindChannel(Tegra::Control::ChannelState& channel) override;
107
108 void ReleaseChannel(s32 channel_id) override;
109
102private: 110private:
103 static constexpr size_t MAX_TEXTURES = 192; 111 static constexpr size_t MAX_TEXTURES = 192;
104 static constexpr size_t MAX_IMAGES = 48; 112 static constexpr size_t MAX_IMAGES = 48;
@@ -134,9 +142,6 @@ private:
134 void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); 142 void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
135 143
136 Tegra::GPU& gpu; 144 Tegra::GPU& gpu;
137 Tegra::MemoryManager& gpu_memory;
138 Tegra::Engines::Maxwell3D& maxwell3d;
139 Tegra::Engines::KeplerCompute& kepler_compute;
140 145
141 ScreenInfo& screen_info; 146 ScreenInfo& screen_info;
142 const Device& device; 147 const Device& device;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 9ad096431..f234e1a31 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -7,9 +7,9 @@
7 7
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "video_core/control/channel_state.h"
10#include "video_core/dirty_flags.h" 11#include "video_core/dirty_flags.h"
11#include "video_core/engines/maxwell_3d.h" 12#include "video_core/engines/maxwell_3d.h"
12#include "video_core/gpu.h"
13#include "video_core/renderer_vulkan/vk_state_tracker.h" 13#include "video_core/renderer_vulkan/vk_state_tracker.h"
14 14
15#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) 15#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -174,9 +174,8 @@ void SetupDirtyVertexBindings(Tables& tables) {
174} 174}
175} // Anonymous namespace 175} // Anonymous namespace
176 176
177StateTracker::StateTracker(Tegra::GPU& gpu) 177void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
178 : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} { 178 auto& tables{channel_state.maxwell_3d->dirty.tables};
179 auto& tables{gpu.Maxwell3D().dirty.tables};
180 SetupDirtyFlags(tables); 179 SetupDirtyFlags(tables);
181 SetupDirtyViewports(tables); 180 SetupDirtyViewports(tables);
182 SetupDirtyScissors(tables); 181 SetupDirtyScissors(tables);
@@ -199,4 +198,15 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
199 SetupDirtyVertexBindings(tables); 198 SetupDirtyVertexBindings(tables);
200} 199}
201 200
201void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
202 flags = &channel_state.maxwell_3d->dirty.flags;
203}
204
205void StateTracker::InvalidateState() {
206 flags->set();
207}
208
209StateTracker::StateTracker()
210 : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {}
211
202} // namespace Vulkan 212} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index a85bc1c10..2296dea60 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -10,6 +10,12 @@
10#include "video_core/dirty_flags.h" 10#include "video_core/dirty_flags.h"
11#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
12 12
13namespace Tegra {
14namespace Control {
15struct ChannelState;
16}
17} // namespace Tegra
18
13namespace Vulkan { 19namespace Vulkan {
14 20
15namespace Dirty { 21namespace Dirty {
@@ -53,19 +59,19 @@ class StateTracker {
53 using Maxwell = Tegra::Engines::Maxwell3D::Regs; 59 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
54 60
55public: 61public:
56 explicit StateTracker(Tegra::GPU& gpu); 62 explicit StateTracker();
57 63
58 void InvalidateCommandBufferState() { 64 void InvalidateCommandBufferState() {
59 flags |= invalidation_flags; 65 (*flags) |= invalidation_flags;
60 current_topology = INVALID_TOPOLOGY; 66 current_topology = INVALID_TOPOLOGY;
61 } 67 }
62 68
63 void InvalidateViewports() { 69 void InvalidateViewports() {
64 flags[Dirty::Viewports] = true; 70 (*flags)[Dirty::Viewports] = true;
65 } 71 }
66 72
67 void InvalidateScissors() { 73 void InvalidateScissors() {
68 flags[Dirty::Scissors] = true; 74 (*flags)[Dirty::Scissors] = true;
69 } 75 }
70 76
71 bool TouchViewports() { 77 bool TouchViewports() {
@@ -139,16 +145,23 @@ public:
139 return has_changed; 145 return has_changed;
140 } 146 }
141 147
148 void SetupTables(Tegra::Control::ChannelState& channel_state);
149
150 void ChangeChannel(Tegra::Control::ChannelState& channel_state);
151
152 void InvalidateState();
153
142private: 154private:
143 static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); 155 static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
144 156
145 bool Exchange(std::size_t id, bool new_value) const noexcept { 157 bool Exchange(std::size_t id, bool new_value) const noexcept {
146 const bool is_dirty = flags[id]; 158 const bool is_dirty = (*flags)[id];
147 flags[id] = new_value; 159 (*flags)[id] = new_value;
148 return is_dirty; 160 return is_dirty;
149 } 161 }
150 162
151 Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; 163 Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
164 Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags;
152 Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; 165 Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
153 Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; 166 Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
154}; 167};
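The StateTracker rework above replaces the dirty-flag reference with a pointer plus a default bitset, so the tracker is usable before any channel exists and can be retargeted on channel switches. A small sketch of the pattern (the 64-bit size and names are illustrative, not the real Maxwell3D flag count):

    #include <bitset>
    #include <cstddef>

    class TrackerSketch {
    public:
        TrackerSketch() : flags{&default_flags} {}

        void ChangeChannel(std::bitset<64>& channel_flags) {
            flags = &channel_flags; // point at the new channel's dirty state
        }

        void InvalidateState() {
            flags->set(); // mark everything dirty after a channel switch
        }

        bool Exchange(std::size_t id, bool new_value) {
            const bool was_dirty = (*flags)[id];
            (*flags)[id] = new_value;
            return was_dirty;
        }

    private:
        std::bitset<64>* flags;        // never null: starts at default_flags
        std::bitset<64> default_flags; // used until a channel is bound
    };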
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index a69ae7725..706d9ba74 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
36 // Mailbox (triple buffering) doesn't lock the application like fifo (vsync), 36 // Mailbox (triple buffering) doesn't lock the application like fifo (vsync),
37 // prefer it if vsync option is not selected 37 // prefer it if vsync option is not selected
38 const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR); 38 const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
39 if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { 39 if (Settings::values.fullscreen_mode.GetValue() == Settings::FullscreenMode::Borderless &&
40 found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) {
40 return VK_PRESENT_MODE_MAILBOX_KHR; 41 return VK_PRESENT_MODE_MAILBOX_KHR;
41 } 42 }
42 if (!Settings::values.use_speed_limit.GetValue()) { 43 if (!Settings::values.use_speed_limit.GetValue()) {
@@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3
156 present_mode = ChooseSwapPresentMode(present_modes); 157 present_mode = ChooseSwapPresentMode(present_modes);
157 158
158 u32 requested_image_count{capabilities.minImageCount + 1}; 159 u32 requested_image_count{capabilities.minImageCount + 1};
159 if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { 160 // Ensure triple buffering if possible.
160 requested_image_count = capabilities.maxImageCount; 161 if (capabilities.maxImageCount > 0) {
162 if (requested_image_count > capabilities.maxImageCount) {
163 requested_image_count = capabilities.maxImageCount;
164 } else {
165 requested_image_count =
166 std::max(requested_image_count, std::min(3U, capabilities.maxImageCount));
167 }
168 } else {
169 requested_image_count = std::max(requested_image_count, 3U);
161 } 170 }
162 VkSwapchainCreateInfoKHR swapchain_ci{ 171 VkSwapchainCreateInfoKHR swapchain_ci{
163 .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, 172 .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
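The image-count selection in the swapchain hunk above asks for one image more than the surface minimum, then raises the request to three where the surface allows it (maxImageCount of 0 means "no upper limit" in VkSurfaceCapabilitiesKHR). A plain C++ sketch of the same arithmetic, with no Vulkan calls and an assumed helper name:

    #include <algorithm>
    #include <cstdint>

    constexpr uint32_t ChooseImageCount(uint32_t min_count, uint32_t max_count) {
        uint32_t requested = min_count + 1;
        if (max_count > 0) {
            if (requested > max_count) {
                requested = max_count; // never exceed the surface maximum
            } else {
                requested = std::max(requested, std::min(3u, max_count)); // prefer triple buffering
            }
        } else {
            requested = std::max(requested, 3u); // unbounded surface: ask for three
        }
        return requested;
    }

    static_assert(ChooseImageCount(2, 0) == 3); // unbounded surface: triple buffer
    static_assert(ChooseImageCount(2, 2) == 2); // clamped by the surface maximum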
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index caca79d79..305ad8aee 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4
592 case PixelFormat::A5B5G5R1_UNORM: 592 case PixelFormat::A5B5G5R1_UNORM:
593 std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial); 593 std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial);
594 break; 594 break;
595 case PixelFormat::R4G4_UNORM: 595 case PixelFormat::G4R4_UNORM:
596 std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed); 596 std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed);
597 break; 597 break;
598 default: 598 default:
@@ -1474,13 +1474,14 @@ bool Image::BlitScaleHelper(bool scale_up) {
1474 }; 1474 };
1475 const VkExtent2D extent{ 1475 const VkExtent2D extent{
1476 .width = std::max(scaled_width, info.size.width), 1476 .width = std::max(scaled_width, info.size.width),
1477 .height = std::max(scaled_height, info.size.width), 1477 .height = std::max(scaled_height, info.size.height),
1478 }; 1478 };
1479 1479
1480 auto* view_ptr = blit_view.get(); 1480 auto* view_ptr = blit_view.get();
1481 if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { 1481 if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
1482 if (!blit_framebuffer) { 1482 if (!blit_framebuffer) {
1483 blit_framebuffer = std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent); 1483 blit_framebuffer =
1484 std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up);
1484 } 1485 }
1485 const auto color_view = blit_view->Handle(Shader::TextureType::Color2D); 1486 const auto color_view = blit_view->Handle(Shader::TextureType::Color2D);
1486 1487
@@ -1488,7 +1489,8 @@ bool Image::BlitScaleHelper(bool scale_up) {
1488 src_region, operation, BLIT_OPERATION); 1489 src_region, operation, BLIT_OPERATION);
1489 } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { 1490 } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
1490 if (!blit_framebuffer) { 1491 if (!blit_framebuffer) {
1491 blit_framebuffer = std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent); 1492 blit_framebuffer =
1493 std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up);
1492 } 1494 }
1493 runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(), 1495 runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(),
1494 blit_view->StencilView(), dst_region, 1496 blit_view->StencilView(), dst_region,
@@ -1756,34 +1758,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
1756 .width = key.size.width, 1758 .width = key.size.width,
1757 .height = key.size.height, 1759 .height = key.size.height,
1758 }} { 1760 }} {
1759 CreateFramebuffer(runtime, color_buffers, depth_buffer); 1761 CreateFramebuffer(runtime, color_buffers, depth_buffer, key.is_rescaled);
1760 if (runtime.device.HasDebuggingToolAttached()) { 1762 if (runtime.device.HasDebuggingToolAttached()) {
1761 framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str()); 1763 framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str());
1762 } 1764 }
1763} 1765}
1764 1766
1765Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, 1767Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
1766 ImageView* depth_buffer, VkExtent2D extent) 1768 ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled)
1767 : render_area{extent} { 1769 : render_area{extent} {
1768 std::array<ImageView*, NUM_RT> color_buffers{color_buffer}; 1770 std::array<ImageView*, NUM_RT> color_buffers{color_buffer};
1769 CreateFramebuffer(runtime, color_buffers, depth_buffer); 1771 CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled);
1770} 1772}
1771 1773
1772Framebuffer::~Framebuffer() = default; 1774Framebuffer::~Framebuffer() = default;
1773 1775
1774void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, 1776void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
1775 std::span<ImageView*, NUM_RT> color_buffers, 1777 std::span<ImageView*, NUM_RT> color_buffers,
1776 ImageView* depth_buffer) { 1778 ImageView* depth_buffer, bool is_rescaled) {
1777 std::vector<VkImageView> attachments; 1779 std::vector<VkImageView> attachments;
1778 RenderPassKey renderpass_key{}; 1780 RenderPassKey renderpass_key{};
1779 s32 num_layers = 1; 1781 s32 num_layers = 1;
1780 1782
1783 const auto& resolution = runtime.resolution;
1784
1785 u32 width = 0;
1786 u32 height = 0;
1781 for (size_t index = 0; index < NUM_RT; ++index) { 1787 for (size_t index = 0; index < NUM_RT; ++index) {
1782 const ImageView* const color_buffer = color_buffers[index]; 1788 const ImageView* const color_buffer = color_buffers[index];
1783 if (!color_buffer) { 1789 if (!color_buffer) {
1784 renderpass_key.color_formats[index] = PixelFormat::Invalid; 1790 renderpass_key.color_formats[index] = PixelFormat::Invalid;
1785 continue; 1791 continue;
1786 } 1792 }
1793 width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width)
1794 : color_buffer->size.width);
1795 height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height)
1796 : color_buffer->size.height);
1787 attachments.push_back(color_buffer->RenderTarget()); 1797 attachments.push_back(color_buffer->RenderTarget());
1788 renderpass_key.color_formats[index] = color_buffer->format; 1798 renderpass_key.color_formats[index] = color_buffer->format;
1789 num_layers = std::max(num_layers, color_buffer->range.extent.layers); 1799 num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1794,6 +1804,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
1794 } 1804 }
1795 const size_t num_colors = attachments.size(); 1805 const size_t num_colors = attachments.size();
1796 if (depth_buffer) { 1806 if (depth_buffer) {
1807 width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width)
1808 : depth_buffer->size.width);
1809 height = std::max(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height)
1810 : depth_buffer->size.height);
1797 attachments.push_back(depth_buffer->RenderTarget()); 1811 attachments.push_back(depth_buffer->RenderTarget());
1798 renderpass_key.depth_format = depth_buffer->format; 1812 renderpass_key.depth_format = depth_buffer->format;
1799 num_layers = std::max(num_layers, depth_buffer->range.extent.layers); 1813 num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1810,6 +1824,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
1810 renderpass_key.samples = samples; 1824 renderpass_key.samples = samples;
1811 1825
1812 renderpass = runtime.render_pass_cache.Get(renderpass_key); 1826 renderpass = runtime.render_pass_cache.Get(renderpass_key);
1827 render_area.width = std::min(render_area.width, width);
1828 render_area.height = std::min(render_area.height, height);
1813 1829
1814 num_color_buffers = static_cast<u32>(num_colors); 1830 num_color_buffers = static_cast<u32>(num_colors);
1815 framebuffer = runtime.device.GetLogical().CreateFramebuffer({ 1831 framebuffer = runtime.device.GetLogical().CreateFramebuffer({
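The framebuffer hunks derive an upper bound for the render area from the attachment extents, scaling them up only when the framebuffer is rescaled, and then clamp the requested render area against that bound. A minimal standalone sketch of that clamping logic, assuming a hypothetical Resolution helper rather than yuzu's actual resolution-scaling settings type:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Extent2D {
    std::uint32_t width;
    std::uint32_t height;
};

// Hypothetical stand-in for the resolution scaler (assumption, not the real type).
struct Resolution {
    std::uint32_t up_scale = 2;
    std::uint32_t down_shift = 0;
    std::uint32_t ScaleUp(std::uint32_t value) const {
        return (value * up_scale) >> down_shift;
    }
};

// Clamp the requested render area so it never exceeds the largest extent
// actually covered by the attachments, scaling attachments when rescaled.
Extent2D ClampRenderArea(Extent2D requested, const std::vector<Extent2D>& attachments,
                         const Resolution& resolution, bool is_rescaled) {
    std::uint32_t width = 0;
    std::uint32_t height = 0;
    for (const Extent2D& size : attachments) {
        width = std::max(width, is_rescaled ? resolution.ScaleUp(size.width) : size.width);
        height = std::max(height, is_rescaled ? resolution.ScaleUp(size.height) : size.height);
    }
    return Extent2D{
        .width = std::min(requested.width, width),
        .height = std::min(requested.height, height),
    };
}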
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 69f06ee7b..0b7ac0df1 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -268,7 +268,7 @@ public:
268 ImageView* depth_buffer, const VideoCommon::RenderTargets& key); 268 ImageView* depth_buffer, const VideoCommon::RenderTargets& key);
269 269
270 explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, 270 explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
271 ImageView* depth_buffer, VkExtent2D extent); 271 ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled);
272 272
273 ~Framebuffer(); 273 ~Framebuffer();
274 274
@@ -279,7 +279,8 @@ public:
279 Framebuffer& operator=(Framebuffer&&) = default; 279 Framebuffer& operator=(Framebuffer&&) = default;
280 280
281 void CreateFramebuffer(TextureCacheRuntime& runtime, 281 void CreateFramebuffer(TextureCacheRuntime& runtime,
282 std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer); 282 std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer,
283 bool is_rescaled = false);
283 284
284 [[nodiscard]] VkFramebuffer Handle() const noexcept { 285 [[nodiscard]] VkFramebuffer Handle() const noexcept {
285 return *framebuffer; 286 return *framebuffer;
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index 164e4ee0e..f53066579 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -8,6 +8,7 @@
8#include "common/assert.h" 8#include "common/assert.h"
9#include "shader_recompiler/frontend/maxwell/control_flow.h" 9#include "shader_recompiler/frontend/maxwell/control_flow.h"
10#include "shader_recompiler/object_pool.h" 10#include "shader_recompiler/object_pool.h"
11#include "video_core/control/channel_state.h"
11#include "video_core/dirty_flags.h" 12#include "video_core/dirty_flags.h"
12#include "video_core/engines/kepler_compute.h" 13#include "video_core/engines/kepler_compute.h"
13#include "video_core/engines/maxwell_3d.h" 14#include "video_core/engines/maxwell_3d.h"
@@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() {
33 RemovePendingShaders(); 34 RemovePendingShaders();
34} 35}
35 36
36ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_, 37ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {}
37 Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
38 Tegra::Engines::KeplerCompute& kepler_compute_)
39 : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_},
40 rasterizer{rasterizer_} {}
41 38
42bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) { 39bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
43 auto& dirty{maxwell3d.dirty.flags}; 40 auto& dirty{maxwell3d->dirty.flags};
44 if (!dirty[VideoCommon::Dirty::Shaders]) { 41 if (!dirty[VideoCommon::Dirty::Shaders]) {
45 return last_shaders_valid; 42 return last_shaders_valid;
46 } 43 }
47 dirty[VideoCommon::Dirty::Shaders] = false; 44 dirty[VideoCommon::Dirty::Shaders] = false;
48 45
49 const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; 46 const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()};
50 for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) { 47 for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
51 if (!maxwell3d.regs.IsShaderConfigEnabled(index)) { 48 if (!maxwell3d->regs.IsShaderConfigEnabled(index)) {
52 unique_hashes[index] = 0; 49 unique_hashes[index] = 0;
53 continue; 50 continue;
54 } 51 }
55 const auto& shader_config{maxwell3d.regs.shader_config[index]}; 52 const auto& shader_config{maxwell3d->regs.shader_config[index]};
56 const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; 53 const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
57 const GPUVAddr shader_addr{base_addr + shader_config.offset}; 54 const GPUVAddr shader_addr{base_addr + shader_config.offset};
58 const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; 55 const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
59 if (!cpu_shader_addr) { 56 if (!cpu_shader_addr) {
60 LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); 57 LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
61 last_shaders_valid = false; 58 last_shaders_valid = false;
@@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
64 const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)}; 61 const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
65 if (!shader_info) { 62 if (!shader_info) {
66 const u32 start_address{shader_config.offset}; 63 const u32 start_address{shader_config.offset};
67 GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address}; 64 GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address};
68 shader_info = MakeShaderInfo(env, *cpu_shader_addr); 65 shader_info = MakeShaderInfo(env, *cpu_shader_addr);
69 } 66 }
70 shader_infos[index] = shader_info; 67 shader_infos[index] = shader_info;
@@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
75} 72}
76 73
77const ShaderInfo* ShaderCache::ComputeShader() { 74const ShaderInfo* ShaderCache::ComputeShader() {
78 const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()}; 75 const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
79 const auto& qmd{kepler_compute.launch_description}; 76 const auto& qmd{kepler_compute->launch_description};
80 const GPUVAddr shader_addr{program_base + qmd.program_start}; 77 const GPUVAddr shader_addr{program_base + qmd.program_start};
81 const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)}; 78 const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
82 if (!cpu_shader_addr) { 79 if (!cpu_shader_addr) {
83 LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr); 80 LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
84 return nullptr; 81 return nullptr;
@@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() {
86 if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) { 83 if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
87 return shader; 84 return shader;
88 } 85 }
89 ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start}; 86 ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
90 return MakeShaderInfo(env, *cpu_shader_addr); 87 return MakeShaderInfo(env, *cpu_shader_addr);
91} 88}
92 89
93void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result, 90void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
94 const std::array<u64, NUM_PROGRAMS>& unique_hashes) { 91 const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
95 size_t env_index{}; 92 size_t env_index{};
96 const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()}; 93 const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()};
97 for (size_t index = 0; index < NUM_PROGRAMS; ++index) { 94 for (size_t index = 0; index < NUM_PROGRAMS; ++index) {
98 if (unique_hashes[index] == 0) { 95 if (unique_hashes[index] == 0) {
99 continue; 96 continue;
100 } 97 }
101 const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)}; 98 const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
102 auto& env{result.envs[index]}; 99 auto& env{result.envs[index]};
103 const u32 start_address{maxwell3d.regs.shader_config[index].offset}; 100 const u32 start_address{maxwell3d->regs.shader_config[index].offset};
104 env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address}; 101 env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address};
105 env.SetCachedSize(shader_infos[index]->size_bytes); 102 env.SetCachedSize(shader_infos[index]->size_bytes);
106 result.env_ptrs[env_index++] = &env; 103 result.env_ptrs[env_index++] = &env;
107 } 104 }
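The shader cache hunks replace constructor-time engine references with pointers that the channel infrastructure rebinds whenever a different channel becomes current. A small sketch of that pattern, with placeholder state structs standing in for Maxwell3D and the memory manager (illustrative names, not the real yuzu classes):

#include <cstdint>

// Placeholder engine state (assumption, not the real yuzu classes).
struct Maxwell3DState { std::uint64_t code_address = 0; };
struct MemoryManagerState { std::uint64_t base = 0; };

struct ChannelState {
    Maxwell3DState maxwell3d;
    MemoryManagerState memory_manager;
};

// A cache that used to take engine references in its constructor now keeps
// pointers and rebinds them per channel.
class ChannelBoundCache {
public:
    void BindChannel(ChannelState& state) {
        maxwell3d = &state.maxwell3d;
        gpu_memory = &state.memory_manager;
    }

    std::uint64_t ShaderBaseAddress() const {
        // Every access goes through the currently bound channel's state.
        return gpu_memory->base + maxwell3d->code_address;
    }

private:
    Maxwell3DState* maxwell3d = nullptr;
    MemoryManagerState* gpu_memory = nullptr;
};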
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index f67cea8c4..a4391202d 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -12,6 +12,7 @@
12#include <vector> 12#include <vector>
13 13
14#include "common/common_types.h" 14#include "common/common_types.h"
15#include "video_core/control/channel_state_cache.h"
15#include "video_core/rasterizer_interface.h" 16#include "video_core/rasterizer_interface.h"
16#include "video_core/shader_environment.h" 17#include "video_core/shader_environment.h"
17 18
@@ -19,6 +20,10 @@ namespace Tegra {
19class MemoryManager; 20class MemoryManager;
20} 21}
21 22
23namespace Tegra::Control {
24struct ChannelState;
25}
26
22namespace VideoCommon { 27namespace VideoCommon {
23 28
24class GenericEnvironment; 29class GenericEnvironment;
@@ -28,7 +33,7 @@ struct ShaderInfo {
28 size_t size_bytes{}; 33 size_t size_bytes{};
29}; 34};
30 35
31class ShaderCache { 36class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
32 static constexpr u64 YUZU_PAGEBITS = 14; 37 static constexpr u64 YUZU_PAGEBITS = 14;
33 static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; 38 static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
34 39
@@ -71,9 +76,7 @@ protected:
71 } 76 }
72 }; 77 };
73 78
74 explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_, 79 explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_);
75 Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
76 Tegra::Engines::KeplerCompute& kepler_compute_);
77 80
78 /// @brief Update the hashes and information of shader stages 81 /// @brief Update the hashes and information of shader stages
79 /// @param unique_hashes Shader hashes to store into when a stage is enabled 82 /// @param unique_hashes Shader hashes to store into when a stage is enabled
@@ -88,10 +91,6 @@ protected:
88 void GetGraphicsEnvironments(GraphicsEnvironments& result, 91 void GetGraphicsEnvironments(GraphicsEnvironments& result,
89 const std::array<u64, NUM_PROGRAMS>& unique_hashes); 92 const std::array<u64, NUM_PROGRAMS>& unique_hashes);
90 93
91 Tegra::MemoryManager& gpu_memory;
92 Tegra::Engines::Maxwell3D& maxwell3d;
93 Tegra::Engines::KeplerCompute& kepler_compute;
94
95 std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{}; 94 std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{};
96 bool last_shaders_valid = false; 95 bool last_shaders_valid = false;
97 96
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index 5fd82357c..57ca7f597 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -82,7 +82,7 @@ enum class PixelFormat {
82 BC3_SRGB, 82 BC3_SRGB,
83 BC7_SRGB, 83 BC7_SRGB,
84 A4B4G4R4_UNORM, 84 A4B4G4R4_UNORM,
85 R4G4_UNORM, 85 G4R4_UNORM,
86 ASTC_2D_4X4_SRGB, 86 ASTC_2D_4X4_SRGB,
87 ASTC_2D_8X8_SRGB, 87 ASTC_2D_8X8_SRGB,
88 ASTC_2D_8X5_SRGB, 88 ASTC_2D_8X5_SRGB,
@@ -218,7 +218,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{
218 4, // BC3_SRGB 218 4, // BC3_SRGB
219 4, // BC7_SRGB 219 4, // BC7_SRGB
220 1, // A4B4G4R4_UNORM 220 1, // A4B4G4R4_UNORM
221 1, // R4G4_UNORM 221 1, // G4R4_UNORM
222 4, // ASTC_2D_4X4_SRGB 222 4, // ASTC_2D_4X4_SRGB
223 8, // ASTC_2D_8X8_SRGB 223 8, // ASTC_2D_8X8_SRGB
224 8, // ASTC_2D_8X5_SRGB 224 8, // ASTC_2D_8X5_SRGB
@@ -323,7 +323,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{
323 4, // BC3_SRGB 323 4, // BC3_SRGB
324 4, // BC7_SRGB 324 4, // BC7_SRGB
325 1, // A4B4G4R4_UNORM 325 1, // A4B4G4R4_UNORM
326 1, // R4G4_UNORM 326 1, // G4R4_UNORM
327 4, // ASTC_2D_4X4_SRGB 327 4, // ASTC_2D_4X4_SRGB
328 8, // ASTC_2D_8X8_SRGB 328 8, // ASTC_2D_8X8_SRGB
329 5, // ASTC_2D_8X5_SRGB 329 5, // ASTC_2D_8X5_SRGB
@@ -428,7 +428,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{
428 128, // BC3_SRGB 428 128, // BC3_SRGB
429 128, // BC7_UNORM 429 128, // BC7_UNORM
430 16, // A4B4G4R4_UNORM 430 16, // A4B4G4R4_UNORM
431 8, // R4G4_UNORM 431 8, // G4R4_UNORM
432 128, // ASTC_2D_4X4_SRGB 432 128, // ASTC_2D_4X4_SRGB
433 128, // ASTC_2D_8X8_SRGB 433 128, // ASTC_2D_8X8_SRGB
434 128, // ASTC_2D_8X5_SRGB 434 128, // ASTC_2D_8X5_SRGB
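surface.h keeps parallel constexpr tables indexed by PixelFormat; the renamed G4R4_UNORM entry stays a 1x1 block of 8 bits. A reduced sketch of how such per-format tables are typically queried, shrunk to three entries with the values taken from the hunks above (the enum and table sizes here are illustrative only):

#include <array>
#include <cstddef>
#include <cstdint>

// Reduced format list for illustration (assumption: only three entries shown).
enum class PixelFormat : std::size_t { A4B4G4R4_UNORM, G4R4_UNORM, ASTC_2D_4X4_SRGB, Max };

constexpr std::size_t MaxPixelFormat = static_cast<std::size_t>(PixelFormat::Max);

constexpr std::array<std::uint8_t, MaxPixelFormat> BLOCK_WIDTH_TABLE{{1, 1, 4}};
constexpr std::array<std::uint8_t, MaxPixelFormat> BLOCK_HEIGHT_TABLE{{1, 1, 4}};
constexpr std::array<std::uint8_t, MaxPixelFormat> BITS_PER_BLOCK_TABLE{{16, 8, 128}};

constexpr std::uint32_t BytesPerBlock(PixelFormat format) {
    return BITS_PER_BLOCK_TABLE[static_cast<std::size_t>(format)] / 8;
}

static_assert(BytesPerBlock(PixelFormat::G4R4_UNORM) == 1);
static_assert(BytesPerBlock(PixelFormat::ASTC_2D_4X4_SRGB) == 16);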
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index c71694d2a..ad935d386 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
63 case Hash(TextureFormat::A4B4G4R4, UNORM): 63 case Hash(TextureFormat::A4B4G4R4, UNORM):
64 return PixelFormat::A4B4G4R4_UNORM; 64 return PixelFormat::A4B4G4R4_UNORM;
65 case Hash(TextureFormat::G4R4, UNORM): 65 case Hash(TextureFormat::G4R4, UNORM):
66 return PixelFormat::R4G4_UNORM; 66 return PixelFormat::G4R4_UNORM;
67 case Hash(TextureFormat::A5B5G5R1, UNORM): 67 case Hash(TextureFormat::A5B5G5R1, UNORM):
68 return PixelFormat::A5B5G5R1_UNORM; 68 return PixelFormat::A5B5G5R1_UNORM;
69 case Hash(TextureFormat::R8, UNORM): 69 case Hash(TextureFormat::R8, UNORM):
diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h
index 6881e4c90..acc854715 100644
--- a/src/video_core/texture_cache/formatter.h
+++ b/src/video_core/texture_cache/formatter.h
@@ -153,8 +153,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str
153 return "BC7_SRGB"; 153 return "BC7_SRGB";
154 case PixelFormat::A4B4G4R4_UNORM: 154 case PixelFormat::A4B4G4R4_UNORM:
155 return "A4B4G4R4_UNORM"; 155 return "A4B4G4R4_UNORM";
156 case PixelFormat::R4G4_UNORM: 156 case PixelFormat::G4R4_UNORM:
157 return "R4G4_UNORM"; 157 return "G4R4_UNORM";
158 case PixelFormat::ASTC_2D_4X4_SRGB: 158 case PixelFormat::ASTC_2D_4X4_SRGB:
159 return "ASTC_2D_4X4_SRGB"; 159 return "ASTC_2D_4X4_SRGB";
160 case PixelFormat::ASTC_2D_8X8_SRGB: 160 case PixelFormat::ASTC_2D_8X8_SRGB:
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index f61e09ac7..91512022f 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -7,6 +7,7 @@
7#include <vector> 7#include <vector>
8 8
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/div_ceil.h"
10#include "video_core/surface.h" 11#include "video_core/surface.h"
11#include "video_core/texture_cache/formatter.h" 12#include "video_core/texture_cache/formatter.h"
12#include "video_core/texture_cache/image_base.h" 13#include "video_core/texture_cache/image_base.h"
@@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
182 }; 183 };
183 const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1; 184 const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1;
184 const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1; 185 const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1;
185 if (is_lhs_compressed && is_rhs_compressed) {
186 LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented");
187 return;
188 }
189 const s32 lhs_mips = lhs.info.resources.levels; 186 const s32 lhs_mips = lhs.info.resources.levels;
190 const s32 rhs_mips = rhs.info.resources.levels; 187 const s32 rhs_mips = rhs.info.resources.levels;
191 const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips); 188 const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips);
@@ -199,12 +196,12 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
199 Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level); 196 Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level);
200 Extent3D rhs_size = MipSize(rhs.info.size, mip_level); 197 Extent3D rhs_size = MipSize(rhs.info.size, mip_level);
201 if (is_lhs_compressed) { 198 if (is_lhs_compressed) {
202 lhs_size.width /= lhs_block.width; 199 lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width);
203 lhs_size.height /= lhs_block.height; 200 lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height);
204 } 201 }
205 if (is_rhs_compressed) { 202 if (is_rhs_compressed) {
206 rhs_size.width /= rhs_block.width; 203 rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width);
207 rhs_size.height /= rhs_block.height; 204 rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height);
208 } 205 }
209 const Extent3D copy_size{ 206 const Extent3D copy_size{
210 .width = std::min(lhs_size.width, rhs_size.width), 207 .width = std::min(lhs_size.width, rhs_size.width),
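The aliasing hunk swaps plain division for Common::DivCeil so compressed mip extents that are not block-aligned still round up to a full block. A standalone illustration of the difference, with DivCeil reimplemented here for the example (yuzu's version lives in common/div_ceil.h):

#include <cassert>
#include <cstdint>

// Round-up integer division, equivalent in spirit to Common::DivCeil.
constexpr std::uint32_t DivCeil(std::uint32_t value, std::uint32_t divisor) {
    return (value + divisor - 1) / divisor;
}

int main() {
    // A 10x6 texel mip of a BC-compressed format with 4x4 blocks:
    constexpr std::uint32_t block_dim = 4;
    constexpr std::uint32_t width = 10;
    constexpr std::uint32_t height = 6;

    // Plain division truncates and loses the partially filled blocks.
    assert(width / block_dim == 2 && height / block_dim == 1);

    // DivCeil counts the partial blocks, matching how the data is stored.
    assert(DivCeil(width, block_dim) == 3 && DivCeil(height, block_dim) == 2);
    return 0;
}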
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 1f85ec9da..620565684 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -88,6 +88,9 @@ struct ImageBase {
88 u32 scale_rating = 0; 88 u32 scale_rating = 0;
89 u64 scale_tick = 0; 89 u64 scale_tick = 0;
90 bool has_scaled = false; 90 bool has_scaled = false;
91
92 size_t channel = 0;
93
91 ImageFlagBits flags = ImageFlagBits::CpuModified; 94 ImageFlagBits flags = ImageFlagBits::CpuModified;
92 95
93 GPUVAddr gpu_addr = 0; 96 GPUVAddr gpu_addr = 0;
diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h
index da8ffe9ec..1efbd6507 100644
--- a/src/video_core/texture_cache/render_targets.h
+++ b/src/video_core/texture_cache/render_targets.h
@@ -26,6 +26,7 @@ struct RenderTargets {
26 ImageViewId depth_buffer_id{}; 26 ImageViewId depth_buffer_id{};
27 std::array<u8, NUM_RT> draw_buffers{}; 27 std::array<u8, NUM_RT> draw_buffers{};
28 Extent2D size{}; 28 Extent2D size{};
29 bool is_rescaled{};
29}; 30};
30 31
31} // namespace VideoCommon 32} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp
new file mode 100644
index 000000000..8a9a32f44
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.cpp
@@ -0,0 +1,15 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "video_core/control/channel_state_cache.inc"
5#include "video_core/texture_cache/texture_cache_base.h"
6
7namespace VideoCommon {
8
9TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept
10 : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory},
11 compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {}
12
13template class VideoCommon::ChannelSetupCaches<VideoCommon::TextureCacheChannelInfo>;
14
15} // namespace VideoCommon
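The new texture_cache.cpp follows the split already used by channel_state_cache: template member definitions live in an .inc file that only a few translation units include, and each of those units ends with an explicit instantiation. A minimal sketch of that layout, written as one block with comments marking the logical files (names and types are illustrative, not the actual yuzu ones):

#include <cstddef>
#include <unordered_map>
#include <vector>

// --- channel_cache.h: declaration only, cheap to include from headers ---
template <class Info>
class ChannelCache {
public:
    void CreateChannel(int bind_id);

private:
    std::unordered_map<int, std::size_t> channel_map;
    std::vector<Info> channel_storage;
};

// --- channel_cache.inc: member definitions, included only where instantiated ---
template <class Info>
void ChannelCache<Info>::CreateChannel(int bind_id) {
    channel_map.emplace(bind_id, channel_storage.size());
    channel_storage.emplace_back();
}

// --- texture_cache.cpp: one translation unit owns the explicit instantiation ---
struct TextureChannelInfo {};
template class ChannelCache<TextureChannelInfo>;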
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 1dbe01bc0..eaf4a1c95 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1,5 +1,5 @@
1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-3.0-or-later
3 3
4#pragma once 4#pragma once
5 5
@@ -7,6 +7,7 @@
7 7
8#include "common/alignment.h" 8#include "common/alignment.h"
9#include "common/settings.h" 9#include "common/settings.h"
10#include "video_core/control/channel_state.h"
10#include "video_core/dirty_flags.h" 11#include "video_core/dirty_flags.h"
11#include "video_core/engines/kepler_compute.h" 12#include "video_core/engines/kepler_compute.h"
12#include "video_core/texture_cache/image_view_base.h" 13#include "video_core/texture_cache/image_view_base.h"
@@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType;
29using namespace Common::Literals; 30using namespace Common::Literals;
30 31
31template <class P> 32template <class P>
32TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_, 33TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_)
33 Tegra::Engines::Maxwell3D& maxwell3d_, 34 : runtime{runtime_}, rasterizer{rasterizer_} {
34 Tegra::Engines::KeplerCompute& kepler_compute_,
35 Tegra::MemoryManager& gpu_memory_)
36 : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
37 kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} {
38 // Configure null sampler 35 // Configure null sampler
39 TSCEntry sampler_descriptor{}; 36 TSCEntry sampler_descriptor{};
40 sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear); 37 sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear);
@@ -93,7 +90,7 @@ void TextureCache<P>::RunGarbageCollector() {
93 const auto copies = FullDownloadCopies(image.info); 90 const auto copies = FullDownloadCopies(image.info);
94 image.DownloadMemory(map, copies); 91 image.DownloadMemory(map, copies);
95 runtime.Finish(); 92 runtime.Finish();
96 SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); 93 SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
97 } 94 }
98 if (True(image.flags & ImageFlagBits::Tracked)) { 95 if (True(image.flags & ImageFlagBits::Tracked)) {
99 UntrackImage(image, image_id); 96 UntrackImage(image, image_id);
@@ -152,22 +149,24 @@ void TextureCache<P>::MarkModification(ImageId id) noexcept {
152template <class P> 149template <class P>
153template <bool has_blacklists> 150template <bool has_blacklists>
154void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) { 151void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) {
155 FillImageViews<has_blacklists>(graphics_image_table, graphics_image_view_ids, views); 152 FillImageViews<has_blacklists>(channel_state->graphics_image_table,
153 channel_state->graphics_image_view_ids, views);
156} 154}
157 155
158template <class P> 156template <class P>
159void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) { 157void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) {
160 FillImageViews<true>(compute_image_table, compute_image_view_ids, views); 158 FillImageViews<true>(channel_state->compute_image_table, channel_state->compute_image_view_ids,
159 views);
161} 160}
162 161
163template <class P> 162template <class P>
164typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) { 163typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
165 if (index > graphics_sampler_table.Limit()) { 164 if (index > channel_state->graphics_sampler_table.Limit()) {
166 LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); 165 LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
167 return &slot_samplers[NULL_SAMPLER_ID]; 166 return &slot_samplers[NULL_SAMPLER_ID];
168 } 167 }
169 const auto [descriptor, is_new] = graphics_sampler_table.Read(index); 168 const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index);
170 SamplerId& id = graphics_sampler_ids[index]; 169 SamplerId& id = channel_state->graphics_sampler_ids[index];
171 if (is_new) { 170 if (is_new) {
172 id = FindSampler(descriptor); 171 id = FindSampler(descriptor);
173 } 172 }
@@ -176,12 +175,12 @@ typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
176 175
177template <class P> 176template <class P>
178typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) { 177typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
179 if (index > compute_sampler_table.Limit()) { 178 if (index > channel_state->compute_sampler_table.Limit()) {
180 LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index); 179 LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
181 return &slot_samplers[NULL_SAMPLER_ID]; 180 return &slot_samplers[NULL_SAMPLER_ID];
182 } 181 }
183 const auto [descriptor, is_new] = compute_sampler_table.Read(index); 182 const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index);
184 SamplerId& id = compute_sampler_ids[index]; 183 SamplerId& id = channel_state->compute_sampler_ids[index];
185 if (is_new) { 184 if (is_new) {
186 id = FindSampler(descriptor); 185 id = FindSampler(descriptor);
187 } 186 }
@@ -191,34 +190,36 @@ typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
191template <class P> 190template <class P>
192void TextureCache<P>::SynchronizeGraphicsDescriptors() { 191void TextureCache<P>::SynchronizeGraphicsDescriptors() {
193 using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex; 192 using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex;
194 const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex; 193 const bool linked_tsc = maxwell3d->regs.sampler_index == SamplerIndex::ViaHeaderIndex;
195 const u32 tic_limit = maxwell3d.regs.tic.limit; 194 const u32 tic_limit = maxwell3d->regs.tic.limit;
196 const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit; 195 const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tsc.limit;
197 if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) { 196 if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(),
198 graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); 197 tsc_limit)) {
198 channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
199 } 199 }
200 if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) { 200 if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) {
201 graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); 201 channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
202 } 202 }
203} 203}
204 204
205template <class P> 205template <class P>
206void TextureCache<P>::SynchronizeComputeDescriptors() { 206void TextureCache<P>::SynchronizeComputeDescriptors() {
207 const bool linked_tsc = kepler_compute.launch_description.linked_tsc; 207 const bool linked_tsc = kepler_compute->launch_description.linked_tsc;
208 const u32 tic_limit = kepler_compute.regs.tic.limit; 208 const u32 tic_limit = kepler_compute->regs.tic.limit;
209 const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit; 209 const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit;
210 const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address(); 210 const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address();
211 if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) { 211 if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
212 compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID); 212 channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
213 } 213 }
214 if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) { 214 if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(),
215 compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID); 215 tic_limit)) {
216 channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
216 } 217 }
217} 218}
218 219
219template <class P> 220template <class P>
220bool TextureCache<P>::RescaleRenderTargets(bool is_clear) { 221bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
221 auto& flags = maxwell3d.dirty.flags; 222 auto& flags = maxwell3d->dirty.flags;
222 u32 scale_rating = 0; 223 u32 scale_rating = 0;
223 bool rescaled = false; 224 bool rescaled = false;
224 std::array<ImageId, NUM_RT> tmp_color_images{}; 225 std::array<ImageId, NUM_RT> tmp_color_images{};
@@ -315,7 +316,7 @@ bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
315template <class P> 316template <class P>
316void TextureCache<P>::UpdateRenderTargets(bool is_clear) { 317void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
317 using namespace VideoCommon::Dirty; 318 using namespace VideoCommon::Dirty;
318 auto& flags = maxwell3d.dirty.flags; 319 auto& flags = maxwell3d->dirty.flags;
319 if (!flags[Dirty::RenderTargets]) { 320 if (!flags[Dirty::RenderTargets]) {
320 for (size_t index = 0; index < NUM_RT; ++index) { 321 for (size_t index = 0; index < NUM_RT; ++index) {
321 ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index]; 322 ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index];
@@ -342,7 +343,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
342 PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id)); 343 PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id));
343 344
344 for (size_t index = 0; index < NUM_RT; ++index) { 345 for (size_t index = 0; index < NUM_RT; ++index) {
345 render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index)); 346 render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d->regs.rt_control.Map(index));
346 } 347 }
347 u32 up_scale = 1; 348 u32 up_scale = 1;
348 u32 down_shift = 0; 349 u32 down_shift = 0;
@@ -351,9 +352,10 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
351 down_shift = Settings::values.resolution_info.down_shift; 352 down_shift = Settings::values.resolution_info.down_shift;
352 } 353 }
353 render_targets.size = Extent2D{ 354 render_targets.size = Extent2D{
354 (maxwell3d.regs.render_area.width * up_scale) >> down_shift, 355 (maxwell3d->regs.render_area.width * up_scale) >> down_shift,
355 (maxwell3d.regs.render_area.height * up_scale) >> down_shift, 356 (maxwell3d->regs.render_area.height * up_scale) >> down_shift,
356 }; 357 };
358 render_targets.is_rescaled = is_rescaling;
357 359
358 flags[Dirty::DepthBiasGlobal] = true; 360 flags[Dirty::DepthBiasGlobal] = true;
359} 361}
@@ -458,7 +460,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
458 const auto copies = FullDownloadCopies(image.info); 460 const auto copies = FullDownloadCopies(image.info);
459 image.DownloadMemory(map, copies); 461 image.DownloadMemory(map, copies);
460 runtime.Finish(); 462 runtime.Finish();
461 SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span); 463 SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
462 } 464 }
463} 465}
464 466
@@ -477,12 +479,20 @@ void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
477} 479}
478 480
479template <class P> 481template <class P>
480void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) { 482void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) {
481 std::vector<ImageId> deleted_images; 483 std::vector<ImageId> deleted_images;
482 ForEachImageInRegionGPU(gpu_addr, size, 484 ForEachImageInRegionGPU(as_id, gpu_addr, size,
483 [&](ImageId id, Image&) { deleted_images.push_back(id); }); 485 [&](ImageId id, Image&) { deleted_images.push_back(id); });
484 for (const ImageId id : deleted_images) { 486 for (const ImageId id : deleted_images) {
485 Image& image = slot_images[id]; 487 Image& image = slot_images[id];
488 if (True(image.flags & ImageFlagBits::CpuModified)) {
489 return;
490 }
491 image.flags |= ImageFlagBits::CpuModified;
492 if (True(image.flags & ImageFlagBits::Tracked)) {
493 UntrackImage(image, id);
494 }
495 /*
486 if (True(image.flags & ImageFlagBits::Remapped)) { 496 if (True(image.flags & ImageFlagBits::Remapped)) {
487 continue; 497 continue;
488 } 498 }
@@ -490,6 +500,7 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
490 if (True(image.flags & ImageFlagBits::Tracked)) { 500 if (True(image.flags & ImageFlagBits::Tracked)) {
491 UntrackImage(image, id); 501 UntrackImage(image, id);
492 } 502 }
503 */
493 } 504 }
494} 505}
495 506
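UnmapGPUMemory now marks the affected images as CPU-modified and untracks them rather than unregistering and deleting them outright, with the old path kept commented out above. A reduced sketch of that marking idea, using plain bools instead of the real ImageFlagBits and a simple per-image loop (an assumption for illustration, not a literal transcription of the hunk):

#include <vector>

struct Image {
    bool cpu_modified = false;
    bool tracked = false;
};

// Mark every image touched by the unmapped range instead of destroying it;
// a later upload refreshes its contents from guest memory.
void MarkUnmappedImages(std::vector<Image*>& touched) {
    for (Image* image : touched) {
        if (image->cpu_modified) {
            continue; // already pending a reupload
        }
        image->cpu_modified = true;
        if (image->tracked) {
            image->tracked = false; // stop watching CPU writes to this image
        }
    }
}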
@@ -655,7 +666,7 @@ void TextureCache<P>::PopAsyncFlushes() {
655 for (const ImageId image_id : download_ids) { 666 for (const ImageId image_id : download_ids) {
656 const ImageBase& image = slot_images[image_id]; 667 const ImageBase& image = slot_images[image_id];
657 const auto copies = FullDownloadCopies(image.info); 668 const auto copies = FullDownloadCopies(image.info);
658 SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span); 669 SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span);
659 download_map.offset += image.unswizzled_size_bytes; 670 download_map.offset += image.unswizzled_size_bytes;
660 download_span = download_span.subspan(image.unswizzled_size_bytes); 671 download_span = download_span.subspan(image.unswizzled_size_bytes);
661 } 672 }
@@ -714,26 +725,26 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
714 const GPUVAddr gpu_addr = image.gpu_addr; 725 const GPUVAddr gpu_addr = image.gpu_addr;
715 726
716 if (True(image.flags & ImageFlagBits::AcceleratedUpload)) { 727 if (True(image.flags & ImageFlagBits::AcceleratedUpload)) {
717 gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes()); 728 gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
718 const auto uploads = FullUploadSwizzles(image.info); 729 const auto uploads = FullUploadSwizzles(image.info);
719 runtime.AccelerateImageUpload(image, staging, uploads); 730 runtime.AccelerateImageUpload(image, staging, uploads);
720 } else if (True(image.flags & ImageFlagBits::Converted)) { 731 } else if (True(image.flags & ImageFlagBits::Converted)) {
721 std::vector<u8> unswizzled_data(image.unswizzled_size_bytes); 732 std::vector<u8> unswizzled_data(image.unswizzled_size_bytes);
722 auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data); 733 auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data);
723 ConvertImage(unswizzled_data, image.info, mapped_span, copies); 734 ConvertImage(unswizzled_data, image.info, mapped_span, copies);
724 image.UploadMemory(staging, copies); 735 image.UploadMemory(staging, copies);
725 } else { 736 } else {
726 const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span); 737 const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span);
727 image.UploadMemory(staging, copies); 738 image.UploadMemory(staging, copies);
728 } 739 }
729} 740}
730 741
731template <class P> 742template <class P>
732ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) { 743ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) {
733 if (!IsValidEntry(gpu_memory, config)) { 744 if (!IsValidEntry(*gpu_memory, config)) {
734 return NULL_IMAGE_VIEW_ID; 745 return NULL_IMAGE_VIEW_ID;
735 } 746 }
736 const auto [pair, is_new] = image_views.try_emplace(config); 747 const auto [pair, is_new] = channel_state->image_views.try_emplace(config);
737 ImageViewId& image_view_id = pair->second; 748 ImageViewId& image_view_id = pair->second;
738 if (is_new) { 749 if (is_new) {
739 image_view_id = CreateImageView(config); 750 image_view_id = CreateImageView(config);
@@ -777,9 +788,9 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a
777template <class P> 788template <class P>
778ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr, 789ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
779 RelaxedOptions options) { 790 RelaxedOptions options) {
780 std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 791 std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
781 if (!cpu_addr) { 792 if (!cpu_addr) {
782 cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); 793 cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
783 if (!cpu_addr) { 794 if (!cpu_addr) {
784 return ImageId{}; 795 return ImageId{};
785 } 796 }
@@ -860,7 +871,7 @@ void TextureCache<P>::InvalidateScale(Image& image) {
860 image.scale_tick = frame_tick + 1; 871 image.scale_tick = frame_tick + 1;
861 } 872 }
862 const std::span<const ImageViewId> image_view_ids = image.image_view_ids; 873 const std::span<const ImageViewId> image_view_ids = image.image_view_ids;
863 auto& dirty = maxwell3d.dirty.flags; 874 auto& dirty = maxwell3d->dirty.flags;
864 dirty[Dirty::RenderTargets] = true; 875 dirty[Dirty::RenderTargets] = true;
865 dirty[Dirty::ZetaBuffer] = true; 876 dirty[Dirty::ZetaBuffer] = true;
866 for (size_t rt = 0; rt < NUM_RT; ++rt) { 877 for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -880,12 +891,15 @@ void TextureCache<P>::InvalidateScale(Image& image) {
880 } 891 }
881 image.image_view_ids.clear(); 892 image.image_view_ids.clear();
882 image.image_view_infos.clear(); 893 image.image_view_infos.clear();
883 if constexpr (ENABLE_VALIDATION) { 894 for (size_t c : active_channel_ids) {
884 std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); 895 auto& channel_info = channel_storage[c];
885 std::ranges::fill(compute_image_view_ids, CORRUPT_ID); 896 if constexpr (ENABLE_VALIDATION) {
897 std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
898 std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
899 }
900 channel_info.graphics_image_table.Invalidate();
901 channel_info.compute_image_table.Invalidate();
886 } 902 }
887 graphics_image_table.Invalidate();
888 compute_image_table.Invalidate();
889 has_deleted_images = true; 903 has_deleted_images = true;
890} 904}
891 905
@@ -929,10 +943,10 @@ bool TextureCache<P>::ScaleDown(Image& image) {
929template <class P> 943template <class P>
930ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, 944ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr,
931 RelaxedOptions options) { 945 RelaxedOptions options) {
932 std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 946 std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
933 if (!cpu_addr) { 947 if (!cpu_addr) {
934 const auto size = CalculateGuestSizeInBytes(info); 948 const auto size = CalculateGuestSizeInBytes(info);
935 cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size); 949 cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size);
936 if (!cpu_addr) { 950 if (!cpu_addr) {
937 const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; 951 const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
938 virtual_invalid_space += Common::AlignUp(size, 32); 952 virtual_invalid_space += Common::AlignUp(size, 32);
@@ -1050,7 +1064,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
1050 const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr); 1064 const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
1051 Image& new_image = slot_images[new_image_id]; 1065 Image& new_image = slot_images[new_image_id];
1052 1066
1053 if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) { 1067 if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
1054 new_image.flags |= ImageFlagBits::Sparse; 1068 new_image.flags |= ImageFlagBits::Sparse;
1055 } 1069 }
1056 1070
@@ -1192,7 +1206,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
1192 if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) { 1206 if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) {
1193 return NULL_SAMPLER_ID; 1207 return NULL_SAMPLER_ID;
1194 } 1208 }
1195 const auto [pair, is_new] = samplers.try_emplace(config); 1209 const auto [pair, is_new] = channel_state->samplers.try_emplace(config);
1196 if (is_new) { 1210 if (is_new) {
1197 pair->second = slot_samplers.insert(runtime, config); 1211 pair->second = slot_samplers.insert(runtime, config);
1198 } 1212 }
@@ -1201,7 +1215,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
1201 1215
1202template <class P> 1216template <class P>
1203ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) { 1217ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
1204 const auto& regs = maxwell3d.regs; 1218 const auto& regs = maxwell3d->regs;
1205 if (index >= regs.rt_control.count) { 1219 if (index >= regs.rt_control.count) {
1206 return ImageViewId{}; 1220 return ImageViewId{};
1207 } 1221 }
@@ -1219,7 +1233,7 @@ ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
1219 1233
1220template <class P> 1234template <class P>
1221ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) { 1235ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) {
1222 const auto& regs = maxwell3d.regs; 1236 const auto& regs = maxwell3d->regs;
1223 if (!regs.zeta_enable) { 1237 if (!regs.zeta_enable) {
1224 return ImageViewId{}; 1238 return ImageViewId{};
1225 } 1239 }
@@ -1316,11 +1330,17 @@ void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f
1316 1330
1317template <class P> 1331template <class P>
1318template <typename Func> 1332template <typename Func>
1319void TextureCache<P>::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) { 1333void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size,
1334 Func&& func) {
1320 using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type; 1335 using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type;
1321 static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>; 1336 static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
1322 boost::container::small_vector<ImageId, 8> images; 1337 boost::container::small_vector<ImageId, 8> images;
1323 ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) { 1338 auto storage_id = getStorageID(as_id);
1339 if (!storage_id) {
1340 return;
1341 }
1342 auto& gpu_page_table = gpu_page_table_storage[*storage_id];
1343 ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) {
1324 const auto it = gpu_page_table.find(page); 1344 const auto it = gpu_page_table.find(page);
1325 if (it == gpu_page_table.end()) { 1345 if (it == gpu_page_table.end()) {
1326 if constexpr (BOOL_BREAK) { 1346 if constexpr (BOOL_BREAK) {
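The region walk above now resolves the channel's address-space id to a page-table slot before iterating pages, so each GPU address space keeps its own image page table and unknown address spaces are skipped entirely. A compact sketch of that lookup with simplified types (the real storage uses Common::IdentityHash maps and slot-vector ids):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <unordered_map>
#include <vector>

using ImageId = std::uint32_t;
using PageTable = std::unordered_map<std::uint64_t, std::vector<ImageId>>;

class GpuPageTables {
public:
    // Called when a new GPU address space registers with the cache.
    std::size_t Register(std::size_t as_id) {
        const std::size_t slot = storage.size();
        storage.emplace_back();
        as_to_slot.emplace(as_id, slot);
        return slot;
    }

    // Resolve an address-space id to its page-table slot, if any.
    std::optional<std::size_t> GetStorageId(std::size_t as_id) const {
        const auto it = as_to_slot.find(as_id);
        if (it == as_to_slot.end()) {
            return std::nullopt;
        }
        return it->second;
    }

    PageTable& operator[](std::size_t slot) { return storage[slot]; }

private:
    std::vector<PageTable> storage;
    std::unordered_map<std::size_t, std::size_t> as_to_slot;
};

// Usage: skip the walk entirely when the address space was never registered.
inline void VisitPage(GpuPageTables& tables, std::size_t as_id, std::uint64_t page) {
    const auto storage_id = tables.GetStorageId(as_id);
    if (!storage_id) {
        return;
    }
    PageTable& page_table = tables[*storage_id];
    const auto it = page_table.find(page);
    if (it == page_table.end()) {
        return;
    }
    for (const ImageId image_id : it->second) {
        (void)image_id; // visit the images registered on this page
    }
}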
@@ -1403,9 +1423,9 @@ template <typename Func>
1403void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) { 1423void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
1404 using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type; 1424 using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
1405 static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>; 1425 static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
1406 const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); 1426 const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
1407 for (const auto& [gpu_addr, size] : segments) { 1427 for (const auto& [gpu_addr, size] : segments) {
1408 std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 1428 std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
1409 ASSERT(cpu_addr); 1429 ASSERT(cpu_addr);
1410 if constexpr (RETURNS_BOOL) { 1430 if constexpr (RETURNS_BOOL) {
1411 if (func(gpu_addr, *cpu_addr, size)) { 1431 if (func(gpu_addr, *cpu_addr, size)) {
@@ -1448,8 +1468,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
1448 } 1468 }
1449 image.lru_index = lru_cache.Insert(image_id, frame_tick); 1469 image.lru_index = lru_cache.Insert(image_id, frame_tick);
1450 1470
1451 ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, 1471 ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
1452 [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); }); 1472 (*channel_state->gpu_page_table)[page].push_back(image_id);
1473 });
1453 if (False(image.flags & ImageFlagBits::Sparse)) { 1474 if (False(image.flags & ImageFlagBits::Sparse)) {
1454 auto map_id = 1475 auto map_id =
1455 slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id); 1476 slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id);
@@ -1480,9 +1501,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
1480 image.flags &= ~ImageFlagBits::BadOverlap; 1501 image.flags &= ~ImageFlagBits::BadOverlap;
1481 lru_cache.Free(image.lru_index); 1502 lru_cache.Free(image.lru_index);
1482 const auto& clear_page_table = 1503 const auto& clear_page_table =
1483 [this, image_id]( 1504 [this, image_id](u64 page,
1484 u64 page, 1505 std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>&
1485 std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) { 1506 selected_page_table) {
1486 const auto page_it = selected_page_table.find(page); 1507 const auto page_it = selected_page_table.find(page);
1487 if (page_it == selected_page_table.end()) { 1508 if (page_it == selected_page_table.end()) {
1488 ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); 1509 ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
@@ -1497,8 +1518,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
1497 } 1518 }
1498 image_ids.erase(vector_it); 1519 image_ids.erase(vector_it);
1499 }; 1520 };
1500 ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, 1521 ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) {
1501 [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); }); 1522 clear_page_table(page, (*channel_state->gpu_page_table));
1523 });
1502 if (False(image.flags & ImageFlagBits::Sparse)) { 1524 if (False(image.flags & ImageFlagBits::Sparse)) {
1503 const auto map_id = image.map_view_id; 1525 const auto map_id = image.map_view_id;
1504 ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) { 1526 ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
@@ -1631,7 +1653,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
1631 ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered"); 1653 ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered");
1632 1654
1633 // Mark render targets as dirty 1655 // Mark render targets as dirty
1634 auto& dirty = maxwell3d.dirty.flags; 1656 auto& dirty = maxwell3d->dirty.flags;
1635 dirty[Dirty::RenderTargets] = true; 1657 dirty[Dirty::RenderTargets] = true;
1636 dirty[Dirty::ZetaBuffer] = true; 1658 dirty[Dirty::ZetaBuffer] = true;
1637 for (size_t rt = 0; rt < NUM_RT; ++rt) { 1659 for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -1681,24 +1703,30 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
1681 if (alloc_images.empty()) { 1703 if (alloc_images.empty()) {
1682 image_allocs_table.erase(alloc_it); 1704 image_allocs_table.erase(alloc_it);
1683 } 1705 }
1684 if constexpr (ENABLE_VALIDATION) { 1706 for (size_t c : active_channel_ids) {
1685 std::ranges::fill(graphics_image_view_ids, CORRUPT_ID); 1707 auto& channel_info = channel_storage[c];
1686 std::ranges::fill(compute_image_view_ids, CORRUPT_ID); 1708 if constexpr (ENABLE_VALIDATION) {
1709 std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
1710 std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
1711 }
1712 channel_info.graphics_image_table.Invalidate();
1713 channel_info.compute_image_table.Invalidate();
1687 } 1714 }
1688 graphics_image_table.Invalidate();
1689 compute_image_table.Invalidate();
1690 has_deleted_images = true; 1715 has_deleted_images = true;
1691} 1716}
1692 1717
1693template <class P> 1718template <class P>
1694void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) { 1719void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) {
1695 auto it = image_views.begin(); 1720 for (size_t c : active_channel_ids) {
1696 while (it != image_views.end()) { 1721 auto& channel_info = channel_storage[c];
1697 const auto found = std::ranges::find(removed_views, it->second); 1722 auto it = channel_info.image_views.begin();
1698 if (found != removed_views.end()) { 1723 while (it != channel_info.image_views.end()) {
1699 it = image_views.erase(it); 1724 const auto found = std::ranges::find(removed_views, it->second);
1700 } else { 1725 if (found != removed_views.end()) {
1701 ++it; 1726 it = channel_info.image_views.erase(it);
1727 } else {
1728 ++it;
1729 }
1702 } 1730 }
1703 } 1731 }
1704} 1732}
@@ -1729,6 +1757,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
1729 boost::container::small_vector<const AliasedImage*, 1> aliased_images; 1757 boost::container::small_vector<const AliasedImage*, 1> aliased_images;
1730 Image& image = slot_images[image_id]; 1758 Image& image = slot_images[image_id];
1731 bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled); 1759 bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled);
1760 bool any_modified = True(image.flags & ImageFlagBits::GpuModified);
1732 u64 most_recent_tick = image.modification_tick; 1761 u64 most_recent_tick = image.modification_tick;
1733 for (const AliasedImage& aliased : image.aliased_images) { 1762 for (const AliasedImage& aliased : image.aliased_images) {
1734 ImageBase& aliased_image = slot_images[aliased.id]; 1763 ImageBase& aliased_image = slot_images[aliased.id];
@@ -1736,9 +1765,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
1736 most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick); 1765 most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
1737 aliased_images.push_back(&aliased); 1766 aliased_images.push_back(&aliased);
1738 any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled); 1767 any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
1739 if (True(aliased_image.flags & ImageFlagBits::GpuModified)) { 1768 any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified);
1740 image.flags |= ImageFlagBits::GpuModified;
1741 }
1742 } 1769 }
1743 } 1770 }
1744 if (aliased_images.empty()) { 1771 if (aliased_images.empty()) {
@@ -1753,6 +1780,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
1753 } 1780 }
1754 } 1781 }
1755 image.modification_tick = most_recent_tick; 1782 image.modification_tick = most_recent_tick;
1783 if (any_modified) {
1784 image.flags |= ImageFlagBits::GpuModified;
1785 }
1756 std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) { 1786 std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) {
1757 const ImageBase& lhs_image = slot_images[lhs->id]; 1787 const ImageBase& lhs_image = slot_images[lhs->id];
1758 const ImageBase& rhs_image = slot_images[rhs->id]; 1788 const ImageBase& rhs_image = slot_images[rhs->id];
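The SynchronizeAliases hunks fold the per-alias GpuModified check into one accumulated flag applied after the loop, so the image's own state no longer changes while the aliases are still being scanned. A small sketch of that accumulate-then-apply pattern, with plain bools standing in for ImageFlagBits:

#include <algorithm>
#include <cstdint>
#include <vector>

struct ImageState {
    bool gpu_modified = false;
    std::uint64_t modification_tick = 0;
};

// Accumulate the modified flag over every alias first, then apply it once.
void SynchronizeAliases(ImageState& image, const std::vector<ImageState>& aliases) {
    bool any_modified = image.gpu_modified;
    std::uint64_t most_recent_tick = image.modification_tick;
    for (const ImageState& aliased : aliases) {
        most_recent_tick = std::max(most_recent_tick, aliased.modification_tick);
        any_modified |= aliased.gpu_modified;
    }
    image.modification_tick = most_recent_tick;
    if (any_modified) {
        image.gpu_modified = true;
    }
}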
@@ -1931,6 +1961,7 @@ std::pair<FramebufferId, ImageViewId> TextureCache<P>::RenderTargetFromImage(
1931 .color_buffer_ids = {color_view_id}, 1961 .color_buffer_ids = {color_view_id},
1932 .depth_buffer_id = depth_view_id, 1962 .depth_buffer_id = depth_view_id,
1933 .size = {extent.width >> samples_x, extent.height >> samples_y}, 1963 .size = {extent.width >> samples_x, extent.height >> samples_y},
1964 .is_rescaled = is_rescaled,
1934 }); 1965 });
1935 return {framebuffer_id, view_id}; 1966 return {framebuffer_id, view_id};
1936} 1967}
@@ -1943,7 +1974,7 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
1943 const ImageViewBase& image_view = slot_image_views[id]; 1974 const ImageViewBase& image_view = slot_image_views[id];
1944 const ImageBase& image = slot_images[image_view.image_id]; 1975 const ImageBase& image = slot_images[image_view.image_id];
1945 const Extent3D size = image_view.size; 1976 const Extent3D size = image_view.size;
1946 const auto& regs = maxwell3d.regs; 1977 const auto& regs = maxwell3d->regs;
1947 const auto& scissor = regs.scissor_test[0]; 1978 const auto& scissor = regs.scissor_test[0];
1948 if (image.info.resources.levels > 1 || image.info.resources.layers > 1) { 1979 if (image.info.resources.levels > 1 || image.info.resources.layers > 1) {
1949 // Images with multiple resources can't be cleared in a single call 1980 // Images with multiple resources can't be cleared in a single call
@@ -1958,4 +1989,19 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
1958 scissor.max_y >= size.height; 1989 scissor.max_y >= size.height;
1959} 1990}
1960 1991
1992template <class P>
1993void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
1994 VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo>::CreateChannel(channel);
1995 const auto it = channel_map.find(channel.bind_id);
1996 auto* this_state = &channel_storage[it->second];
1997 const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()];
1998 this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id];
1999}
2000
 2001/// Allocate backing page-table storage when a new GPU address space is registered.
2002template <class P>
2003void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) {
2004 gpu_page_table_storage.emplace_back();
2005}
2006
1961} // namespace VideoCommon 2007} // namespace VideoCommon
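
The new CreateChannel override looks up the channel's bind id in channel_map, resolves the channel's GPU address space through address_spaces, and points the channel state at the matching entry of gpu_page_table_storage; OnGPUASRegister simply appends one empty map per newly seen address space. A rough sketch of that registration/bind flow under those assumptions (container and key types are simplified; the real cache keys ImageId vectors by page index):

    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <unordered_map>
    #include <vector>

    using PageTableSketch = std::unordered_map<uint64_t, std::vector<uint32_t>>;

    struct ChannelStateSketch {
        PageTableSketch* gpu_page_table = nullptr; // non-owning view into the storage below
    };

    struct TextureCacheSketch {
        // One page table per GPU address space; a deque keeps pointers stable as it grows.
        std::deque<PageTableSketch> gpu_page_table_storage;
        std::unordered_map<size_t, ChannelStateSketch> channels; // keyed by channel bind id

        // Mirrors OnGPUASRegister: a new address space gets fresh backing storage.
        size_t RegisterAddressSpace() {
            gpu_page_table_storage.emplace_back();
            return gpu_page_table_storage.size() - 1;
        }

        // Mirrors CreateChannel: bind a channel to its address space's page table.
        void CreateChannel(size_t bind_id, size_t storage_id) {
            channels[bind_id].gpu_page_table = &gpu_page_table_storage[storage_id];
        }
    };

Using a std::deque for the storage (as the header diff below does) matters here: unlike std::vector, appending never invalidates the pointers that earlier channels already hold.
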
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 7e6c6cef2..2fa8445eb 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -1,8 +1,10 @@
1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-3.0-or-later
3 3
4#pragma once 4#pragma once
5 5
6#include <deque>
7#include <limits>
6#include <mutex> 8#include <mutex>
7#include <span> 9#include <span>
8#include <type_traits> 10#include <type_traits>
@@ -11,9 +13,11 @@
11#include <queue> 13#include <queue>
12 14
13#include "common/common_types.h" 15#include "common/common_types.h"
16#include "common/hash.h"
14#include "common/literals.h" 17#include "common/literals.h"
15#include "common/lru_cache.h" 18#include "common/lru_cache.h"
16#include "video_core/compatible_formats.h" 19#include "video_core/compatible_formats.h"
20#include "video_core/control/channel_state_cache.h"
17#include "video_core/delayed_destruction_ring.h" 21#include "video_core/delayed_destruction_ring.h"
18#include "video_core/engines/fermi_2d.h" 22#include "video_core/engines/fermi_2d.h"
19#include "video_core/surface.h" 23#include "video_core/surface.h"
@@ -26,6 +30,10 @@
26#include "video_core/texture_cache/types.h" 30#include "video_core/texture_cache/types.h"
27#include "video_core/textures/texture.h" 31#include "video_core/textures/texture.h"
28 32
33namespace Tegra::Control {
34struct ChannelState;
35}
36
29namespace VideoCommon { 37namespace VideoCommon {
30 38
31using Tegra::Texture::SwizzleSource; 39using Tegra::Texture::SwizzleSource;
@@ -44,8 +52,35 @@ struct ImageViewInOut {
44 ImageViewId id{}; 52 ImageViewId id{};
45}; 53};
46 54
55using TextureCacheGPUMap = std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>;
56
57class TextureCacheChannelInfo : public ChannelInfo {
58public:
59 TextureCacheChannelInfo() = delete;
60 TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept;
61 TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete;
62 TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete;
63 TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default;
64 TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default;
65
66 DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
67 DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
68 std::vector<SamplerId> graphics_sampler_ids;
69 std::vector<ImageViewId> graphics_image_view_ids;
70
71 DescriptorTable<TICEntry> compute_image_table{gpu_memory};
72 DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
73 std::vector<SamplerId> compute_sampler_ids;
74 std::vector<ImageViewId> compute_image_view_ids;
75
76 std::unordered_map<TICEntry, ImageViewId> image_views;
77 std::unordered_map<TSCEntry, SamplerId> samplers;
78
79 TextureCacheGPUMap* gpu_page_table;
80};
81
47template <class P> 82template <class P>
48class TextureCache { 83class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> {
49 /// Address shift for caching images into a hash table 84 /// Address shift for caching images into a hash table
50 static constexpr u64 YUZU_PAGEBITS = 20; 85 static constexpr u64 YUZU_PAGEBITS = 20;
51 86
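
TextureCacheChannelInfo gathers everything that used to be a single set of members in TextureCache (the TIC/TSC descriptor tables, sampler and image-view id vectors, the image_views/samplers lookup maps, and the GPU page table pointer), and the cache now derives from ChannelSetupCaches so one such block exists per channel. A simplified sketch of what that changes for lookups, assuming a "currently bound channel" pointer maintained by the base class (names here are illustrative):

    #include <cstdint>
    #include <deque>
    #include <unordered_map>

    struct ChannelInfoSketch {
        // Stand-in for the per-channel image_views map (TICEntry -> ImageViewId).
        std::unordered_map<uint64_t, uint32_t> image_views;
    };

    struct CacheSketch {
        std::deque<ChannelInfoSketch> channel_storage; // one entry per created channel
        ChannelInfoSketch* current = nullptr;          // updated when a channel is bound

        // Lookups now go through the bound channel instead of a cache-wide map.
        const uint32_t* FindImageView(uint64_t tic_key) const {
            const auto it = current->image_views.find(tic_key);
            return it == current->image_views.end() ? nullptr : &it->second;
        }
    };
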
@@ -58,6 +93,8 @@ class TextureCache {
58 /// True when the API can provide info about the memory of the device. 93 /// True when the API can provide info about the memory of the device.
59 static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO; 94 static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
60 95
96 static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
97
61 static constexpr s64 TARGET_THRESHOLD = 4_GiB; 98 static constexpr s64 TARGET_THRESHOLD = 4_GiB;
62 static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB; 99 static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
63 static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB; 100 static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
@@ -77,16 +114,8 @@ class TextureCache {
77 PixelFormat src_format; 114 PixelFormat src_format;
78 }; 115 };
79 116
80 template <typename T>
81 struct IdentityHash {
82 [[nodiscard]] size_t operator()(T value) const noexcept {
83 return static_cast<size_t>(value);
84 }
85 };
86
87public: 117public:
88 explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&, 118 explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&);
89 Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&);
90 119
91 /// Notify the cache that a new frame has been queued 120 /// Notify the cache that a new frame has been queued
92 void TickFrame(); 121 void TickFrame();
@@ -142,7 +171,7 @@ public:
142 void UnmapMemory(VAddr cpu_addr, size_t size); 171 void UnmapMemory(VAddr cpu_addr, size_t size);
143 172
144 /// Remove images in a region 173 /// Remove images in a region
145 void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size); 174 void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size);
146 175
147 /// Blit an image with the given parameters 176 /// Blit an image with the given parameters
148 void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst, 177 void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
@@ -171,6 +200,9 @@ public:
171 200
172 [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept; 201 [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept;
173 202
203 /// Create channel state.
204 void CreateChannel(Tegra::Control::ChannelState& channel) final override;
205
174 std::mutex mutex; 206 std::mutex mutex;
175 207
176private: 208private:
@@ -205,6 +237,8 @@ private:
205 } 237 }
206 } 238 }
207 239
240 void OnGPUASRegister(size_t map_id) final override;
241
208 /// Runs the Garbage Collector. 242 /// Runs the Garbage Collector.
209 void RunGarbageCollector(); 243 void RunGarbageCollector();
210 244
@@ -273,7 +307,7 @@ private:
273 void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func); 307 void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func);
274 308
275 template <typename Func> 309 template <typename Func>
276 void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func); 310 void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);
277 311
278 template <typename Func> 312 template <typename Func>
279 void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func); 313 void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func);
@@ -338,31 +372,16 @@ private:
338 u64 GetScaledImageSizeBytes(ImageBase& image); 372 u64 GetScaledImageSizeBytes(ImageBase& image);
339 373
340 Runtime& runtime; 374 Runtime& runtime;
341 VideoCore::RasterizerInterface& rasterizer;
342 Tegra::Engines::Maxwell3D& maxwell3d;
343 Tegra::Engines::KeplerCompute& kepler_compute;
344 Tegra::MemoryManager& gpu_memory;
345 375
346 DescriptorTable<TICEntry> graphics_image_table{gpu_memory}; 376 VideoCore::RasterizerInterface& rasterizer;
347 DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory}; 377 std::deque<TextureCacheGPUMap> gpu_page_table_storage;
348 std::vector<SamplerId> graphics_sampler_ids;
349 std::vector<ImageViewId> graphics_image_view_ids;
350
351 DescriptorTable<TICEntry> compute_image_table{gpu_memory};
352 DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
353 std::vector<SamplerId> compute_sampler_ids;
354 std::vector<ImageViewId> compute_image_view_ids;
355 378
356 RenderTargets render_targets; 379 RenderTargets render_targets;
357 380
358 std::unordered_map<TICEntry, ImageViewId> image_views;
359 std::unordered_map<TSCEntry, SamplerId> samplers;
360 std::unordered_map<RenderTargets, FramebufferId> framebuffers; 381 std::unordered_map<RenderTargets, FramebufferId> framebuffers;
361 382
362 std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table; 383 std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table;
363 std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table; 384 std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
364 std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table;
365
366 std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views; 385 std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
367 386
368 VAddr virtual_invalid_space{}; 387 VAddr virtual_invalid_space{};
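
With the private IdentityHash removed, page_table and sparse_page_table now use Common::IdentityHash from common/hash.h, and the per-address-space GPU page tables live in gpu_page_table_storage instead of a single gpu_page_table member. An identity hash makes sense because the keys are already well-distributed page indices (addresses shifted by YUZU_PAGEBITS); a generic restatement of the idea, not the exact common/hash.h code:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    template <typename T>
    struct IdentityHashSketch {
        // The key is already a page index, so spreading its bits again buys nothing.
        [[nodiscard]] size_t operator()(T value) const noexcept {
            return static_cast<size_t>(value);
        }
    };

    using PageTableSketch =
        std::unordered_map<uint64_t, std::vector<uint32_t>, IdentityHashSketch<uint64_t>>;
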
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 1820823b2..1223df5a0 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
517 const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; 517 const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block;
518 518
519 UNIMPLEMENTED_IF(info.tile_width_spacing > 0); 519 UNIMPLEMENTED_IF(info.tile_width_spacing > 0);
520
521 UNIMPLEMENTED_IF(copy.image_offset.x != 0); 520 UNIMPLEMENTED_IF(copy.image_offset.x != 0);
522 UNIMPLEMENTED_IF(copy.image_offset.y != 0); 521 UNIMPLEMENTED_IF(copy.image_offset.y != 0);
523 UNIMPLEMENTED_IF(copy.image_offset.z != 0); 522 UNIMPLEMENTED_IF(copy.image_offset.z != 0);
@@ -755,7 +754,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
755 if (address == 0) { 754 if (address == 0) {
756 return false; 755 return false;
757 } 756 }
758 if (address > (1ULL << 48)) { 757 if (address >= (1ULL << 40)) {
759 return false; 758 return false;
760 } 759 }
761 if (gpu_memory.GpuToCpuAddress(address).has_value()) { 760 if (gpu_memory.GpuToCpuAddress(address).has_value()) {
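
The util.cpp hunk tightens the TIC address sanity check: the old test only rejected addresses strictly above 1 << 48, while the new one rejects anything at or above 1 << 40, matching a 40-bit GPU virtual address space. A standalone restatement of the bound (the helper name is mine; the 40-bit width is taken from the new constant):

    #include <cstdint>

    // Reject null descriptors and anything outside a 40-bit GPU virtual address space.
    constexpr bool IsPlausibleGpuAddress(uint64_t address) {
        constexpr uint64_t gpu_as_limit = 1ULL << 40;
        return address != 0 && address < gpu_as_limit;
    }

    static_assert(IsPlausibleGpuAddress(0x1000));
    static_assert(!IsPlausibleGpuAddress(0));
    static_assert(!IsPlausibleGpuAddress(1ULL << 40)); // the boundary value is now excluded too
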
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 913f8ebcb..52d067a2d 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -35,7 +35,7 @@ void incrpdep(u32& value) {
35 35
36template <bool TO_LINEAR, u32 BYTES_PER_PIXEL> 36template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
37void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth, 37void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth,
38 u32 block_height, u32 block_depth, u32 stride_alignment) { 38 u32 block_height, u32 block_depth, u32 stride) {
39 // The origin of the transformation can be configured here, leave it as zero as the current API 39 // The origin of the transformation can be configured here, leave it as zero as the current API
40 // doesn't expose it. 40 // doesn't expose it.
41 static constexpr u32 origin_x = 0; 41 static constexpr u32 origin_x = 0;
@@ -45,7 +45,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
45 // We can configure here a custom pitch 45 // We can configure here a custom pitch
46 // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch. 46 // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
47 const u32 pitch = width * BYTES_PER_PIXEL; 47 const u32 pitch = width * BYTES_PER_PIXEL;
48 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL;
49 48
50 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); 49 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
51 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); 50 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
@@ -89,6 +88,69 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
89 } 88 }
90} 89}
91 90
91template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
92void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height,
93 u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines,
94 u32 block_height, u32 block_depth, u32 pitch_linear) {
95 // The origin of the transformation can be configured here, leave it as zero as the current API
96 // doesn't expose it.
97 static constexpr u32 origin_z = 0;
98
99 // We can configure here a custom pitch
100 // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
101 const u32 pitch = pitch_linear;
102 const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT);
103
104 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
105 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
106 const u32 slice_size =
107 Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size;
108
109 const u32 block_height_mask = (1U << block_height) - 1;
110 const u32 block_depth_mask = (1U << block_depth) - 1;
111 const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth;
112
113 u32 unprocessed_lines = num_lines;
114 u32 extent_y = std::min(num_lines, height - origin_y);
115
116 for (u32 slice = 0; slice < depth; ++slice) {
117 const u32 z = slice + origin_z;
118 const u32 offset_z = (z >> block_depth) * slice_size +
119 ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height));
120 const u32 lines_in_y = std::min(unprocessed_lines, extent_y);
121 for (u32 line = 0; line < lines_in_y; ++line) {
122 const u32 y = line + origin_y;
123 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y);
124
125 const u32 block_y = y >> GOB_SIZE_Y_SHIFT;
126 const u32 offset_y = (block_y >> block_height) * block_size +
127 ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
128
129 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
130 for (u32 column = 0; column < extent_x;
131 ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
132 const u32 x = (column + origin_x) * BYTES_PER_PIXEL;
133 const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift;
134
135 const u32 base_swizzled_offset = offset_z + offset_y + offset_x;
136 const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y);
137
138 const u32 unswizzled_offset =
139 slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL;
140
141 u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset];
142 const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset];
143
144 std::memcpy(dst, src, BYTES_PER_PIXEL);
145 }
146 }
147 unprocessed_lines -= lines_in_y;
148 if (unprocessed_lines == 0) {
149 return;
150 }
151 }
152}
153
92template <bool TO_LINEAR> 154template <bool TO_LINEAR>
93void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, 155void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
94 u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { 156 u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) {
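
Both SwizzleImpl (which now receives the stride directly) and the new SwizzleSubrectImpl split every texel address into a coarse block offset, computed from z, y and x with the GOB and block height/depth shifts, plus a fine intra-GOB offset produced by pdep over SWIZZLE_X_BITS and SWIZZLE_Y_BITS. Written out without pdep, the intra-GOB part looks like this, assuming the usual 64x8-byte GOB layout (the helper name and spelled-out shifts are mine):

    #include <cstdint>

    // Byte offset of (x_bytes, y) inside one 512-byte GOB:
    // x bits 0-3 -> offset bits 0-3, x bit 4 -> bit 5, x bit 5 -> bit 8,
    // y bit 0 -> bit 4, y bits 1-2 -> bits 6-7.
    constexpr uint32_t GobOffsetSketch(uint32_t x_bytes, uint32_t y) {
        return (x_bytes & 0x0f) | ((y & 0x01) << 4) | ((x_bytes & 0x10) << 1) |
               ((y & 0x06) << 5) | ((x_bytes & 0x20) << 3);
    }

    static_assert(GobOffsetSketch(0, 0) == 0);
    static_assert(GobOffsetSketch(16, 0) == 32);  // x bit 4 lands on offset bit 5
    static_assert(GobOffsetSketch(0, 1) == 16);   // y bit 0 lands on offset bit 4
    static_assert(GobOffsetSketch(63, 7) == 511); // last byte of the GOB
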
@@ -111,122 +173,39 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
111 } 173 }
112} 174}
113 175
114template <u32 BYTES_PER_PIXEL>
115void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
116 u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit,
117 u32 offset_x, u32 offset_y) {
118 const u32 block_height = 1U << block_height_bit;
119 const u32 image_width_in_gobs =
120 (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X;
121 for (u32 line = 0; line < subrect_height; ++line) {
122 const u32 dst_y = line + offset_y;
123 const u32 gob_address_y =
124 (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
125 ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
126
127 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(dst_y);
128 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(offset_x * BYTES_PER_PIXEL);
129 for (u32 x = 0; x < subrect_width;
130 ++x, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
131 const u32 dst_x = x + offset_x;
132 const u32 gob_address =
133 gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height;
134 const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y);
135 const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL;
136
137 const u8* const source_line = unswizzled_data + unswizzled_offset;
138 u8* const dest_addr = swizzled_data + swizzled_offset;
139 std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL);
140 }
141 }
142}
143
144template <u32 BYTES_PER_PIXEL>
145void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height,
146 u32 origin_x, u32 origin_y, u8* output, const u8* input) {
147 const u32 stride = width * BYTES_PER_PIXEL;
148 const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
149 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height);
150
151 const u32 block_height_mask = (1U << block_height) - 1;
152 const u32 x_shift = GOB_SIZE_SHIFT + block_height;
153
154 for (u32 line = 0; line < line_count; ++line) {
155 const u32 src_y = line + origin_y;
156 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(src_y);
157
158 const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT;
159 const u32 src_offset_y = (block_y >> block_height) * block_size +
160 ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
161
162 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
163 for (u32 column = 0; column < line_length_in;
164 ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
165 const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL;
166 const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift;
167
168 const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y);
169 const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL;
170
171 std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL);
172 }
173 }
174}
175
176template <u32 BYTES_PER_PIXEL>
177void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
178 u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output,
179 const u8* input) {
180 UNIMPLEMENTED_IF(origin_x > 0);
181 UNIMPLEMENTED_IF(origin_y > 0);
182
183 const u32 stride = width * BYTES_PER_PIXEL;
184 const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
185 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
186
187 const u32 block_height_mask = (1U << block_height) - 1;
188 const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth;
189
190 for (u32 line = 0; line < line_count; ++line) {
191 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(line);
192 const u32 block_y = line / GOB_SIZE_Y;
193 const u32 dst_offset_y =
194 (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE;
195
196 u32 swizzled_x = 0;
197 for (u32 x = 0; x < line_length_in; ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
198 const u32 dst_offset =
199 ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y);
200 const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch;
201 std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL);
202 }
203 }
204}
205} // Anonymous namespace 176} // Anonymous namespace
206 177
207void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, 178void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
208 u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, 179 u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
209 u32 stride_alignment) { 180 u32 stride_alignment) {
181 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
182 const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
183 width = (width * bytes_per_pixel) >> new_bpp;
184 bytes_per_pixel = 1U << new_bpp;
210 Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, 185 Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
211 stride_alignment); 186 stride);
212} 187}
213 188
214void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, 189void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
215 u32 height, u32 depth, u32 block_height, u32 block_depth, 190 u32 height, u32 depth, u32 block_height, u32 block_depth,
216 u32 stride_alignment) { 191 u32 stride_alignment) {
192 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
193 const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
194 width = (width * bytes_per_pixel) >> new_bpp;
195 bytes_per_pixel = 1U << new_bpp;
217 Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, 196 Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
218 stride_alignment); 197 stride);
219} 198}
220 199
221void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, 200void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
222 u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, 201 u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
223 u32 block_height_bit, u32 offset_x, u32 offset_y) { 202 u32 block_height, u32 block_depth, u32 pitch_linear) {
224 switch (bytes_per_pixel) { 203 switch (bytes_per_pixel) {
225#define BPP_CASE(x) \ 204#define BPP_CASE(x) \
226 case x: \ 205 case x: \
227 return SwizzleSubrect<x>(subrect_width, subrect_height, source_pitch, swizzled_width, \ 206 return SwizzleSubrectImpl<true, x>(output, input, width, height, depth, origin_x, \
228 swizzled_data, unswizzled_data, block_height_bit, offset_x, \ 207 origin_y, extent_x, extent_y, block_height, \
229 offset_y); 208 block_depth, pitch_linear);
230 BPP_CASE(1) 209 BPP_CASE(1)
231 BPP_CASE(2) 210 BPP_CASE(2)
232 BPP_CASE(3) 211 BPP_CASE(3)
@@ -241,13 +220,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
241 } 220 }
242} 221}
243 222
244void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, 223void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
245 u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) { 224 u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
225 u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) {
246 switch (bytes_per_pixel) { 226 switch (bytes_per_pixel) {
247#define BPP_CASE(x) \ 227#define BPP_CASE(x) \
248 case x: \ 228 case x: \
249 return UnswizzleSubrect<x>(line_length_in, line_count, pitch, width, block_height, \ 229 return SwizzleSubrectImpl<false, x>(output, input, width, height, depth, origin_x, \
250 origin_x, origin_y, output, input); 230 origin_y, extent_x, extent_y, block_height, \
231 block_depth, pitch_linear);
251 BPP_CASE(1) 232 BPP_CASE(1)
252 BPP_CASE(2) 233 BPP_CASE(2)
253 BPP_CASE(3) 234 BPP_CASE(3)
@@ -262,55 +243,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
262 } 243 }
263} 244}
264 245
265void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
266 u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
267 u32 origin_y, u8* output, const u8* input) {
268 switch (bytes_per_pixel) {
269#define BPP_CASE(x) \
270 case x: \
271 return SwizzleSliceToVoxel<x>(line_length_in, line_count, pitch, width, height, \
272 block_height, block_depth, origin_x, origin_y, output, \
273 input);
274 BPP_CASE(1)
275 BPP_CASE(2)
276 BPP_CASE(3)
277 BPP_CASE(4)
278 BPP_CASE(6)
279 BPP_CASE(8)
280 BPP_CASE(12)
281 BPP_CASE(16)
282#undef BPP_CASE
283 default:
284 ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
285 }
286}
287
288void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
289 const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
290 u8* swizzle_data) {
291 const u32 block_height = 1U << block_height_bit;
292 const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X};
293 std::size_t count = 0;
294 for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
295 const std::size_t gob_address_y =
296 (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
297 ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
298 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(static_cast<u32>(y));
299 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(dst_x);
300 for (std::size_t x = dst_x; x < width && count < copy_size;
301 ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
302 const std::size_t gob_address =
303 gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height;
304 const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y);
305 const u8* source_line = source_data + count;
306 u8* dest_addr = swizzle_data + swizzled_offset;
307 count++;
308
309 *dest_addr = *source_line;
310 }
311 }
312}
313
314std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, 246std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
315 u32 block_height, u32 block_depth) { 247 u32 block_height, u32 block_depth) {
316 if (tiled) { 248 if (tiled) {
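
UnswizzleTexture and SwizzleTexture now compute the stride themselves and, before dispatching, fold the real bytes-per-pixel into the width: the exponent is capped at 4, so the kernel copies in the largest power-of-two chunks (up to 16 bytes) that evenly divide a row, while the total row byte count is unchanged. A standalone restatement of that arithmetic from the hunk above (the helper name is mine):

    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <utility>

    // Returns {adjusted_width, adjusted_bytes_per_pixel}; their product equals width * bpp.
    constexpr std::pair<uint32_t, uint32_t> FoldBytesPerPixel(uint32_t width, uint32_t bpp) {
        const uint32_t row_bytes = width * bpp;
        const uint32_t shift =
            std::min<uint32_t>(4, static_cast<uint32_t>(std::countr_zero(row_bytes)));
        return {row_bytes >> shift, 1u << shift};
    }

    static_assert(FoldBytesPerPixel(100, 4) == std::pair<uint32_t, uint32_t>{25, 16});
    static_assert(FoldBytesPerPixel(33, 3) == std::pair<uint32_t, uint32_t>{99, 1});
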
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 31a11708f..e70407692 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() {
40 } 40 }
41 return table; 41 return table;
42} 42}
43constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();
44 43
45/// Unswizzles a block linear texture into linear memory. 44/// Unswizzles a block linear texture into linear memory.
46void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, 45void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
@@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
57 u32 block_height, u32 block_depth); 56 u32 block_height, u32 block_depth);
58 57
59/// Copies an untiled subrectangle into a tiled surface. 58/// Copies an untiled subrectangle into a tiled surface.
60void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, 59void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
61 u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, 60 u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
62 u32 block_height_bit, u32 offset_x, u32 offset_y); 61 u32 block_height, u32 block_depth, u32 pitch_linear);
63 62
64/// Copies a tiled subrectangle into a linear surface. 63/// Copies a tiled subrectangle into a linear surface.
65void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, 64void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
66 u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input); 65 u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
67 66 u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear);
68/// @brief Swizzles a 2D array of pixels into a 3D texture
69/// @param line_length_in Number of pixels per line
70/// @param line_count Number of lines
71/// @param pitch Number of bytes per line
72/// @param width Width of the swizzled texture
73/// @param height Height of the swizzled texture
74/// @param bytes_per_pixel Number of bytes used per pixel
75/// @param block_height Block height shift
76/// @param block_depth Block depth shift
77/// @param origin_x Column offset in pixels of the swizzled texture
78/// @param origin_y Row offset in pixels of the swizzled texture
79/// @param output Pointer to the pixels of the swizzled texture
80/// @param input Pointer to the 2D array of pixels used as input
81/// @pre input and output points to an array large enough to hold the number of bytes used
82void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
83 u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
84 u32 origin_y, u8* output, const u8* input);
85
86void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
87 std::size_t copy_size, const u8* source_data, u8* swizzle_data);
88 67
89/// Obtains the offset of the gob for positions 'dst_x' & 'dst_y' 68/// Obtains the offset of the gob for positions 'dst_x' & 'dst_y'
90u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, 69u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
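
After this header change, both subrectangle helpers take spans plus explicit origin/extent/block parameters instead of raw pointers and per-path argument orders. A hypothetical call site copying a 16x16 linear patch into a block linear surface could look like the following; every size and parameter value is illustrative, and a real caller would derive them from the surface info and size the tiled buffer with CalculateSize:

    #include <cstdint>
    #include <vector>

    #include "video_core/textures/decoders.h"

    void CopyPatchExample() {
        constexpr uint32_t bytes_per_pixel = 4;
        constexpr uint32_t width = 256, height = 256, depth = 1; // swizzled surface size
        constexpr uint32_t origin_x = 32, origin_y = 64;         // destination offset in texels
        constexpr uint32_t extent_x = 16, extent_y = 16;         // patch size in texels/lines
        constexpr uint32_t block_height = 4, block_depth = 0;    // log2 of GOBs per block
        constexpr uint32_t pitch_linear = extent_x * bytes_per_pixel;

        std::vector<uint8_t> linear(extent_y * pitch_linear);
        std::vector<uint8_t> tiled(1u << 20); // oversized placeholder; use CalculateSize instead

        Tegra::Texture::SwizzleSubrect(tiled, linear, bytes_per_pixel, width, height, depth,
                                       origin_x, origin_y, extent_x, extent_y, block_height,
                                       block_depth, pitch_linear);
    }
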
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 795f16bfb..1b3f493bd 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -519,9 +519,7 @@ public:
519 dld{rhs.dld} {} 519 dld{rhs.dld} {}
520 520
521 /// Assign an allocation transfering ownership from another allocation. 521 /// Assign an allocation transfering ownership from another allocation.
522 /// Releases any previously held allocation.
523 PoolAllocations& operator=(PoolAllocations&& rhs) noexcept { 522 PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
524 Release();
525 allocations = std::move(rhs.allocations); 523 allocations = std::move(rhs.allocations);
526 num = rhs.num; 524 num = rhs.num;
527 device = rhs.device; 525 device = rhs.device;
@@ -530,11 +528,6 @@ public:
530 return *this; 528 return *this;
531 } 529 }
532 530
533 /// Destroys any held allocation.
534 ~PoolAllocations() {
535 Release();
536 }
537
538 /// Returns the number of allocations. 531 /// Returns the number of allocations.
539 std::size_t size() const noexcept { 532 std::size_t size() const noexcept {
540 return num; 533 return num;
@@ -557,19 +550,6 @@ public:
557 } 550 }
558 551
559private: 552private:
560 /// Destroys the held allocations if they exist.
561 void Release() noexcept {
562 if (!allocations) {
563 return;
564 }
565 const Span<AllocationType> span(allocations.get(), num);
566 const VkResult result = Free(device, pool, span, *dld);
567 // There's no way to report errors from a destructor.
568 if (result != VK_SUCCESS) {
569 std::terminate();
570 }
571 }
572
573 std::unique_ptr<AllocationType[]> allocations; 553 std::unique_ptr<AllocationType[]> allocations;
574 std::size_t num = 0; 554 std::size_t num = 0;
575 VkDevice device = nullptr; 555 VkDevice device = nullptr;
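
The vulkan_wrapper.h change is the "pool allocation double free" fix named in the merge: PoolAllocations no longer frees its handles in its destructor or before move assignment, so a pool-allocated object can no longer be freed both by the wrapper and again when its parent pool is reset or destroyed. A simplified, non-Vulkan model of the resulting ownership shape (types and names are illustrative, not the wrapper's API):

    #include <cstddef>
    #include <memory>
    #include <utility>

    template <typename HandleType>
    class PoolAllocationsSketch {
    public:
        PoolAllocationsSketch() = default;
        PoolAllocationsSketch(std::unique_ptr<HandleType[]> allocations_, std::size_t num_)
            : allocations{std::move(allocations_)}, num{num_} {}

        // Move assignment only transfers the handle array; nothing is freed here,
        // because the parent pool still owns the underlying objects.
        PoolAllocationsSketch& operator=(PoolAllocationsSketch&& rhs) noexcept {
            allocations = std::move(rhs.allocations);
            num = std::exchange(rhs.num, 0);
            return *this;
        }

        std::size_t size() const noexcept {
            return num;
        }

    private:
        std::unique_ptr<HandleType[]> allocations;
        std::size_t num = 0;
    };
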