-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h           53
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp              107
-rw-r--r--  src/video_core/engines/maxwell_dma.h                 88
-rw-r--r--  src/video_core/renderer_null/null_rasterizer.h        8
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h       10
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp    234
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h       11
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp  14
-rw-r--r--  src/video_core/texture_cache/image_info.cpp          45
-rw-r--r--  src/video_core/texture_cache/image_info.h             2
-rw-r--r--  src/video_core/texture_cache/texture_cache.h         69
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h     5
-rw-r--r--  src/video_core/texture_cache/types.h                  1
-rw-r--r--  src/video_core/texture_cache/util.cpp                98
-rw-r--r--  src/video_core/texture_cache/util.h                  10
15 files changed, 658 insertions(+), 97 deletions(-)
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 06fd40851..2a150ccdc 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -55,6 +55,19 @@ constexpr u32 NUM_STORAGE_BUFFERS = 16;
 constexpr u32 NUM_TEXTURE_BUFFERS = 16;
 constexpr u32 NUM_STAGES = 5;
 
+enum class ObtainBufferSynchronize : u32 {
+    NoSynchronize = 0,
+    FullSynchronize = 1,
+    SynchronizeNoDirty = 2,
+};
+
+enum class ObtainBufferOperation : u32 {
+    DoNothing = 0,
+    MarkAsWritten = 1,
+    DiscardWrite = 2,
+    MarkQuery = 3,
+};
+
 using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>;
 using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
 
@@ -191,6 +204,10 @@ public:
 
     bool DMAClear(GPUVAddr src_address, u64 amount, u32 value);
 
+    [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(GPUVAddr gpu_addr, u32 size,
+                                                       ObtainBufferSynchronize sync_info,
+                                                       ObtainBufferOperation post_op);
+
     /// Return true when a CPU region is modified from the GPU
     [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
 
@@ -642,6 +659,42 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
 }
 
 template <class P>
+std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_addr, u32 size,
+                                                                 ObtainBufferSynchronize sync_info,
+                                                                 ObtainBufferOperation post_op) {
+    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    if (!cpu_addr) {
+        return {&slot_buffers[NULL_BUFFER_ID], 0};
+    }
+    const BufferId buffer_id = FindBuffer(*cpu_addr, size);
+    Buffer& buffer = slot_buffers[buffer_id];
+
+    // synchronize op
+    switch (sync_info) {
+    case ObtainBufferSynchronize::FullSynchronize:
+        SynchronizeBuffer(buffer, *cpu_addr, size);
+        break;
+    default:
+        break;
+    }
+
+    switch (post_op) {
+    case ObtainBufferOperation::MarkAsWritten:
+        MarkWrittenBuffer(buffer_id, *cpu_addr, size);
+        break;
+    case ObtainBufferOperation::DiscardWrite: {
+        IntervalType interval{*cpu_addr, size};
+        ClearDownload(interval);
+        break;
+    }
+    default:
+        break;
+    }
+
+    return {&buffer, buffer.Offset(*cpu_addr)};
+}
+
+template <class P>
 void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
                                                u32 size) {
     const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
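Note: a minimal sketch of the intended call pattern for the new ObtainBuffer API, as the Vulkan backend below uses it; gpu_addr and copy_size here are placeholders, not part of this change:

    // Sketch only: obtain the cached buffer backing a GPU range before
    // recording a transfer that writes through it. FullSynchronize uploads
    // any pending CPU-side data first; MarkAsWritten flags the range as
    // GPU-modified so later CPU reads trigger a download.
    const auto [buffer, offset] = buffer_cache.ObtainBuffer(
        gpu_addr, copy_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
        VideoCommon::ObtainBufferOperation::MarkAsWritten);
    // offset locates gpu_addr inside the returned Buffer; commands are then
    // recorded against buffer->Handle() at that offset.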
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 7762c7d96..e68850dc5 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -14,7 +14,13 @@
 #include "video_core/textures/decoders.h"
 
 MICROPROFILE_DECLARE(GPU_DMAEngine);
+MICROPROFILE_DECLARE(GPU_DMAEngineBL);
+MICROPROFILE_DECLARE(GPU_DMAEngineLB);
+MICROPROFILE_DECLARE(GPU_DMAEngineBB);
 MICROPROFILE_DEFINE(GPU_DMAEngine, "GPU", "DMA Engine", MP_RGB(224, 224, 128));
+MICROPROFILE_DEFINE(GPU_DMAEngineBL, "GPU", "DMA Engine Block - Linear", MP_RGB(224, 224, 128));
+MICROPROFILE_DEFINE(GPU_DMAEngineLB, "GPU", "DMA Engine Linear - Block", MP_RGB(224, 224, 128));
+MICROPROFILE_DEFINE(GPU_DMAEngineBB, "GPU", "DMA Engine Block - Block", MP_RGB(224, 224, 128));
 
 namespace Tegra::Engines {
 
@@ -72,6 +78,7 @@ void MaxwellDMA::Launch() {
         memory_manager.FlushCaching();
         if (!is_src_pitch && !is_dst_pitch) {
             // If both the source and the destination are in block layout, assert.
+            MICROPROFILE_SCOPE(GPU_DMAEngineBB);
             CopyBlockLinearToBlockLinear();
             ReleaseSemaphore();
             return;
@@ -87,8 +94,10 @@ void MaxwellDMA::Launch() {
             }
         } else {
             if (!is_src_pitch && is_dst_pitch) {
+                MICROPROFILE_SCOPE(GPU_DMAEngineBL);
                 CopyBlockLinearToPitch();
             } else {
+                MICROPROFILE_SCOPE(GPU_DMAEngineLB);
                 CopyPitchToBlockLinear();
             }
         }
@@ -153,21 +162,35 @@ void MaxwellDMA::Launch() {
 }
 
 void MaxwellDMA::CopyBlockLinearToPitch() {
-    UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
-    UNIMPLEMENTED_IF(regs.src_params.layer != 0);
-
-    const bool is_remapping = regs.launch_dma.remap_enable != 0;
-
-    // Optimized path for micro copies.
-    const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
-    if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
-        regs.src_params.height > GOB_SIZE_Y) {
-        FastCopyBlockLinearToPitch();
+    UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
+
+    u32 bytes_per_pixel = 1;
+    DMA::ImageOperand src_operand;
+    src_operand.bytes_per_pixel = bytes_per_pixel;
+    src_operand.params = regs.src_params;
+    src_operand.address = regs.offset_in;
+
+    DMA::BufferOperand dst_operand;
+    dst_operand.pitch = regs.pitch_out;
+    dst_operand.width = regs.line_length_in;
+    dst_operand.height = regs.line_count;
+    dst_operand.address = regs.offset_out;
+    DMA::ImageCopy copy_info{};
+    copy_info.length_x = regs.line_length_in;
+    copy_info.length_y = regs.line_count;
+    auto& accelerate = rasterizer->AccessAccelerateDMA();
+    if (accelerate.ImageToBuffer(copy_info, src_operand, dst_operand)) {
         return;
     }
 
+    UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
+    UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0);
+    UNIMPLEMENTED_IF(regs.src_params.block_size.depth == 0 && regs.src_params.depth != 1);
+
     // Deswizzle the input and copy it over.
-    const Parameters& src_params = regs.src_params;
+    const DMA::Parameters& src_params = regs.src_params;
+
+    const bool is_remapping = regs.launch_dma.remap_enable != 0;
 
     const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
     const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
@@ -187,7 +210,7 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
         x_offset >>= bpp_shift;
     }
 
-    const u32 bytes_per_pixel = base_bpp << bpp_shift;
+    bytes_per_pixel = base_bpp << bpp_shift;
     const u32 height = src_params.height;
     const u32 depth = src_params.depth;
     const u32 block_height = src_params.block_size.height;
@@ -195,11 +218,12 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     const size_t src_size =
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
 
+    const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
     read_buffer.resize_destructive(src_size);
     write_buffer.resize_destructive(dst_size);
 
-    memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
-    memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
+    memory_manager.ReadBlock(src_operand.address, read_buffer.data(), src_size);
+    memory_manager.ReadBlockUnsafe(dst_operand.address, write_buffer.data(), dst_size);
 
     UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
                      src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
@@ -216,6 +240,24 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
     const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
     const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
 
+    u32 bytes_per_pixel = 1;
+    DMA::ImageOperand dst_operand;
+    dst_operand.bytes_per_pixel = bytes_per_pixel;
+    dst_operand.params = regs.dst_params;
+    dst_operand.address = regs.offset_out;
+    DMA::BufferOperand src_operand;
+    src_operand.pitch = regs.pitch_in;
+    src_operand.width = regs.line_length_in;
+    src_operand.height = regs.line_count;
+    src_operand.address = regs.offset_in;
+    DMA::ImageCopy copy_info{};
+    copy_info.length_x = regs.line_length_in;
+    copy_info.length_y = regs.line_count;
+    auto& accelerate = rasterizer->AccessAccelerateDMA();
+    if (accelerate.BufferToImage(copy_info, src_operand, dst_operand)) {
+        return;
+    }
+
     const auto& dst_params = regs.dst_params;
 
     const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
@@ -233,7 +275,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         x_offset >>= bpp_shift;
     }
 
-    const u32 bytes_per_pixel = base_bpp << bpp_shift;
+    bytes_per_pixel = base_bpp << bpp_shift;
     const u32 height = dst_params.height;
     const u32 depth = dst_params.depth;
     const u32 block_height = dst_params.block_size.height;
@@ -260,45 +302,14 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
     memory_manager.WriteBlockCached(regs.offset_out, write_buffer.data(), dst_size);
 }
 
-void MaxwellDMA::FastCopyBlockLinearToPitch() {
-    const u32 bytes_per_pixel = 1U;
-    const size_t src_size = GOB_SIZE;
-    const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
-    u32 pos_x = regs.src_params.origin.x;
-    u32 pos_y = regs.src_params.origin.y;
-    const u64 offset = GetGOBOffset(regs.src_params.width, regs.src_params.height, pos_x, pos_y,
-                                    regs.src_params.block_size.height, bytes_per_pixel);
-    const u32 x_in_gob = 64 / bytes_per_pixel;
-    pos_x = pos_x % x_in_gob;
-    pos_y = pos_y % 8;
-
-    read_buffer.resize_destructive(src_size);
-    write_buffer.resize_destructive(dst_size);
-
-    if (Settings::IsGPULevelExtreme()) {
-        memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(), src_size);
-        memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
-    } else {
-        memory_manager.ReadBlockUnsafe(regs.offset_in + offset, read_buffer.data(), src_size);
-        memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
-    }
-
-    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
-                     regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
-                     regs.src_params.block_size.height, regs.src_params.block_size.depth,
-                     regs.pitch_out);
-
-    memory_manager.WriteBlockCached(regs.offset_out, write_buffer.data(), dst_size);
-}
-
 void MaxwellDMA::CopyBlockLinearToBlockLinear() {
     UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
 
     const bool is_remapping = regs.launch_dma.remap_enable != 0;
 
     // Deswizzle the input and copy it over.
-    const Parameters& src = regs.src_params;
-    const Parameters& dst = regs.dst_params;
+    const DMA::Parameters& src = regs.src_params;
+    const DMA::Parameters& dst = regs.dst_params;
 
     const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
     const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
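Note: both rewritten copy paths now share the same try-accelerate-then-fall-back shape; a condensed sketch of CopyBlockLinearToPitch() above, for review (not literal code):

    // Describe the transfer with the new DMA operand structs, offer it to
    // the renderer, and only fall back to the CPU deswizzle when declined.
    auto& accelerate = rasterizer->AccessAccelerateDMA();
    if (accelerate.ImageToBuffer(copy_info, src_operand, dst_operand)) {
        return; // Copy performed on the GPU; skip the CPU readback path.
    }
    // ...CPU fallback: ReadBlock + UnswizzleSubrect + WriteBlockCached...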
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 0e594fa74..69e26cb32 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -24,6 +24,54 @@ namespace VideoCore {
 class RasterizerInterface;
 }
 
+namespace Tegra {
+namespace DMA {
+
+union Origin {
+    BitField<0, 16, u32> x;
+    BitField<16, 16, u32> y;
+};
+static_assert(sizeof(Origin) == 4);
+
+struct ImageCopy {
+    u32 length_x{};
+    u32 length_y{};
+};
+
+union BlockSize {
+    BitField<0, 4, u32> width;
+    BitField<4, 4, u32> height;
+    BitField<8, 4, u32> depth;
+    BitField<12, 4, u32> gob_height;
+};
+static_assert(sizeof(BlockSize) == 4);
+
+struct Parameters {
+    BlockSize block_size;
+    u32 width;
+    u32 height;
+    u32 depth;
+    u32 layer;
+    Origin origin;
+};
+static_assert(sizeof(Parameters) == 24);
+
+struct ImageOperand {
+    u32 bytes_per_pixel;
+    Parameters params;
+    GPUVAddr address;
+};
+
+struct BufferOperand {
+    u32 pitch;
+    u32 width;
+    u32 height;
+    GPUVAddr address;
+};
+
+} // namespace DMA
+} // namespace Tegra
+
 namespace Tegra::Engines {
 
 class AccelerateDMAInterface {
@@ -32,6 +80,12 @@ public:
     virtual bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) = 0;
 
     virtual bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) = 0;
+
+    virtual bool ImageToBuffer(const DMA::ImageCopy& copy_info, const DMA::ImageOperand& src,
+                               const DMA::BufferOperand& dst) = 0;
+
+    virtual bool BufferToImage(const DMA::ImageCopy& copy_info, const DMA::BufferOperand& src,
+                               const DMA::ImageOperand& dst) = 0;
 };
 
 /**
@@ -51,30 +105,6 @@ public:
         }
     };
 
-    union BlockSize {
-        BitField<0, 4, u32> width;
-        BitField<4, 4, u32> height;
-        BitField<8, 4, u32> depth;
-        BitField<12, 4, u32> gob_height;
-    };
-    static_assert(sizeof(BlockSize) == 4);
-
-    union Origin {
-        BitField<0, 16, u32> x;
-        BitField<16, 16, u32> y;
-    };
-    static_assert(sizeof(Origin) == 4);
-
-    struct Parameters {
-        BlockSize block_size;
-        u32 width;
-        u32 height;
-        u32 depth;
-        u32 layer;
-        Origin origin;
-    };
-    static_assert(sizeof(Parameters) == 24);
-
     struct Semaphore {
         PackedGPUVAddr address;
         u32 payload;
@@ -227,8 +257,6 @@ private:
 
     void CopyBlockLinearToBlockLinear();
 
-    void FastCopyBlockLinearToPitch();
-
     void ReleaseSemaphore();
 
     void ConsumeSinkImpl() override;
@@ -261,17 +289,17 @@ private:
             u32 reserved05[0x3f];
             PackedGPUVAddr offset_in;
             PackedGPUVAddr offset_out;
-            u32 pitch_in;
-            u32 pitch_out;
+            s32 pitch_in;
+            s32 pitch_out;
             u32 line_length_in;
             u32 line_count;
             u32 reserved06[0xb6];
             u32 remap_consta_value;
             u32 remap_constb_value;
             RemapConst remap_const;
-            Parameters dst_params;
+            DMA::Parameters dst_params;
             u32 reserved07[0x1];
-            Parameters src_params;
+            DMA::Parameters src_params;
             u32 reserved08[0x275];
             u32 pm_trigger_end;
             u32 reserved09[0x3ba];
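Note: for readers unfamiliar with Common::BitField, the relocated Tegra::DMA types pack hardware register words bit-wise; a small sketch with illustrative values (not part of this diff):

    // Illustrative only: Origin packs both coordinates into one 32-bit word.
    Tegra::DMA::Origin origin{};
    origin.x.Assign(64); // bits 0-15
    origin.y.Assign(32); // bits 16-31
    // BlockSize fields hold log2 GOB counts per dimension, so height == 4
    // means 2^4 = 16 GOBs per block in Y.
    Tegra::DMA::BlockSize block{};
    block.height.Assign(4);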
diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h
index 51f896e43..0c59e6a1f 100644
--- a/src/video_core/renderer_null/null_rasterizer.h
+++ b/src/video_core/renderer_null/null_rasterizer.h
@@ -22,6 +22,14 @@ public:
     explicit AccelerateDMA();
     bool BufferCopy(GPUVAddr start_address, GPUVAddr end_address, u64 amount) override;
     bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
+    bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src,
+                       const Tegra::DMA::BufferOperand& dst) override {
+        return false;
+    }
+    bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src,
+                       const Tegra::DMA::ImageOperand& dst) override {
+        return false;
+    }
 };
 
 class RasterizerNull final : public VideoCore::RasterizerAccelerated,
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 0c45832ae..7e21fc43d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -56,6 +56,16 @@ public:
 
     bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
 
+    bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src,
+                       const Tegra::DMA::BufferOperand& dst) override {
+        return false;
+    }
+
+    bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src,
+                       const Tegra::DMA::ImageOperand& dst) override {
+        return false;
+    }
+
 private:
     BufferCache& buffer_cache;
 };
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 719edbcfb..f085d53a1 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -172,7 +172,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
       pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
                      render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
-      query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache},
+      query_cache{*this, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
       wfi_event(device.GetLogical().CreateEvent()) {
     scheduler.SetQueryCache(query_cache);
@@ -756,7 +756,9 @@ void RasterizerVulkan::FlushWork() {
     draw_counter = 0;
 }
 
-AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
+AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_, TextureCache& texture_cache_,
+                             Scheduler& scheduler_)
+    : buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, scheduler{scheduler_} {}
 
 bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
     std::scoped_lock lock{buffer_cache.mutex};
@@ -768,6 +770,234 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
     return buffer_cache.DMACopy(src_address, dest_address, amount);
 }
 
+bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
+                                  const Tegra::DMA::ImageOperand& src,
+                                  const Tegra::DMA::BufferOperand& dst) {
+    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    auto query_image = texture_cache.ObtainImage(src, false);
+    if (!query_image) {
+        return false;
+    }
+    auto* image = query_image->first;
+    auto [level, base] = query_image->second;
+    const u32 buffer_size = static_cast<u32>(dst.pitch * dst.height);
+    const auto [buffer, offset] = buffer_cache.ObtainBuffer(
+        dst.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
+        VideoCommon::ObtainBufferOperation::MarkAsWritten);
+
+    const bool is_rescaled = image->IsRescaled();
+    if (is_rescaled) {
+        image->ScaleDown();
+    }
+    VkImageSubresourceLayers subresources{
+        .aspectMask = image->AspectMask(),
+        .mipLevel = level,
+        .baseArrayLayer = base,
+        .layerCount = 1,
+    };
+    const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
+    const auto convert = [old_bpp = src.bytes_per_pixel, bpp](u32 value) {
+        return (old_bpp * value) / bpp;
+    };
+    const u32 base_x = convert(src.params.origin.x.Value());
+    const u32 base_y = src.params.origin.y.Value();
+    const u32 length_x = convert(copy_info.length_x);
+    const u32 length_y = copy_info.length_y;
+    VkOffset3D image_offset{
+        .x = static_cast<s32>(base_x),
+        .y = static_cast<s32>(base_y),
+        .z = 0,
+    };
+    VkExtent3D image_extent{
+        .width = length_x,
+        .height = length_y,
+        .depth = 1,
+    };
+    auto buff_info(dst);
+    buff_info.pitch = convert(dst.pitch);
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([src_image = image->Handle(), dst_buffer = buffer->Handle(),
+                      buffer_offset = offset, subresources, image_offset, image_extent,
+                      buff_info](vk::CommandBuffer cmdbuf) {
+        const std::array buffer_copy_info{
+            VkBufferImageCopy{
+                .bufferOffset = buffer_offset,
+                .bufferRowLength = buff_info.pitch,
+                .bufferImageHeight = buff_info.height,
+                .imageSubresource = subresources,
+                .imageOffset = image_offset,
+                .imageExtent = image_extent,
+            },
+        };
+        const VkImageSubresourceRange range{
+            .aspectMask = subresources.aspectMask,
+            .baseMipLevel = subresources.mipLevel,
+            .levelCount = 1,
+            .baseArrayLayer = subresources.baseArrayLayer,
+            .layerCount = 1,
+        };
+        static constexpr VkMemoryBarrier WRITE_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+        };
+        const std::array pre_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_TRANSFER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+                .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = src_image,
+                .subresourceRange = range,
+            },
+        };
+        const std::array post_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = 0,
+                .dstAccessMask = 0,
+                .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = src_image,
+                .subresourceRange = range,
+            },
+        };
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               0, {}, {}, pre_barriers);
+        cmdbuf.CopyImageToBuffer(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer,
+                                 buffer_copy_info);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               0, WRITE_BARRIER, nullptr, post_barriers);
+    });
+    if (is_rescaled) {
+        image->ScaleUp(true);
+    }
+    return true;
+}
+
+bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
+                                  const Tegra::DMA::BufferOperand& src,
+                                  const Tegra::DMA::ImageOperand& dst) {
+    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    auto query_image = texture_cache.ObtainImage(dst, true);
+    if (!query_image) {
+        return false;
+    }
+    auto* image = query_image->first;
+    auto [level, base] = query_image->second;
+    const u32 buffer_size = static_cast<u32>(src.pitch * src.height);
+    const auto [buffer, offset] = buffer_cache.ObtainBuffer(
+        src.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
+        VideoCommon::ObtainBufferOperation::DoNothing);
+    const bool is_rescaled = image->IsRescaled();
+    if (is_rescaled) {
+        image->ScaleDown(true);
+    }
+    VkImageSubresourceLayers subresources{
+        .aspectMask = image->AspectMask(),
+        .mipLevel = level,
+        .baseArrayLayer = base,
+        .layerCount = 1,
+    };
+    const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
+    const auto convert = [old_bpp = dst.bytes_per_pixel, bpp](u32 value) {
+        return (old_bpp * value) / bpp;
+    };
+    const u32 base_x = convert(dst.params.origin.x.Value());
+    const u32 base_y = dst.params.origin.y.Value();
+    const u32 length_x = convert(copy_info.length_x);
+    const u32 length_y = copy_info.length_y;
+    VkOffset3D image_offset{
+        .x = static_cast<s32>(base_x),
+        .y = static_cast<s32>(base_y),
+        .z = 0,
+    };
+    VkExtent3D image_extent{
+        .width = length_x,
+        .height = length_y,
+        .depth = 1,
+    };
+    auto buff_info(src);
+    buff_info.pitch = convert(src.pitch);
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([dst_image = image->Handle(), src_buffer = buffer->Handle(),
+                      buffer_offset = offset, subresources, image_offset, image_extent,
+                      buff_info](vk::CommandBuffer cmdbuf) {
+        const std::array buffer_copy_info{
+            VkBufferImageCopy{
+                .bufferOffset = buffer_offset,
+                .bufferRowLength = buff_info.pitch,
+                .bufferImageHeight = buff_info.height,
+                .imageSubresource = subresources,
+                .imageOffset = image_offset,
+                .imageExtent = image_extent,
+            },
+        };
+        const VkImageSubresourceRange range{
+            .aspectMask = subresources.aspectMask,
+            .baseMipLevel = subresources.mipLevel,
+            .levelCount = 1,
+            .baseArrayLayer = subresources.baseArrayLayer,
+            .layerCount = 1,
+        };
+        static constexpr VkMemoryBarrier READ_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
+        };
+        const std::array pre_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_TRANSFER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+                .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = dst_image,
+                .subresourceRange = range,
+            },
+        };
+        const std::array post_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = 0,
+                .dstAccessMask = 0,
+                .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = dst_image,
+                .subresourceRange = range,
+            },
+        };
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               0, READ_BARRIER, {}, pre_barriers);
+        cmdbuf.CopyBufferToImage(src_buffer, dst_image, VK_IMAGE_LAYOUT_GENERAL, buffer_copy_info);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               0, nullptr, nullptr, post_barriers);
+    });
+    if (is_rescaled) {
+        image->ScaleUp();
+    }
+    return true;
+}
+
 void RasterizerVulkan::UpdateDynamicStates() {
     auto& regs = maxwell3d->regs;
     UpdateViewportsState(regs);
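Note: the convert lambda in both new functions changes units from the DMA engine's bytes-per-pixel to the cached image's texel size; a worked example with format sizes chosen for illustration:

    // The DMA engine describes the copy in src.bytes_per_pixel units (1 byte
    // here), while the cached image may use a wider texel, e.g. 4 bytes:
    //     convert(value) = (old_bpp * value) / bpp = value / 4
    // so origin.x = 512 becomes texel 128 and length_x = 1024 becomes a
    // width of 256, and dst.pitch is rescaled the same way before it is used
    // as bufferRowLength (which Vulkan counts in texels, not bytes).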
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index a0508b57c..7746c5434 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -45,14 +45,23 @@ class StateTracker;
 
 class AccelerateDMA : public Tegra::Engines::AccelerateDMAInterface {
 public:
-    explicit AccelerateDMA(BufferCache& buffer_cache);
+    explicit AccelerateDMA(BufferCache& buffer_cache, TextureCache& texture_cache,
+                           Scheduler& scheduler);
 
     bool BufferCopy(GPUVAddr start_address, GPUVAddr end_address, u64 amount) override;
 
     bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
 
+    bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src,
+                       const Tegra::DMA::BufferOperand& dst) override;
+
+    bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src,
+                       const Tegra::DMA::ImageOperand& dst) override;
+
 private:
     BufferCache& buffer_cache;
+    TextureCache& texture_cache;
+    Scheduler& scheduler;
 };
 
 class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 80adb70eb..8a204f93f 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -864,13 +864,19 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
     const VkImageAspectFlags src_aspect_mask = src.AspectMask();
     const VkImageAspectFlags dst_aspect_mask = dst.AspectMask();
 
-    std::ranges::transform(copies, vk_in_copies.begin(), [src_aspect_mask](const auto& copy) {
-        return MakeBufferImageCopy(copy, true, src_aspect_mask);
-    });
+    const auto bpp_in = BytesPerBlock(src.info.format) / DefaultBlockWidth(src.info.format);
+    const auto bpp_out = BytesPerBlock(dst.info.format) / DefaultBlockWidth(dst.info.format);
+    std::ranges::transform(copies, vk_in_copies.begin(),
+                           [src_aspect_mask, bpp_in, bpp_out](const auto& copy) {
+                               auto copy2 = copy;
+                               copy2.src_offset.x = (bpp_out * copy.src_offset.x) / bpp_in;
+                               copy2.extent.width = (bpp_out * copy.extent.width) / bpp_in;
+                               return MakeBufferImageCopy(copy2, true, src_aspect_mask);
+                           });
     std::ranges::transform(copies, vk_out_copies.begin(), [dst_aspect_mask](const auto& copy) {
         return MakeBufferImageCopy(copy, false, dst_aspect_mask);
     });
-    const u32 img_bpp = BytesPerBlock(src.info.format);
+    const u32 img_bpp = BytesPerBlock(dst.info.format);
     size_t total_size = 0;
     for (const auto& copy : copies) {
         total_size += copy.extent.width * copy.extent.height * copy.extent.depth * img_bpp;
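Note: a quick numeric check of the new ReinterpretImage scaling, assuming the copy list is expressed in destination-format texels (formats illustrative):

    // Reinterpreting 16-byte R32G32B32A32 data as 4-byte A8B8G8R8:
    //     bpp_in = 16, bpp_out = 4
    //     copy2.extent.width = (bpp_out * width) / bpp_in = width / 4
    // A 256-texel-wide destination copy therefore reads only 64 source
    // texels, i.e. the same byte span on both sides, which is also why
    // total_size is now computed with the destination format's bpp.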
diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp
index e9100091e..a1296b574 100644
--- a/src/video_core/texture_cache/image_info.cpp
+++ b/src/video_core/texture_cache/image_info.cpp
@@ -216,10 +216,51 @@ ImageInfo::ImageInfo(const Tegra::Engines::Fermi2D::Surface& config) noexcept {
             .height = config.height,
             .depth = 1,
         };
-        rescaleable = block.depth == 0;
-        rescaleable &= size.height > 256;
+        rescaleable = block.depth == 0 && size.height > 256;
         downscaleable = size.height > 512;
     }
 }
 
+static PixelFormat ByteSizeToFormat(u32 bytes_per_pixel) {
+    switch (bytes_per_pixel) {
+    case 1:
+        return PixelFormat::R8_UINT;
+    case 2:
+        return PixelFormat::R8G8_UINT;
+    case 4:
+        return PixelFormat::A8B8G8R8_UINT;
+    case 8:
+        return PixelFormat::R16G16B16A16_UINT;
+    case 16:
+        return PixelFormat::R32G32B32A32_UINT;
+    default:
+        UNIMPLEMENTED();
+        return PixelFormat::Invalid;
+    }
+}
+
+ImageInfo::ImageInfo(const Tegra::DMA::ImageOperand& config) noexcept {
+    const u32 bytes_per_pixel = config.bytes_per_pixel;
+    format = ByteSizeToFormat(bytes_per_pixel);
+    type = config.params.block_size.depth > 0 ? ImageType::e3D : ImageType::e2D;
+    num_samples = 1;
+    block = Extent3D{
+        .width = config.params.block_size.width,
+        .height = config.params.block_size.height,
+        .depth = config.params.block_size.depth,
+    };
+    size = Extent3D{
+        .width = config.params.width,
+        .height = config.params.height,
+        .depth = config.params.depth,
+    };
+    tile_width_spacing = 0;
+    resources.levels = 1;
+    resources.layers = 1;
+    layer_stride = CalculateLayerStride(*this);
+    maybe_unaligned_layer_stride = CalculateLayerSize(*this);
+    rescaleable = block.depth == 0 && size.height > 256;
+    downscaleable = size.height > 512;
+}
+
 } // namespace VideoCommon
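Note: a minimal sketch of the new constructor's format and type deduction; the operand values below are hypothetical, not part of this diff:

    // Hypothetical operand: 4 bytes per pixel, flat block layout.
    Tegra::DMA::ImageOperand operand{};
    operand.bytes_per_pixel = 4;               // ByteSizeToFormat -> A8B8G8R8_UINT
    operand.params.block_size.depth.Assign(0); // depth == 0 -> ImageType::e2D
    operand.params.width = 256;
    operand.params.height = 256;
    operand.params.depth = 1;
    const VideoCommon::ImageInfo info{operand}; // one level, one layer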
diff --git a/src/video_core/texture_cache/image_info.h b/src/video_core/texture_cache/image_info.h
index 93755e15e..a12f5b44f 100644
--- a/src/video_core/texture_cache/image_info.h
+++ b/src/video_core/texture_cache/image_info.h
@@ -5,6 +5,7 @@
 
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
 #include "video_core/surface.h"
 #include "video_core/texture_cache/types.h"
 
@@ -19,6 +20,7 @@ struct ImageInfo {
     explicit ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs, size_t index) noexcept;
     explicit ImageInfo(const Tegra::Engines::Maxwell3D::Regs& regs) noexcept;
     explicit ImageInfo(const Tegra::Engines::Fermi2D::Surface& config) noexcept;
+    explicit ImageInfo(const Tegra::DMA::ImageOperand& config) noexcept;
 
     PixelFormat format = PixelFormat::Invalid;
     ImageType type = ImageType::e1D;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 9dd152fbe..335338434 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1359,6 +1359,75 @@ std::optional<typename TextureCache<P>::BlitImages> TextureCache<P>::GetBlitImag
 }
 
 template <class P>
+ImageId TextureCache<P>::FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr) {
+    std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
+    if (!cpu_addr) {
+        cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
+        if (!cpu_addr) {
+            return ImageId{};
+        }
+    }
+    ImageId image_id{};
+    boost::container::small_vector<ImageId, 1> image_ids;
+    const auto lambda = [&](ImageId existing_image_id, ImageBase& existing_image) {
+        if (True(existing_image.flags & ImageFlagBits::Remapped)) {
+            return false;
+        }
+        if (info.type == ImageType::Linear || existing_image.info.type == ImageType::Linear)
+            [[unlikely]] {
+            const bool strict_size = True(existing_image.flags & ImageFlagBits::Strong);
+            const ImageInfo& existing = existing_image.info;
+            if (existing_image.gpu_addr == gpu_addr && existing.type == info.type &&
+                existing.pitch == info.pitch &&
+                IsPitchLinearSameSize(existing, info, strict_size) &&
+                IsViewCompatible(existing.format, info.format, false, true)) {
+                image_id = existing_image_id;
+                image_ids.push_back(existing_image_id);
+                return true;
+            }
+        } else if (IsSubCopy(info, existing_image, gpu_addr)) {
+            image_id = existing_image_id;
+            image_ids.push_back(existing_image_id);
+            return true;
+        }
+        return false;
+    };
+    ForEachImageInRegion(*cpu_addr, CalculateGuestSizeInBytes(info), lambda);
+    if (image_ids.size() <= 1) [[likely]] {
+        return image_id;
+    }
+    auto image_ids_compare = [this](ImageId a, ImageId b) {
+        auto& image_a = slot_images[a];
+        auto& image_b = slot_images[b];
+        return image_a.modification_tick < image_b.modification_tick;
+    };
+    return *std::ranges::max_element(image_ids, image_ids_compare);
+}
+
+template <class P>
+std::optional<std::pair<typename TextureCache<P>::Image*, std::pair<u32, u32>>>
+TextureCache<P>::ObtainImage(const Tegra::DMA::ImageOperand& operand, bool mark_as_modified) {
+    ImageInfo dst_info(operand);
+    ImageId dst_id = FindDMAImage(dst_info, operand.address);
+    if (!dst_id) {
+        return std::nullopt;
+    }
+    auto& image = slot_images[dst_id];
+    auto base = image.TryFindBase(operand.address);
+    if (!base) {
+        return std::nullopt;
+    }
+    if (False(image.flags & ImageFlagBits::GpuModified)) {
+        // No need to waste time on an image that's synced with guest
+        return std::nullopt;
+    }
+    PrepareImage(dst_id, mark_as_modified, false);
+    auto& new_image = slot_images[dst_id];
+    lru_cache.Touch(new_image.lru_index, frame_tick);
+    return std::make_pair(&new_image, std::make_pair(base->level, base->layer));
+}
+
+template <class P>
 SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
     if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) {
         return NULL_SAMPLER_ID;
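Note: on the tie-break at the end of FindDMAImage: when several cached images overlap the DMA range, the comparator orders candidates by modification_tick, so std::ranges::max_element keeps the most recently written one:

    // Candidates with modification ticks {10, 42, 7}: max_element under
    //     image_a.modification_tick < image_b.modification_tick
    // selects the image with tick 42. The newest GPU write wins, which is
    // the surface the DMA engine is most plausibly addressing.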
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 013836933..848a5d9ea 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -209,6 +209,9 @@ public:
     /// Pop asynchronous downloads
     void PopAsyncFlushes();
 
+    [[nodiscard]] std::optional<std::pair<Image*, std::pair<u32, u32>>> ObtainImage(
+        const Tegra::DMA::ImageOperand& operand, bool mark_as_modified);
+
     /// Return true when a CPU region is modified from the GPU
     [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
 
@@ -300,6 +303,8 @@ private:
     /// Remove joined images from the cache
     [[nodiscard]] ImageId JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VAddr cpu_addr);
 
+    [[nodiscard]] ImageId FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr);
+
     /// Return a blit image pair from the given guest blit parameters
     [[nodiscard]] std::optional<BlitImages> GetBlitImages(
         const Tegra::Engines::Fermi2D::Surface& dst, const Tegra::Engines::Fermi2D::Surface& src,
diff --git a/src/video_core/texture_cache/types.h b/src/video_core/texture_cache/types.h
index 0453456b4..a0e10643f 100644
--- a/src/video_core/texture_cache/types.h
+++ b/src/video_core/texture_cache/types.h
@@ -54,6 +54,7 @@ enum class RelaxedOptions : u32 {
     Format = 1 << 1,
     Samples = 1 << 2,
     ForceBrokenViews = 1 << 3,
+    FormatBpp = 1 << 4,
 };
 DECLARE_ENUM_FLAG_OPERATORS(RelaxedOptions)
 
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 697f86641..de37db684 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -743,6 +743,44 @@ std::vector<ImageCopy> MakeShrinkImageCopies(const ImageInfo& dst, const ImageIn
     return copies;
 }
 
+std::vector<ImageCopy> MakeReinterpretImageCopies(const ImageInfo& src, u32 up_scale,
+                                                  u32 down_shift) {
+    std::vector<ImageCopy> copies;
+    copies.reserve(src.resources.levels);
+    const bool is_3d = src.type == ImageType::e3D;
+    for (s32 level = 0; level < src.resources.levels; ++level) {
+        ImageCopy& copy = copies.emplace_back();
+        copy.src_subresource = SubresourceLayers{
+            .base_level = level,
+            .base_layer = 0,
+            .num_layers = src.resources.layers,
+        };
+        copy.dst_subresource = SubresourceLayers{
+            .base_level = level,
+            .base_layer = 0,
+            .num_layers = src.resources.layers,
+        };
+        copy.src_offset = Offset3D{
+            .x = 0,
+            .y = 0,
+            .z = 0,
+        };
+        copy.dst_offset = Offset3D{
+            .x = 0,
+            .y = 0,
+            .z = 0,
+        };
+        const Extent3D mip_size = AdjustMipSize(src.size, level);
+        copy.extent = AdjustSamplesSize(mip_size, src.num_samples);
+        if (is_3d) {
+            copy.extent.depth = src.size.depth;
+        }
+        copy.extent.width = std::max<u32>((copy.extent.width * up_scale) >> down_shift, 1);
+        copy.extent.height = std::max<u32>((copy.extent.height * up_scale) >> down_shift, 1);
+    }
+    return copies;
+}
+
 bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config) {
     const GPUVAddr address = config.Address();
     if (address == 0) {
@@ -999,6 +1037,20 @@ bool IsBlockLinearSizeCompatible(const ImageInfo& lhs, const ImageInfo& rhs, u32
     }
 }
 
+bool IsBlockLinearSizeCompatibleBPPRelaxed(const ImageInfo& lhs, const ImageInfo& rhs,
+                                           u32 lhs_level, u32 rhs_level) noexcept {
+    ASSERT(lhs.type != ImageType::Linear);
+    ASSERT(rhs.type != ImageType::Linear);
+    const auto lhs_bpp = BytesPerBlock(lhs.format);
+    const auto rhs_bpp = BytesPerBlock(rhs.format);
+    const Extent3D lhs_size = AdjustMipSize(lhs.size, lhs_level);
+    const Extent3D rhs_size = AdjustMipSize(rhs.size, rhs_level);
+    return Common::AlignUpLog2(lhs_size.width * lhs_bpp, GOB_SIZE_X_SHIFT) ==
+               Common::AlignUpLog2(rhs_size.width * rhs_bpp, GOB_SIZE_X_SHIFT) &&
+           Common::AlignUpLog2(lhs_size.height, GOB_SIZE_Y_SHIFT) ==
+               Common::AlignUpLog2(rhs_size.height, GOB_SIZE_Y_SHIFT);
+}
+
 bool IsPitchLinearSameSize(const ImageInfo& lhs, const ImageInfo& rhs, bool strict_size) noexcept {
     ASSERT(lhs.type == ImageType::Linear);
     ASSERT(rhs.type == ImageType::Linear);
@@ -1073,7 +1125,8 @@ std::optional<SubresourceBase> FindSubresource(const ImageInfo& candidate, const
         // Format checking is relaxed, but we still have to check for matching bytes per block.
         // This avoids creating a view for blits on UE4 titles where formats with different bytes
         // per block are aliased.
-        if (BytesPerBlock(existing.format) != BytesPerBlock(candidate.format)) {
+        if (BytesPerBlock(existing.format) != BytesPerBlock(candidate.format) &&
+            False(options & RelaxedOptions::FormatBpp)) {
             return std::nullopt;
         }
     } else {
@@ -1088,10 +1141,8 @@ std::optional<SubresourceBase> FindSubresource(const ImageInfo& candidate, const
     if (existing.type != candidate.type) {
         return std::nullopt;
     }
-    if (False(options & RelaxedOptions::Samples)) {
-        if (existing.num_samples != candidate.num_samples) {
-            return std::nullopt;
-        }
+    if (False(options & RelaxedOptions::Samples) && existing.num_samples != candidate.num_samples) {
+        return std::nullopt;
     }
     if (existing.resources.levels < candidate.resources.levels + base->level) {
         return std::nullopt;
@@ -1101,14 +1152,16 @@ std::optional<SubresourceBase> FindSubresource(const ImageInfo& candidate, const
         if (mip_depth < candidate.size.depth + base->layer) {
             return std::nullopt;
         }
-    } else {
-        if (existing.resources.layers < candidate.resources.layers + base->layer) {
-            return std::nullopt;
-        }
+    } else if (existing.resources.layers < candidate.resources.layers + base->layer) {
+        return std::nullopt;
     }
     const bool strict_size = False(options & RelaxedOptions::Size);
     if (!IsBlockLinearSizeCompatible(existing, candidate, base->level, 0, strict_size)) {
-        return std::nullopt;
+        if (False(options & RelaxedOptions::FormatBpp)) {
+            return std::nullopt;
+        } else if (!IsBlockLinearSizeCompatibleBPPRelaxed(existing, candidate, base->level, 0)) {
+            return std::nullopt;
+        }
     }
     // TODO: compare block sizes
     return base;
@@ -1120,6 +1173,31 @@ bool IsSubresource(const ImageInfo& candidate, const ImageBase& image, GPUVAddr
         .has_value();
 }
 
+bool IsSubCopy(const ImageInfo& candidate, const ImageBase& image, GPUVAddr candidate_addr) {
+    const std::optional<SubresourceBase> base = image.TryFindBase(candidate_addr);
+    if (!base) {
+        return false;
+    }
+    const ImageInfo& existing = image.info;
+    if (existing.resources.levels < candidate.resources.levels + base->level) {
+        return false;
+    }
+    if (existing.type == ImageType::e3D) {
+        const u32 mip_depth = std::max(1U, existing.size.depth << base->level);
+        if (mip_depth < candidate.size.depth + base->layer) {
+            return false;
+        }
+    } else {
+        if (existing.resources.layers < candidate.resources.layers + base->layer) {
+            return false;
+        }
+    }
+    if (!IsBlockLinearSizeCompatibleBPPRelaxed(existing, candidate, base->level, 0)) {
+        return false;
+    }
+    return true;
+}
+
 void DeduceBlitImages(ImageInfo& dst_info, ImageInfo& src_info, const ImageBase* dst,
                       const ImageBase* src) {
     const auto original_dst_format = dst_info.format;
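Note: to make the BPP-relaxed comparison concrete, a worked example of the alignment arithmetic (sizes illustrative): two images whose widths differ in texels can still match in GOB-aligned bytes:

    // GOB_SIZE_X_SHIFT = 6 (64-byte rows), GOB_SIZE_Y_SHIFT = 3 (8 rows):
    //     lhs: 100 texels * 4 bytes = 400 -> AlignUpLog2(400, 6) = 448
    //     rhs: 200 texels * 2 bytes = 400 -> AlignUpLog2(400, 6) = 448
    // Heights of 60 both align up to 64, so the images cover identical GOB
    // footprints and IsBlockLinearSizeCompatibleBPPRelaxed() returns true
    // even though the bytes-per-block differ.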
diff --git a/src/video_core/texture_cache/util.h b/src/video_core/texture_cache/util.h
index d103db8ae..84aa6880d 100644
--- a/src/video_core/texture_cache/util.h
+++ b/src/video_core/texture_cache/util.h
@@ -56,6 +56,10 @@ struct OverlapResult {
                                                            SubresourceBase base, u32 up_scale = 1,
                                                            u32 down_shift = 0);
 
+[[nodiscard]] std::vector<ImageCopy> MakeReinterpretImageCopies(const ImageInfo& src,
+                                                                u32 up_scale = 1,
+                                                                u32 down_shift = 0);
+
 [[nodiscard]] bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config);
 
 [[nodiscard]] std::vector<BufferImageCopy> UnswizzleImage(Tegra::MemoryManager& gpu_memory,
@@ -88,6 +92,9 @@ void SwizzleImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, const Ima
 [[nodiscard]] bool IsPitchLinearSameSize(const ImageInfo& lhs, const ImageInfo& rhs,
                                          bool strict_size) noexcept;
 
+[[nodiscard]] bool IsBlockLinearSizeCompatibleBPPRelaxed(const ImageInfo& lhs, const ImageInfo& rhs,
+                                                         u32 lhs_level, u32 rhs_level) noexcept;
+
 [[nodiscard]] std::optional<OverlapResult> ResolveOverlap(const ImageInfo& new_info,
                                                           GPUVAddr gpu_addr, VAddr cpu_addr,
                                                           const ImageBase& overlap,
@@ -106,6 +113,9 @@ void SwizzleImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr, const Ima
                                  GPUVAddr candidate_addr, RelaxedOptions options, bool broken_views,
                                  bool native_bgr);
 
+[[nodiscard]] bool IsSubCopy(const ImageInfo& candidate, const ImageBase& image,
+                             GPUVAddr candidate_addr);
+
 void DeduceBlitImages(ImageInfo& dst_info, ImageInfo& src_info, const ImageBase* dst,
                       const ImageBase* src);
 