summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--src/video_core/engines/fermi_2d.cpp9
-rw-r--r--src/video_core/engines/kepler_memory.cpp11
-rw-r--r--src/video_core/engines/kepler_memory.h7
-rw-r--r--src/video_core/engines/maxwell_dma.cpp73
-rw-r--r--src/video_core/engines/maxwell_dma.h8
-rw-r--r--src/video_core/gpu.cpp4
-rw-r--r--src/video_core/renderer_opengl/gl_rasterizer.cpp6
-rw-r--r--src/video_core/textures/decoders.cpp40
-rw-r--r--src/video_core/textures/decoders.h9
9 files changed, 144 insertions, 23 deletions
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 597b279b9..74e44c7fe 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -47,9 +47,12 @@ void Fermi2D::HandleSurfaceCopy() {
47 u32 dst_bytes_per_pixel = RenderTargetBytesPerPixel(regs.dst.format); 47 u32 dst_bytes_per_pixel = RenderTargetBytesPerPixel(regs.dst.format);
48 48
49 if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst)) { 49 if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst)) {
50 // TODO(bunnei): The below implementation currently will not get hit, as 50 rasterizer.FlushRegion(source_cpu, src_bytes_per_pixel * regs.src.width * regs.src.height);
51 // AccelerateSurfaceCopy tries to always copy and will always return success. This should be 51 // We have to invalidate the destination region to evict any outdated surfaces from the
52 // changed once we properly support flushing. 52 // cache. We do this before actually writing the new data because the destination address
53 // might contain a dirty surface that will have to be written back to memory.
54 rasterizer.InvalidateRegion(dest_cpu,
55 dst_bytes_per_pixel * regs.dst.width * regs.dst.height);
53 56
54 if (regs.src.linear == regs.dst.linear) { 57 if (regs.src.linear == regs.dst.linear) {
55 // If the input layout and the output layout are the same, just perform a raw copy. 58 // If the input layout and the output layout are the same, just perform a raw copy.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 66ae6332d..585290d9f 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -5,10 +5,14 @@
5#include "common/logging/log.h" 5#include "common/logging/log.h"
6#include "core/memory.h" 6#include "core/memory.h"
7#include "video_core/engines/kepler_memory.h" 7#include "video_core/engines/kepler_memory.h"
8#include "video_core/rasterizer_interface.h"
8 9
9namespace Tegra::Engines { 10namespace Tegra::Engines {
10 11
11KeplerMemory::KeplerMemory(MemoryManager& memory_manager) : memory_manager(memory_manager) {} 12KeplerMemory::KeplerMemory(VideoCore::RasterizerInterface& rasterizer,
13 MemoryManager& memory_manager)
14 : memory_manager(memory_manager), rasterizer{rasterizer} {}
15
12KeplerMemory::~KeplerMemory() = default; 16KeplerMemory::~KeplerMemory() = default;
13 17
14void KeplerMemory::WriteReg(u32 method, u32 value) { 18void KeplerMemory::WriteReg(u32 method, u32 value) {
@@ -37,6 +41,11 @@ void KeplerMemory::ProcessData(u32 data) {
37 VAddr dest_address = 41 VAddr dest_address =
38 *memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32)); 42 *memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32));
39 43
44 // We have to invalidate the destination region to evict any outdated surfaces from the cache.
45 // We do this before actually writing the new data because the destination address might contain
46 // a dirty surface that will have to be written back to memory.
47 rasterizer.InvalidateRegion(dest_address, sizeof(u32));
48
40 Memory::Write32(dest_address, data); 49 Memory::Write32(dest_address, data);
41 50
42 state.write_offset++; 51 state.write_offset++;
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index b0d0078cf..bf4a13cff 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -11,6 +11,10 @@
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "video_core/memory_manager.h" 12#include "video_core/memory_manager.h"
13 13
14namespace VideoCore {
15class RasterizerInterface;
16}
17
14namespace Tegra::Engines { 18namespace Tegra::Engines {
15 19
16#define KEPLERMEMORY_REG_INDEX(field_name) \ 20#define KEPLERMEMORY_REG_INDEX(field_name) \
@@ -18,7 +22,7 @@ namespace Tegra::Engines {
18 22
19class KeplerMemory final { 23class KeplerMemory final {
20public: 24public:
21 KeplerMemory(MemoryManager& memory_manager); 25 KeplerMemory(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
22 ~KeplerMemory(); 26 ~KeplerMemory();
23 27
24 /// Write the value to the register identified by method. 28 /// Write the value to the register identified by method.
@@ -72,6 +76,7 @@ public:
72 76
73private: 77private:
74 MemoryManager& memory_manager; 78 MemoryManager& memory_manager;
79 VideoCore::RasterizerInterface& rasterizer;
75 80
76 void ProcessData(u32 data); 81 void ProcessData(u32 data);
77}; 82};
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index bf2a21bb6..103cd110e 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -4,12 +4,14 @@
4 4
5#include "core/memory.h" 5#include "core/memory.h"
6#include "video_core/engines/maxwell_dma.h" 6#include "video_core/engines/maxwell_dma.h"
7#include "video_core/rasterizer_interface.h"
7#include "video_core/textures/decoders.h" 8#include "video_core/textures/decoders.h"
8 9
9namespace Tegra { 10namespace Tegra {
10namespace Engines { 11namespace Engines {
11 12
12MaxwellDMA::MaxwellDMA(MemoryManager& memory_manager) : memory_manager(memory_manager) {} 13MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
14 : memory_manager(memory_manager), rasterizer{rasterizer} {}
13 15
14void MaxwellDMA::WriteReg(u32 method, u32 value) { 16void MaxwellDMA::WriteReg(u32 method, u32 value) {
15 ASSERT_MSG(method < Regs::NUM_REGS, 17 ASSERT_MSG(method < Regs::NUM_REGS,
@@ -44,38 +46,79 @@ void MaxwellDMA::HandleCopy() {
44 ASSERT(regs.exec.query_mode == Regs::QueryMode::None); 46 ASSERT(regs.exec.query_mode == Regs::QueryMode::None);
45 ASSERT(regs.exec.query_intr == Regs::QueryIntr::None); 47 ASSERT(regs.exec.query_intr == Regs::QueryIntr::None);
46 ASSERT(regs.exec.copy_mode == Regs::CopyMode::Unk2); 48 ASSERT(regs.exec.copy_mode == Regs::CopyMode::Unk2);
47 ASSERT(regs.src_params.pos_x == 0);
48 ASSERT(regs.src_params.pos_y == 0);
49 ASSERT(regs.dst_params.pos_x == 0); 49 ASSERT(regs.dst_params.pos_x == 0);
50 ASSERT(regs.dst_params.pos_y == 0); 50 ASSERT(regs.dst_params.pos_y == 0);
51 51
52 if (regs.exec.is_dst_linear == regs.exec.is_src_linear) { 52 if (!regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
53 std::size_t copy_size = regs.x_count; 53 // If both the source and the destination are in block layout, assert.
54 UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
55 return;
56 }
54 57
58 if (regs.exec.is_dst_linear && regs.exec.is_src_linear) {
55 // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D 59 // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D
56 // buffer of length `x_count`, otherwise we copy a 2D buffer of size (x_count, y_count). 60 // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count,
57 if (regs.exec.enable_2d) { 61 // y_count).
58 copy_size = copy_size * regs.y_count; 62 if (!regs.exec.enable_2d) {
63 Memory::CopyBlock(dest_cpu, source_cpu, regs.x_count);
64 return;
59 } 65 }
60 66
61 Memory::CopyBlock(dest_cpu, source_cpu, copy_size); 67 // If both the source and the destination are in linear layout, perform a line-by-line
68 // copy. We're going to take a subrect of size (x_count, y_count) from the source
69 // rectangle. There is no need to manually flush/invalidate the regions because
70 // CopyBlock does that for us.
71 for (u32 line = 0; line < regs.y_count; ++line) {
72 const VAddr source_line = source_cpu + line * regs.src_pitch;
73 const VAddr dest_line = dest_cpu + line * regs.dst_pitch;
74 Memory::CopyBlock(dest_line, source_line, regs.x_count);
75 }
62 return; 76 return;
63 } 77 }
64 78
65 ASSERT(regs.exec.enable_2d == 1); 79 ASSERT(regs.exec.enable_2d == 1);
80
81 std::size_t copy_size = regs.x_count * regs.y_count;
82
83 const auto FlushAndInvalidate = [&](u32 src_size, u32 dst_size) {
84 // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
85 // copying.
86 rasterizer.FlushRegion(source_cpu, src_size);
87
88 // We have to invalidate the destination region to evict any outdated surfaces from the
89 // cache. We do this before actually writing the new data because the destination address
90 // might contain a dirty surface that will have to be written back to memory.
91 rasterizer.InvalidateRegion(dest_cpu, dst_size);
92 };
93
66 u8* src_buffer = Memory::GetPointer(source_cpu); 94 u8* src_buffer = Memory::GetPointer(source_cpu);
67 u8* dst_buffer = Memory::GetPointer(dest_cpu); 95 u8* dst_buffer = Memory::GetPointer(dest_cpu);
68 96
69 if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) { 97 if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
98 ASSERT(regs.src_params.size_z == 1);
70 // If the input is tiled and the output is linear, deswizzle the input and copy it over. 99 // If the input is tiled and the output is linear, deswizzle the input and copy it over.
71 Texture::CopySwizzledData(regs.src_params.size_x, regs.src_params.size_y, 100
72 regs.src_params.size_z, 1, 1, src_buffer, dst_buffer, true, 101 u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x;
73 regs.src_params.BlockHeight(), regs.src_params.BlockDepth()); 102
103 FlushAndInvalidate(regs.src_pitch * regs.src_params.size_y,
104 copy_size * src_bytes_per_pixel);
105
106 Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch,
107 regs.src_params.size_x, src_bytes_per_pixel, source_cpu, dest_cpu,
108 regs.src_params.BlockHeight(), regs.src_params.pos_x,
109 regs.src_params.pos_y);
74 } else { 110 } else {
111 ASSERT(regs.dst_params.size_z == 1);
112 ASSERT(regs.src_pitch == regs.x_count);
113
114 u32 src_bpp = regs.src_pitch / regs.x_count;
115
116 FlushAndInvalidate(regs.src_pitch * regs.y_count,
117 regs.dst_params.size_x * regs.dst_params.size_y * src_bpp);
118
75 // If the input is linear and the output is tiled, swizzle the input and copy it over. 119 // If the input is linear and the output is tiled, swizzle the input and copy it over.
76 Texture::CopySwizzledData(regs.dst_params.size_x, regs.dst_params.size_y, 120 Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x,
77 regs.dst_params.size_z, 1, 1, dst_buffer, src_buffer, false, 121 src_bpp, dest_cpu, source_cpu, regs.dst_params.BlockHeight());
78 regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth());
79 } 122 }
80} 123}
81 124
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index df19e02e2..5f3704f05 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -12,11 +12,15 @@
12#include "video_core/gpu.h" 12#include "video_core/gpu.h"
13#include "video_core/memory_manager.h" 13#include "video_core/memory_manager.h"
14 14
15namespace VideoCore {
16class RasterizerInterface;
17}
18
15namespace Tegra::Engines { 19namespace Tegra::Engines {
16 20
17class MaxwellDMA final { 21class MaxwellDMA final {
18public: 22public:
19 explicit MaxwellDMA(MemoryManager& memory_manager); 23 explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
20 ~MaxwellDMA() = default; 24 ~MaxwellDMA() = default;
21 25
22 /// Write the value to the register identified by method. 26 /// Write the value to the register identified by method.
@@ -133,6 +137,8 @@ public:
133 MemoryManager& memory_manager; 137 MemoryManager& memory_manager;
134 138
135private: 139private:
140 VideoCore::RasterizerInterface& rasterizer;
141
136 /// Performs the copy from the source buffer to the destination buffer as configured in the 142 /// Performs the copy from the source buffer to the destination buffer as configured in the
137 /// registers. 143 /// registers.
138 void HandleCopy(); 144 void HandleCopy();
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 9ba7e3533..83c7e5b0b 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -27,8 +27,8 @@ GPU::GPU(VideoCore::RasterizerInterface& rasterizer) {
27 maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager); 27 maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager);
28 fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager); 28 fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager);
29 maxwell_compute = std::make_unique<Engines::MaxwellCompute>(); 29 maxwell_compute = std::make_unique<Engines::MaxwellCompute>();
30 maxwell_dma = std::make_unique<Engines::MaxwellDMA>(*memory_manager); 30 maxwell_dma = std::make_unique<Engines::MaxwellDMA>(rasterizer, *memory_manager);
31 kepler_memory = std::make_unique<Engines::KeplerMemory>(*memory_manager); 31 kepler_memory = std::make_unique<Engines::KeplerMemory>(rasterizer, *memory_manager);
32} 32}
33 33
34GPU::~GPU() = default; 34GPU::~GPU() = default;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 468253033..3daccf82f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -659,6 +659,12 @@ void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
659bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, 659bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
660 const Tegra::Engines::Fermi2D::Regs::Surface& dst) { 660 const Tegra::Engines::Fermi2D::Regs::Surface& dst) {
661 MICROPROFILE_SCOPE(OpenGL_Blits); 661 MICROPROFILE_SCOPE(OpenGL_Blits);
662
663 if (Settings::values.use_accurate_gpu_emulation) {
664 // Skip the accelerated copy and perform a slow but more accurate copy
665 return false;
666 }
667
662 res_cache.FermiCopySurface(src, dst); 668 res_cache.FermiCopySurface(src, dst);
663 return true; 669 return true;
664} 670}
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 18ab723f7..f1b40e7f5 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -237,6 +237,46 @@ std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size, u32 bytes_per_pix
237 return unswizzled_data; 237 return unswizzled_data;
238} 238}
239 239
240void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
241 u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
242 u32 block_height) {
243 const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + 63) / 64};
244 for (u32 line = 0; line < subrect_height; ++line) {
245 const u32 gob_address_y =
246 (line / (8 * block_height)) * 512 * block_height * image_width_in_gobs +
247 (line % (8 * block_height) / 8) * 512;
248 const auto& table = legacy_swizzle_table[line % 8];
249 for (u32 x = 0; x < subrect_width; ++x) {
250 const u32 gob_address = gob_address_y + (x * bytes_per_pixel / 64) * 512 * block_height;
251 const u32 swizzled_offset = gob_address + table[(x * bytes_per_pixel) % 64];
252 const VAddr source_line = unswizzled_data + line * source_pitch + x * bytes_per_pixel;
253 const VAddr dest_addr = swizzled_data + swizzled_offset;
254
255 Memory::CopyBlock(dest_addr, source_line, bytes_per_pixel);
256 }
257 }
258}
259
260void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
261 u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
262 u32 block_height, u32 offset_x, u32 offset_y) {
263 for (u32 line = 0; line < subrect_height; ++line) {
264 const u32 y2 = line + offset_y;
265 const u32 gob_address_y =
266 (y2 / (8 * block_height)) * 512 * block_height + (y2 % (8 * block_height) / 8) * 512;
267 const auto& table = legacy_swizzle_table[y2 % 8];
268 for (u32 x = 0; x < subrect_width; ++x) {
269 const u32 x2 = (x + offset_x) * bytes_per_pixel;
270 const u32 gob_address = gob_address_y + (x2 / 64) * 512 * block_height;
271 const u32 swizzled_offset = gob_address + table[x2 % 64];
272 const VAddr dest_line = unswizzled_data + line * dest_pitch + x * bytes_per_pixel;
273 const VAddr source_addr = swizzled_data + swizzled_offset;
274
275 Memory::CopyBlock(dest_line, source_addr, bytes_per_pixel);
276 }
277 }
278}
279
240std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat format, u32 width, 280std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat format, u32 width,
241 u32 height) { 281 u32 height) {
242 std::vector<u8> rgba_data; 282 std::vector<u8> rgba_data;
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index aaf316947..4726f54a5 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -35,4 +35,13 @@ std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat
35std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, 35std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
36 u32 block_height, u32 block_depth); 36 u32 block_height, u32 block_depth);
37 37
38/// Copies an untiled subrectangle into a tiled surface.
39void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
40 u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
41 u32 block_height);
42/// Copies a tiled subrectangle into a linear surface.
43void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
44 u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
45 u32 block_height, u32 offset_x, u32 offset_y);
46
38} // namespace Tegra::Texture 47} // namespace Tegra::Texture