Diffstat:
 src/video_core/engines/maxwell_dma.cpp | 127
 src/video_core/engines/maxwell_dma.h   |   2
 2 files changed, 73 insertions(+), 56 deletions(-)
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3909d36c1..4eb7a100d 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -56,66 +56,85 @@ void MaxwellDMA::Launch() {
     ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
     ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
 
-    const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
-    const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
-
-    if (!is_src_pitch && !is_dst_pitch) {
-        // If both the source and the destination are in block layout, assert.
-        UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
-        return;
-    }
-
-    if (is_src_pitch && is_dst_pitch) {
-        CopyPitchToPitch();
-    } else {
-        ASSERT(launch.multi_line_enable == 1);
-
-        if (!is_src_pitch && is_dst_pitch) {
-            CopyBlockLinearToPitch();
-        } else {
-            CopyPitchToBlockLinear();
-        }
-    }
-    ReleaseSemaphore();
-}
-
-void MaxwellDMA::CopyPitchToPitch() {
-    // When `multi_line_enable` bit is enabled we copy a 2D image of dimensions
-    // (line_length_in, line_count).
-    // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in.
-    const bool remap_enabled = regs.launch_dma.remap_enable != 0;
-    if (regs.launch_dma.multi_line_enable) {
-        UNIMPLEMENTED_IF(remap_enabled);
-
-        // Perform a line-by-line copy.
-        // We're going to take a subrect of size (line_length_in, line_count) from the source
-        // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock
-        // does that for us.
-        for (u32 line = 0; line < regs.line_count; ++line) {
-            const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
-            const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
-            memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
-        }
-        return;
-    }
-    // TODO: allow multisized components.
-    auto& accelerate = rasterizer->AccessAccelerateDMA();
-    const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
-    const bool is_buffer_clear = remap_enabled && is_const_a_dst;
-    if (is_buffer_clear) {
-        ASSERT(regs.remap_const.component_size_minus_one == 3);
-        accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
-        std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
-        memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
-                                        regs.line_length_in * sizeof(u32));
-        return;
-    }
-    UNIMPLEMENTED_IF(remap_enabled);
-    if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
-        std::vector<u8> tmp_buffer(regs.line_length_in);
-        memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in);
-        memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in);
-    }
-}
+    if (launch.multi_line_enable) {
+        const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
+        const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
+
+        if (!is_src_pitch && !is_dst_pitch) {
+            // If both the source and the destination are in block layout, assert.
+            UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
+            return;
+        }
+
+        if (is_src_pitch && is_dst_pitch) {
+            for (u32 line = 0; line < regs.line_count; ++line) {
+                const GPUVAddr source_line =
+                    regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
+                const GPUVAddr dest_line =
+                    regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
+                memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
+            }
+        } else {
+            if (!is_src_pitch && is_dst_pitch) {
+                CopyBlockLinearToPitch();
+            } else {
+                CopyPitchToBlockLinear();
+            }
+        }
+    } else {
+        // TODO: allow multisized components.
+        auto& accelerate = rasterizer->AccessAccelerateDMA();
+        const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
+        if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) {
+            ASSERT(regs.remap_const.component_size_minus_one == 3);
+            accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
+            std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
+            memory_manager.WriteBlockUnsafe(regs.offset_out,
+                                            reinterpret_cast<u8*>(tmp_buffer.data()),
+                                            regs.line_length_in * sizeof(u32));
+        } else {
+            auto convert_linear_2_blocklinear_addr = [](u64 address) {
+                return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
+                       ((address & 0x180) >> 1) | ((address & 0x20) << 3);
+            };
+            auto src_kind = memory_manager.GetPageKind(regs.offset_in);
+            auto dst_kind = memory_manager.GetPageKind(regs.offset_out);
+            const bool is_src_pitch = IsPitchKind(static_cast<PTEKind>(src_kind));
+            const bool is_dst_pitch = IsPitchKind(static_cast<PTEKind>(dst_kind));
+            if (!is_src_pitch && is_dst_pitch) {
+                std::vector<u8> tmp_buffer(regs.line_length_in);
+                std::vector<u8> dst_buffer(regs.line_length_in);
+                memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+                                               regs.line_length_in);
+                for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
+                    dst_buffer[offset] =
+                        tmp_buffer[convert_linear_2_blocklinear_addr(regs.offset_in + offset) -
+                                   regs.offset_in];
+                }
+                memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
+            } else if (is_src_pitch && !is_dst_pitch) {
+                std::vector<u8> tmp_buffer(regs.line_length_in);
+                std::vector<u8> dst_buffer(regs.line_length_in);
+                memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+                                               regs.line_length_in);
+                for (u32 offset = 0; offset < regs.line_length_in; ++offset) {
+                    dst_buffer[convert_linear_2_blocklinear_addr(regs.offset_out + offset) -
+                               regs.offset_out] = tmp_buffer[offset];
+                }
+                memory_manager.WriteBlock(regs.offset_out, dst_buffer.data(), regs.line_length_in);
+            } else {
+                if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
+                    std::vector<u8> tmp_buffer(regs.line_length_in);
+                    memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(),
+                                                   regs.line_length_in);
+                    memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(),
+                                              regs.line_length_in);
+                }
+            }
+        }
+    }
+
+    ReleaseSemaphore();
+}
 
 void MaxwellDMA::CopyBlockLinearToPitch() {
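
A note on the swizzle introduced above: convert_linear_2_blocklinear_addr leaves
bits 0-3 of the address untouched and permutes bits 4-8, so it only rearranges
bytes within a single 512-byte GOB. The following standalone sketch (not part of
the commit; the main() harness is purely illustrative) reproduces the lambda and
checks that the mapping is a bijection over one GOB, which is what makes the
byte-by-byte copies in the two mixed-layout branches lossless:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <set>

    // Same bit permutation as the lambda in Launch(): bit 6 -> 4, bit 4 -> 5,
    // bits 7-8 -> 6-7, bit 5 -> 8; bits 0-3 pass through unchanged.
    static std::uint64_t convert_linear_2_blocklinear_addr(std::uint64_t address) {
        return (address & ~0x1f0ULL) | ((address & 0x40) >> 2) | ((address & 0x10) << 1) |
               ((address & 0x180) >> 1) | ((address & 0x20) << 3);
    }

    int main() {
        std::set<std::uint64_t> seen;
        for (std::uint64_t offset = 0; offset < 512; ++offset) {
            seen.insert(convert_linear_2_blocklinear_addr(offset));
        }
        assert(seen.size() == 512); // a bit permutation is a bijection per GOB
        std::printf("linear 0x40 -> block-linear 0x%llx\n",
                    static_cast<unsigned long long>(convert_linear_2_blocklinear_addr(0x40)));
        return 0;
    }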
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index bc48320ce..953e34adc 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -219,8 +219,6 @@ private:
     /// registers.
     void Launch();
 
-    void CopyPitchToPitch();
-
     void CopyBlockLinearToPitch();
 
     void CopyPitchToBlockLinear();
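
The header change drops the CopyPitchToPitch declaration: with this commit its
multi-line body is inlined into Launch(), and its single-line remainder becomes
the new else branch there. For reference, the per-line addressing that survives
in the inlined pitch->pitch loop works as in this minimal sketch (pitch and line
values are made-up example numbers, not taken from the commit):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    int main() {
        // Assumed example values: a (32 x 4)-byte subrect copied between two
        // pitched buffers, standing in for line_length_in, line_count,
        // pitch_in and pitch_out.
        constexpr std::size_t pitch_in = 64, pitch_out = 128;
        constexpr std::size_t line_length_in = 32, line_count = 4;
        std::vector<unsigned char> src(pitch_in * line_count, 0xAB);
        std::vector<unsigned char> dst(pitch_out * line_count, 0x00);
        for (std::size_t line = 0; line < line_count; ++line) {
            // Mirrors source_line = offset_in + line * pitch_in (offsets are
            // 0 here); only line_length_in bytes of each row are copied.
            std::memcpy(dst.data() + line * pitch_out,
                        src.data() + line * pitch_in, line_length_in);
        }
        return 0;
    }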