| commit | 3fedcc2f6e001f0ed1fd791de4f9692570359eef |
|---|---|
| author | 2020-04-20 02:16:56 -0400 |
| committer | 2020-04-23 08:52:55 -0400 |
| tree | 49109516beab33d825cc653d4e885107304da332 |
| parent | Merge pull request #3730 from lioncash/time |
DMAPusher: Propagate multimethod writes into the engines.
Diffstat (limited to 'src/video_core/dma_pusher.cpp')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/video_core/dma_pusher.cpp | 30 |

1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 324dafdcd..16311f05e 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -71,16 +71,22 @@ bool DmaPusher::Step() {
     gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
                                         command_list_header.size * sizeof(u32));

-    for (const CommandHeader& command_header : command_headers) {
-
-        // now, see if we're in the middle of a command
-        if (dma_state.length_pending) {
-            // Second word of long non-inc methods command - method count
-            dma_state.length_pending = 0;
-            dma_state.method_count = command_header.method_count_;
-        } else if (dma_state.method_count) {
+    for (std::size_t index = 0; index < command_headers.size();) {
+        const CommandHeader& command_header = command_headers[index];
+
+        if (dma_state.method_count) {
             // Data word of methods command
-            CallMethod(command_header.argument);
+            if (dma_state.non_incrementing) {
+                const u32 max_write = static_cast<u32>(
+                    std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
+                    index);
+                CallMultiMethod(&command_header.argument, max_write);
+                dma_state.method_count -= max_write;
+                index += max_write;
+                continue;
+            } else {
+                CallMethod(command_header.argument);
+            }

             if (!dma_state.non_incrementing) {
                 dma_state.method++;
@@ -120,6 +126,7 @@ bool DmaPusher::Step() {
                 break;
             }
         }
+        index++;
     }

     if (!non_main) {
@@ -140,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const {
     gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
 }

+void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
+    gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+                        dma_state.method_count);
+}
+
 } // namespace Tegra
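
For readers following the change, here is a minimal standalone sketch of the batching arithmetic the new loop uses: when the active method is non-incrementing, every remaining data word that still fits inside the current `command_headers` read is forwarded in one call instead of one `CallMethod` per word. The sample `words` buffer, the `method_count` value, and the `call_multi` stub below are illustrative only, not yuzu code.

```cpp
// Standalone sketch of the batching logic introduced in this commit.
// The data and the call_multi stub are made up for illustration; in yuzu the
// words come from command_headers and are forwarded to the GPU engines.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using u32 = std::uint32_t;

int main() {
    std::vector<u32> words = {10, 11, 12, 13, 14}; // pretend data words in the read block
    std::size_t method_count = 7;                  // words still owed to the current method
    const bool non_incrementing = true;            // non-incrementing submission mode

    const auto call_multi = [](const u32* base, u32 count) {
        std::printf("multi-method write of %u words starting at %u\n", count, *base);
    };

    for (std::size_t index = 0; index < words.size();) {
        if (method_count && non_incrementing) {
            // Same computation as in DmaPusher::Step(): forward every remaining
            // data word that is still inside the current read block in one call.
            const u32 max_write = static_cast<u32>(
                std::min<std::size_t>(index + method_count, words.size()) - index);
            call_multi(&words[index], max_write);
            method_count -= max_write;
            index += max_write;
            continue;
        }
        ++index;
    }
    // Prints: multi-method write of 5 words starting at 10
    // (method_count is now 2; the rest would arrive with the next read block)
}
```

Batching only the non-incrementing case mirrors the diff above: in that mode every argument targets the same method register, so the whole run can be handed to the engine as a single multi-method write, while incrementing submissions still go through `CallMethod` one word at a time.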