diff options
| author | 2019-07-17 21:03:53 -0300 | |
|---|---|---|
| committer | 2019-09-10 20:22:31 -0300 | |
| commit | 36abf67e79b234a361b99a342391249095ccd79c (patch) | |
| tree | 3e6e0e818e952a038fbe10262bf39cf6d52eaa61 /src/video_core/shader/decode | |
| parent | Merge pull request #2823 from ReinUsesLisp/shr-clamp (diff) | |
| download | yuzu-36abf67e79b234a361b99a342391249095ccd79c.tar.gz yuzu-36abf67e79b234a361b99a342391249095ccd79c.tar.xz yuzu-36abf67e79b234a361b99a342391249095ccd79c.zip | |
shader/image: Implement SUATOM and fix SUST
Diffstat (limited to 'src/video_core/shader/decode')
| -rw-r--r-- | src/video_core/shader/decode/image.cpp | 92 |
1 files changed, 71 insertions, 21 deletions
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp index 008109a99..d54fb88c9 100644 --- a/src/video_core/shader/decode/image.cpp +++ b/src/video_core/shader/decode/image.cpp | |||
| @@ -44,7 +44,6 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { | |||
| 44 | switch (opcode->get().GetId()) { | 44 | switch (opcode->get().GetId()) { |
| 45 | case OpCode::Id::SUST: { | 45 | case OpCode::Id::SUST: { |
| 46 | UNIMPLEMENTED_IF(instr.sust.mode != Tegra::Shader::SurfaceDataMode::P); | 46 | UNIMPLEMENTED_IF(instr.sust.mode != Tegra::Shader::SurfaceDataMode::P); |
| 47 | UNIMPLEMENTED_IF(instr.sust.image_type == Tegra::Shader::ImageType::TextureBuffer); | ||
| 48 | UNIMPLEMENTED_IF(instr.sust.out_of_bounds_store != Tegra::Shader::OutOfBoundsStore::Ignore); | 47 | UNIMPLEMENTED_IF(instr.sust.out_of_bounds_store != Tegra::Shader::OutOfBoundsStore::Ignore); |
| 49 | UNIMPLEMENTED_IF(instr.sust.component_mask_selector != 0xf); // Ensure we have an RGBA store | 48 | UNIMPLEMENTED_IF(instr.sust.component_mask_selector != 0xf); // Ensure we have an RGBA store |
| 50 | 49 | ||
| @@ -66,8 +65,46 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { | |||
| 66 | image.MarkWrite(); | 65 | image.MarkWrite(); |
| 67 | 66 | ||
| 68 | MetaImage meta{image, values}; | 67 | MetaImage meta{image, values}; |
| 69 | const Node store{Operation(OperationCode::ImageStore, meta, std::move(coords))}; | 68 | bb.push_back(Operation(OperationCode::ImageStore, meta, std::move(coords))); |
| 70 | bb.push_back(store); | 69 | break; |
| 70 | } | ||
| 71 | case OpCode::Id::SUATOM: { | ||
| 72 | UNIMPLEMENTED_IF(instr.suatom_d.is_ba != 0); | ||
| 73 | |||
| 74 | Node value = GetRegister(instr.gpr0); | ||
| 75 | |||
| 76 | std::vector<Node> coords; | ||
| 77 | const std::size_t num_coords{GetImageTypeNumCoordinates(instr.sust.image_type)}; | ||
| 78 | for (std::size_t i = 0; i < num_coords; ++i) { | ||
| 79 | coords.push_back(GetRegister(instr.gpr8.Value() + i)); | ||
| 80 | } | ||
| 81 | |||
| 82 | const OperationCode operation_code = [instr] { | ||
| 83 | switch (instr.suatom_d.operation) { | ||
| 84 | case Tegra::Shader::ImageAtomicOperation::Add: | ||
| 85 | return OperationCode::AtomicImageAdd; | ||
| 86 | case Tegra::Shader::ImageAtomicOperation::Min: | ||
| 87 | return OperationCode::AtomicImageMin; | ||
| 88 | case Tegra::Shader::ImageAtomicOperation::Max: | ||
| 89 | return OperationCode::AtomicImageMax; | ||
| 90 | case Tegra::Shader::ImageAtomicOperation::And: | ||
| 91 | return OperationCode::AtomicImageAnd; | ||
| 92 | case Tegra::Shader::ImageAtomicOperation::Or: | ||
| 93 | return OperationCode::AtomicImageOr; | ||
| 94 | case Tegra::Shader::ImageAtomicOperation::Xor: | ||
| 95 | return OperationCode::AtomicImageXor; | ||
| 96 | case Tegra::Shader::ImageAtomicOperation::Exch: | ||
| 97 | return OperationCode::AtomicImageExchange; | ||
| 98 | default: | ||
| 99 | UNIMPLEMENTED_MSG("Unimplemented operation={}", | ||
| 100 | static_cast<u32>(instr.suatom_d.operation.Value())); | ||
| 101 | return OperationCode::AtomicImageAdd; | ||
| 102 | } | ||
| 103 | }(); | ||
| 104 | |||
| 105 | const auto& image{GetImage(instr.image, instr.suatom_d.image_type, instr.suatom_d.size)}; | ||
| 106 | MetaImage meta{image, {std::move(value)}}; | ||
| 107 | SetRegister(bb, instr.gpr0, Operation(operation_code, meta, std::move(coords))); | ||
| 71 | break; | 108 | break; |
| 72 | } | 109 | } |
| 73 | default: | 110 | default: |
| @@ -77,38 +114,51 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { | |||
| 77 | return pc; | 114 | return pc; |
| 78 | } | 115 | } |
| 79 | 116 | ||
| 80 | Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) { | 117 | Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type, |
| 81 | const auto offset{static_cast<u64>(image.index.Value())}; | 118 | std::optional<Tegra::Shader::ImageAtomicSize> size) { |
| 82 | 119 | const auto offset{static_cast<std::size_t>(image.index.Value())}; | |
| 83 | // If this image has already been used, return the existing mapping. | 120 | if (const auto image = TryUseExistingImage(offset, type, size)) { |
| 84 | const auto it = used_images.find(offset); | 121 | return *image; |
| 85 | if (it != used_images.end()) { | ||
| 86 | ASSERT(it->second.GetType() == type); | ||
| 87 | return it->second; | ||
| 88 | } | 122 | } |
| 89 | 123 | ||
| 90 | // Otherwise create a new mapping for this image. | ||
| 91 | const std::size_t next_index{used_images.size()}; | 124 | const std::size_t next_index{used_images.size()}; |
| 92 | return used_images.emplace(offset, Image{offset, next_index, type}).first->second; | 125 | return used_images.emplace(offset, Image{offset, next_index, type, size}).first->second; |
| 93 | } | 126 | } |
| 94 | 127 | ||
| 95 | Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) { | 128 | Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type, |
| 129 | std::optional<Tegra::Shader::ImageAtomicSize> size) { | ||
| 96 | const Node image_register{GetRegister(reg)}; | 130 | const Node image_register{GetRegister(reg)}; |
| 97 | const auto [base_image, cbuf_index, cbuf_offset]{ | 131 | const auto [base_image, cbuf_index, cbuf_offset]{ |
| 98 | TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))}; | 132 | TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))}; |
| 99 | const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)}; | 133 | const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)}; |
| 100 | 134 | ||
| 101 | // If this image has already been used, return the existing mapping. | 135 | if (const auto image = TryUseExistingImage(cbuf_key, type, size)) { |
| 102 | const auto it = used_images.find(cbuf_key); | 136 | return *image; |
| 103 | if (it != used_images.end()) { | ||
| 104 | ASSERT(it->second.GetType() == type); | ||
| 105 | return it->second; | ||
| 106 | } | 137 | } |
| 107 | 138 | ||
| 108 | // Otherwise create a new mapping for this image. | ||
| 109 | const std::size_t next_index{used_images.size()}; | 139 | const std::size_t next_index{used_images.size()}; |
| 110 | return used_images.emplace(cbuf_key, Image{cbuf_index, cbuf_offset, next_index, type}) | 140 | return used_images.emplace(cbuf_key, Image{cbuf_index, cbuf_offset, next_index, type, size}) |
| 111 | .first->second; | 141 | .first->second; |
| 112 | } | 142 | } |
| 113 | 143 | ||
| 144 | Image* ShaderIR::TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type, | ||
| 145 | std::optional<Tegra::Shader::ImageAtomicSize> size) { | ||
| 146 | auto it = used_images.find(offset); | ||
| 147 | if (it == used_images.end()) { | ||
| 148 | return nullptr; | ||
| 149 | } | ||
| 150 | auto& image = it->second; | ||
| 151 | ASSERT(image.GetType() == type); | ||
| 152 | |||
| 153 | if (size) { | ||
| 154 | // We know the size, if it's known it has to be the same as before, otherwise we can set it. | ||
| 155 | if (image.IsSizeKnown()) { | ||
| 156 | ASSERT(image.GetSize() == size); | ||
| 157 | } else { | ||
| 158 | image.SetSize(*size); | ||
| 159 | } | ||
| 160 | } | ||
| 161 | return &image; | ||
| 162 | } | ||
| 163 | |||
| 114 | } // namespace VideoCommon::Shader | 164 | } // namespace VideoCommon::Shader |