summaryrefslogtreecommitdiff
path: root/src/video_core/shader/decode
diff options
context:
space:
mode:
authorGravatar ReinUsesLisp2019-09-18 01:50:40 -0300
committerGravatar ReinUsesLisp2019-09-21 17:33:52 -0300
commit44000971e271e350638611b0265a3fed7bcced2a (patch)
treeb224df1c5477a7e31cb0176d9299b635c6363f61 /src/video_core/shader/decode
parentshader/image: Implement SULD and remove irrelevant code (diff)
downloadyuzu-44000971e271e350638611b0265a3fed7bcced2a.tar.gz
yuzu-44000971e271e350638611b0265a3fed7bcced2a.tar.xz
yuzu-44000971e271e350638611b0265a3fed7bcced2a.zip
gl_shader_decompiler: Use uint for images and fix SUATOM
In the process, remove the implementations of SUATOM.MIN and SUATOM.MAX, as these require a distinction between U32 and S32. These have to be implemented with an imageCompSwap loop.
Diffstat (limited to 'src/video_core/shader/decode')
-rw-r--r--src/video_core/shader/decode/image.cpp66
1 file changed, 29 insertions, 37 deletions
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index e611f9f3b..95ec1cdd9 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -101,32 +101,35 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
101 UNIMPLEMENTED_IF(instr.suatom_d.is_ba != 0); 101 UNIMPLEMENTED_IF(instr.suatom_d.is_ba != 0);
102 102
103 const OperationCode operation_code = [instr] { 103 const OperationCode operation_code = [instr] {
104 switch (instr.suatom_d.operation) { 104 switch (instr.suatom_d.operation_type) {
105 case Tegra::Shader::ImageAtomicOperation::Add: 105 case Tegra::Shader::ImageAtomicOperationType::S32:
106 return OperationCode::AtomicImageAdd; 106 case Tegra::Shader::ImageAtomicOperationType::U32:
107 case Tegra::Shader::ImageAtomicOperation::Min: 107 switch (instr.suatom_d.operation) {
108 return OperationCode::AtomicImageMin; 108 case Tegra::Shader::ImageAtomicOperation::Add:
109 case Tegra::Shader::ImageAtomicOperation::Max: 109 return OperationCode::AtomicImageAdd;
110 return OperationCode::AtomicImageMax; 110 case Tegra::Shader::ImageAtomicOperation::And:
111 case Tegra::Shader::ImageAtomicOperation::And: 111 return OperationCode::AtomicImageAnd;
112 return OperationCode::AtomicImageAnd; 112 case Tegra::Shader::ImageAtomicOperation::Or:
113 case Tegra::Shader::ImageAtomicOperation::Or: 113 return OperationCode::AtomicImageOr;
114 return OperationCode::AtomicImageOr; 114 case Tegra::Shader::ImageAtomicOperation::Xor:
115 case Tegra::Shader::ImageAtomicOperation::Xor: 115 return OperationCode::AtomicImageXor;
116 return OperationCode::AtomicImageXor; 116 case Tegra::Shader::ImageAtomicOperation::Exch:
117 case Tegra::Shader::ImageAtomicOperation::Exch: 117 return OperationCode::AtomicImageExchange;
118 return OperationCode::AtomicImageExchange; 118 }
119 default: 119 default:
120 UNIMPLEMENTED_MSG("Unimplemented operation={}", 120 break;
121 static_cast<u32>(instr.suatom_d.operation.Value()));
122 return OperationCode::AtomicImageAdd;
123 } 121 }
122 UNIMPLEMENTED_MSG("Unimplemented operation={} type={}",
123 static_cast<u64>(instr.suatom_d.operation.Value()),
124 static_cast<u64>(instr.suatom_d.operation_type.Value()));
125 return OperationCode::AtomicImageAdd;
124 }(); 126 }();
125 127
126 Node value = GetRegister(instr.gpr0); 128 Node value = GetRegister(instr.gpr0);
127 129
128 const auto type = instr.suatom_d.image_type; 130 const auto type = instr.suatom_d.image_type;
129 const auto& image{GetImage(instr.image, type, instr.suatom_d.size)}; 131 auto& image = GetImage(instr.image, type);
132 image.MarkAtomic();
130 133
131 MetaImage meta{image, {std::move(value)}}; 134 MetaImage meta{image, {std::move(value)}};
132 SetRegister(bb, instr.gpr0, Operation(operation_code, meta, GetCoordinates(type))); 135 SetRegister(bb, instr.gpr0, Operation(operation_code, meta, GetCoordinates(type)));
@@ -139,35 +142,32 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
139 return pc; 142 return pc;
140} 143}
141 144
142Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type, 145Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
143 std::optional<Tegra::Shader::ImageAtomicSize> size) {
144 const auto offset{static_cast<std::size_t>(image.index.Value())}; 146 const auto offset{static_cast<std::size_t>(image.index.Value())};
145 if (const auto image = TryUseExistingImage(offset, type, size)) { 147 if (const auto image = TryUseExistingImage(offset, type)) {
146 return *image; 148 return *image;
147 } 149 }
148 150
149 const std::size_t next_index{used_images.size()}; 151 const std::size_t next_index{used_images.size()};
150 return used_images.emplace(offset, Image{offset, next_index, type, size}).first->second; 152 return used_images.emplace(offset, Image{offset, next_index, type}).first->second;
151} 153}
152 154
153Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type, 155Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) {
154 std::optional<Tegra::Shader::ImageAtomicSize> size) {
155 const Node image_register{GetRegister(reg)}; 156 const Node image_register{GetRegister(reg)};
156 const auto [base_image, cbuf_index, cbuf_offset]{ 157 const auto [base_image, cbuf_index, cbuf_offset]{
157 TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))}; 158 TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))};
158 const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)}; 159 const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)};
159 160
160 if (const auto image = TryUseExistingImage(cbuf_key, type, size)) { 161 if (const auto image = TryUseExistingImage(cbuf_key, type)) {
161 return *image; 162 return *image;
162 } 163 }
163 164
164 const std::size_t next_index{used_images.size()}; 165 const std::size_t next_index{used_images.size()};
165 return used_images.emplace(cbuf_key, Image{cbuf_index, cbuf_offset, next_index, type, size}) 166 return used_images.emplace(cbuf_key, Image{cbuf_index, cbuf_offset, next_index, type})
166 .first->second; 167 .first->second;
167} 168}
168 169
169Image* ShaderIR::TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type, 170Image* ShaderIR::TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type) {
170 std::optional<Tegra::Shader::ImageAtomicSize> size) {
171 auto it = used_images.find(offset); 171 auto it = used_images.find(offset);
172 if (it == used_images.end()) { 172 if (it == used_images.end()) {
173 return nullptr; 173 return nullptr;
@@ -175,14 +175,6 @@ Image* ShaderIR::TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type,
175 auto& image = it->second; 175 auto& image = it->second;
176 ASSERT(image.GetType() == type); 176 ASSERT(image.GetType() == type);
177 177
178 if (size) {
179 // We know the size, if it's known it has to be the same as before, otherwise we can set it.
180 if (image.IsSizeKnown()) {
181 ASSERT(image.GetSize() == size);
182 } else {
183 image.SetSize(*size);
184 }
185 }
186 return &image; 178 return &image;
187} 179}
188 180