author    lat9nq    2021-04-05 22:25:22 -0400
committer ameerj    2021-07-22 21:51:26 -0400
commit    0bb85f6a753c769266c95c4ba146b25b9eaaaffd
tree      e5d818ae7dc1d0025bb115c7a63235d866e53286 /src/shader_recompiler/ir_opt
parent    shader: Fix FCMP immediate variant
shader_recompiler,video_core: Cleanup some GCC and Clang errors
Mostly fixing unused *, implicit conversion, braced scalar init, fpermissive, and some others. Some Clang errors likely remain in video_core, and std::ranges is still a pertinent issue in shader_recompiler.

shader_recompiler: cmake: Force bracket depth to 1024 on Clang
Increases the maximum fold expression depth

thread_worker: Include condition_variable

Don't use list initializers in control flow

Co-authored-by: ReinUsesLisp <reinuseslisp@airmail.cc>
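Two mechanical changes account for most of the hunks below: every Inst::Opcode() call becomes Inst::GetOpcode(), and brace-initialized scalar members in designated initializers become plain `=` assignments. A minimal sketch of the diagnostics behind both, using invented stand-in types rather than the project's real definitions:

#include <cstdint>

enum class Opcode : std::uint32_t { IAdd32, IMul32 };

class Inst {
public:
    // Before: `Opcode Opcode() const;` -- GCC rejects this because the member
    // declaration changes what the name `Opcode` means inside the class scope:
    //   error: declaration of 'Opcode Inst::Opcode() const' changes meaning of 'Opcode'
    Opcode GetOpcode() const {
        return op;
    }

private:
    Opcode op{};
};

struct Descriptor {
    std::uint32_t index;
    std::uint32_t count;
};

Descriptor MakeDescriptor(std::uint32_t index) {
    return Descriptor{
        // Before: `.index{index}` -- the "braced scalar init" pattern the
        // commit message calls out (Clang warns under -Wbraced-scalar-init).
        .index = index,
        .count = 1,
    };
}

Renaming the accessor sidesteps GCC's rule that a declaration in class scope may not change the meaning of a name already used there, presumably the fpermissive-related error the message refers to; switching to `=` silences the braced-scalar-init warning without changing behavior.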
Diffstat (limited to 'src/shader_recompiler/ir_opt')
-rw-r--r--  src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp             | 20
-rw-r--r--  src/shader_recompiler/ir_opt/constant_propagation_pass.cpp            | 49
-rw-r--r--  src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp | 42
-rw-r--r--  src/shader_recompiler/ir_opt/identity_removal_pass.cpp                |  3
-rw-r--r--  src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp                   |  2
-rw-r--r--  src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp                     |  4
-rw-r--r--  src/shader_recompiler/ir_opt/texture_pass.cpp                         | 32
-rw-r--r--  src/shader_recompiler/ir_opt/verification_pass.cpp                    |  4
8 files changed, 80 insertions, 76 deletions
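The cmake note in the message, "Force bracket depth to 1024 on Clang", concerns Clang's -fbracket-depth limit on nested parentheses, brackets, and braces, which defaults to 256. Expanding a fold expression nests one pair of parentheses per pack element, so a large enough pack exceeds the default. A hypothetical sketch of code that would need the raised limit, not taken from this diff:

#include <cstddef>
#include <utility>

// Expanding (I + ... + 0) over N pack elements produces roughly N nested
// parentheses, so N > 256 exceeds Clang's default -fbracket-depth and the
// build must raise the limit (the commit message forces 1024).
template <std::size_t... I>
constexpr std::size_t SumIndices(std::index_sequence<I...>) {
    return (I + ... + std::size_t{0});
}

// With 512 elements, this exceeds Clang's default limit of 256.
static_assert(SumIndices(std::make_index_sequence<512>{}) == 512 * 511 / 2);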
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 1c03ee82a..edbfcd308 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -6,6 +6,7 @@
 #include "shader_recompiler/frontend/ir/microinstruction.h"
 #include "shader_recompiler/frontend/ir/modifiers.h"
 #include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/ir_opt/passes.h"
 #include "shader_recompiler/shader_info.h"
 
 namespace Shader::Optimization {
@@ -22,8 +23,8 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
     auto& cbufs{info.constant_buffer_descriptors};
     cbufs.insert(std::ranges::lower_bound(cbufs, index, {}, &ConstantBufferDescriptor::index),
                  ConstantBufferDescriptor{
-                     .index{index},
-                     .count{1},
+                     .index = index,
+                     .count = 1,
                  });
 }
 
@@ -91,7 +92,7 @@ void SetAttribute(Info& info, IR::Attribute attribute) {
 }
 
 void VisitUsages(Info& info, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::CompositeConstructF16x2:
    case IR::Opcode::CompositeConstructF16x3:
    case IR::Opcode::CompositeConstructF16x4:
@@ -209,7 +210,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
    default:
        break;
    }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::GetCbufU8:
    case IR::Opcode::GetCbufS8:
    case IR::Opcode::UndefU8:
@@ -236,7 +237,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
    default:
        break;
    }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::GetCbufU16:
    case IR::Opcode::GetCbufS16:
    case IR::Opcode::UndefU16:
@@ -271,7 +272,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
    default:
        break;
    }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::UndefU64:
    case IR::Opcode::LoadGlobalU8:
    case IR::Opcode::LoadGlobalS8:
@@ -314,7 +315,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
    default:
        break;
    }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::DemoteToHelperInvocation:
        info.uses_demote_to_helper_invocation = true;
        break;
@@ -361,7 +362,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
    } else {
        throw NotImplementedException("Constant buffer with non-immediate index");
    }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::GetCbufU8:
    case IR::Opcode::GetCbufS8:
        info.used_constant_buffer_types |= IR::Type::U8;
@@ -443,7 +444,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
 }
 
 void VisitFpModifiers(Info& info, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::FPAdd16:
    case IR::Opcode::FPFma16:
    case IR::Opcode::FPMul16:
@@ -540,7 +541,6 @@ void GatherInfoFromHeader(Environment& env, Info& info) {
        info.stores_position |= header.vtg.omap_systemb.position != 0;
    }
 }
-
 } // Anonymous namespace
 
 void CollectShaderInfoPass(Environment& env, IR::Program& program) {
diff --git a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
index 1720d7a09..61fbbe04c 100644
--- a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
+++ b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
@@ -58,7 +58,7 @@ bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
    }
    if (is_lhs_immediate && !is_rhs_immediate) {
        IR::Inst* const rhs_inst{rhs.InstRecursive()};
-        if (rhs_inst->Opcode() == inst.Opcode() && rhs_inst->Arg(1).IsImmediate()) {
+        if (rhs_inst->GetOpcode() == inst.GetOpcode() && rhs_inst->Arg(1).IsImmediate()) {
            const auto combined{imm_fn(Arg<T>(lhs), Arg<T>(rhs_inst->Arg(1)))};
            inst.SetArg(0, rhs_inst->Arg(0));
            inst.SetArg(1, IR::Value{combined});
@@ -70,7 +70,7 @@ bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
    }
    if (!is_lhs_immediate && is_rhs_immediate) {
        const IR::Inst* const lhs_inst{lhs.InstRecursive()};
-        if (lhs_inst->Opcode() == inst.Opcode() && lhs_inst->Arg(1).IsImmediate()) {
+        if (lhs_inst->GetOpcode() == inst.GetOpcode() && lhs_inst->Arg(1).IsImmediate()) {
            const auto combined{imm_fn(Arg<T>(rhs), Arg<T>(lhs_inst->Arg(1)))};
            inst.SetArg(0, lhs_inst->Arg(0));
            inst.SetArg(1, IR::Value{combined});
@@ -123,7 +123,8 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
        return false;
    }
    IR::Inst* const lhs_shl{lhs_arg.InstRecursive()};
-    if (lhs_shl->Opcode() != IR::Opcode::ShiftLeftLogical32 || lhs_shl->Arg(1) != IR::Value{16U}) {
+    if (lhs_shl->GetOpcode() != IR::Opcode::ShiftLeftLogical32 ||
+        lhs_shl->Arg(1) != IR::Value{16U}) {
        return false;
    }
    if (lhs_shl->Arg(0).IsImmediate()) {
@@ -131,7 +132,7 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
    }
    IR::Inst* const lhs_mul{lhs_shl->Arg(0).InstRecursive()};
    IR::Inst* const rhs_mul{rhs_arg.InstRecursive()};
-    if (lhs_mul->Opcode() != IR::Opcode::IMul32 || rhs_mul->Opcode() != IR::Opcode::IMul32) {
+    if (lhs_mul->GetOpcode() != IR::Opcode::IMul32 || rhs_mul->GetOpcode() != IR::Opcode::IMul32) {
        return false;
    }
    if (lhs_mul->Arg(1).Resolve() != rhs_mul->Arg(1).Resolve()) {
@@ -143,10 +144,10 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
    }
    IR::Inst* const lhs_bfe{lhs_mul->Arg(0).InstRecursive()};
    IR::Inst* const rhs_bfe{rhs_mul->Arg(0).InstRecursive()};
-    if (lhs_bfe->Opcode() != IR::Opcode::BitFieldUExtract) {
+    if (lhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
        return false;
    }
-    if (rhs_bfe->Opcode() != IR::Opcode::BitFieldUExtract) {
+    if (rhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
        return false;
    }
    if (lhs_bfe->Arg(1) != IR::Value{16U} || lhs_bfe->Arg(2) != IR::Value{16U}) {
@@ -194,8 +195,9 @@ void FoldISub32(IR::Inst& inst) {
    // ISub32 is generally used to subtract two constant buffers, compare and replace this with
    // zero if they equal.
    const auto equal_cbuf{[](IR::Inst* a, IR::Inst* b) {
-        return a->Opcode() == IR::Opcode::GetCbufU32 && b->Opcode() == IR::Opcode::GetCbufU32 &&
-               a->Arg(0) == b->Arg(0) && a->Arg(1) == b->Arg(1);
+        return a->GetOpcode() == IR::Opcode::GetCbufU32 &&
+               b->GetOpcode() == IR::Opcode::GetCbufU32 && a->Arg(0) == b->Arg(0) &&
+               a->Arg(1) == b->Arg(1);
    }};
    IR::Inst* op_a{inst.Arg(0).InstRecursive()};
    IR::Inst* op_b{inst.Arg(1).InstRecursive()};
@@ -204,15 +206,15 @@ void FoldISub32(IR::Inst& inst) {
        return;
    }
    // It's also possible a value is being added to a cbuf and then subtracted
-    if (op_b->Opcode() == IR::Opcode::IAdd32) {
+    if (op_b->GetOpcode() == IR::Opcode::IAdd32) {
        // Canonicalize local variables to simplify the following logic
        std::swap(op_a, op_b);
    }
-    if (op_b->Opcode() != IR::Opcode::GetCbufU32) {
+    if (op_b->GetOpcode() != IR::Opcode::GetCbufU32) {
        return;
    }
    IR::Inst* const inst_cbuf{op_b};
-    if (op_a->Opcode() != IR::Opcode::IAdd32) {
+    if (op_a->GetOpcode() != IR::Opcode::IAdd32) {
        return;
    }
    IR::Value add_op_a{op_a->Arg(0)};
@@ -250,7 +252,8 @@ void FoldFPMul32(IR::Inst& inst) {
    }
    IR::Inst* const lhs_op{lhs_value.InstRecursive()};
    IR::Inst* const rhs_op{rhs_value.InstRecursive()};
-    if (lhs_op->Opcode() != IR::Opcode::FPMul32 || rhs_op->Opcode() != IR::Opcode::FPRecip32) {
+    if (lhs_op->GetOpcode() != IR::Opcode::FPMul32 ||
+        rhs_op->GetOpcode() != IR::Opcode::FPRecip32) {
        return;
    }
    const IR::Value recip_source{rhs_op->Arg(0)};
@@ -260,8 +263,8 @@ void FoldFPMul32(IR::Inst& inst) {
    }
    IR::Inst* const attr_a{recip_source.InstRecursive()};
    IR::Inst* const attr_b{lhs_mul_source.InstRecursive()};
-    if (attr_a->Opcode() != IR::Opcode::GetAttribute ||
-        attr_b->Opcode() != IR::Opcode::GetAttribute) {
+    if (attr_a->GetOpcode() != IR::Opcode::GetAttribute ||
+        attr_b->GetOpcode() != IR::Opcode::GetAttribute) {
        return;
    }
    if (attr_a->Arg(0).Attribute() == attr_b->Arg(0).Attribute()) {
@@ -304,7 +307,7 @@ void FoldLogicalNot(IR::Inst& inst) {
        return;
    }
    IR::Inst* const arg{value.InstRecursive()};
-    if (arg->Opcode() == IR::Opcode::LogicalNot) {
+    if (arg->GetOpcode() == IR::Opcode::LogicalNot) {
        inst.ReplaceUsesWith(arg->Arg(0));
    }
 }
@@ -317,12 +320,12 @@ void FoldBitCast(IR::Inst& inst, IR::Opcode reverse) {
        return;
    }
    IR::Inst* const arg_inst{value.InstRecursive()};
-    if (arg_inst->Opcode() == reverse) {
+    if (arg_inst->GetOpcode() == reverse) {
        inst.ReplaceUsesWith(arg_inst->Arg(0));
        return;
    }
    if constexpr (op == IR::Opcode::BitCastF32U32) {
-        if (arg_inst->Opcode() == IR::Opcode::GetCbufU32) {
+        if (arg_inst->GetOpcode() == IR::Opcode::GetCbufU32) {
            // Replace the bitcast with a typed constant buffer read
            inst.ReplaceOpcode(IR::Opcode::GetCbufF32);
            inst.SetArg(0, arg_inst->Arg(0));
@@ -338,7 +341,7 @@ void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
        return;
    }
    IR::Inst* const arg_inst{value.InstRecursive()};
-    if (arg_inst->Opcode() == reverse) {
+    if (arg_inst->GetOpcode() == reverse) {
        inst.ReplaceUsesWith(arg_inst->Arg(0));
        return;
    }
@@ -347,7 +350,7 @@ void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
 template <typename Func, size_t... I>
 IR::Value EvalImmediates(const IR::Inst& inst, Func&& func, std::index_sequence<I...>) {
    using Traits = LambdaTraits<decltype(func)>;
-    return IR::Value{func(Arg<Traits::ArgType<I>>(inst.Arg(I))...)};
+    return IR::Value{func(Arg<typename Traits::template ArgType<I>>(inst.Arg(I))...)};
 }
 
 void FoldBranchConditional(IR::Inst& inst) {
@@ -357,7 +360,7 @@ void FoldBranchConditional(IR::Inst& inst) {
        return;
    }
    const IR::Inst* cond_inst{cond.InstRecursive()};
-    if (cond_inst->Opcode() == IR::Opcode::LogicalNot) {
+    if (cond_inst->GetOpcode() == IR::Opcode::LogicalNot) {
        const IR::Value true_label{inst.Arg(1)};
        const IR::Value false_label{inst.Arg(2)};
        // Remove negation on the conditional (take the parameter out of LogicalNot) and swap
@@ -371,10 +374,10 @@ void FoldBranchConditional(IR::Inst& inst) {
 std::optional<IR::Value> FoldCompositeExtractImpl(IR::Value inst_value, IR::Opcode insert,
                                                   IR::Opcode construct, u32 first_index) {
    IR::Inst* const inst{inst_value.InstRecursive()};
-    if (inst->Opcode() == construct) {
+    if (inst->GetOpcode() == construct) {
        return inst->Arg(first_index);
    }
-    if (inst->Opcode() != insert) {
+    if (inst->GetOpcode() != insert) {
        return std::nullopt;
    }
    IR::Value value_index{inst->Arg(2)};
@@ -410,7 +413,7 @@ void FoldCompositeExtract(IR::Inst& inst, IR::Opcode construct, IR::Opcode inser
 }
 
 void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::GetRegister:
        return FoldGetRegister(inst);
    case IR::Opcode::GetPred:
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 0858a0bdd..90a65dd16 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -57,7 +57,7 @@ struct StorageInfo {
 
 /// Returns true when the instruction is a global memory instruction
 bool IsGlobalMemory(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::LoadGlobalS8:
    case IR::Opcode::LoadGlobalU8:
    case IR::Opcode::LoadGlobalS16:
@@ -80,7 +80,7 @@ bool IsGlobalMemory(const IR::Inst& inst) {
 
 /// Returns true when the instruction is a global memory instruction
 bool IsGlobalMemoryWrite(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::WriteGlobalS8:
    case IR::Opcode::WriteGlobalU8:
    case IR::Opcode::WriteGlobalS16:
@@ -140,7 +140,7 @@ bool MeetsBias(const StorageBufferAddr& storage_buffer, const Bias& bias) noexce
 void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
    IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
    const IR::Value zero{u32{0}};
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::LoadGlobalS8:
    case IR::Opcode::LoadGlobalU8:
    case IR::Opcode::LoadGlobalS16:
@@ -164,7 +164,7 @@ void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
        inst.Invalidate();
        break;
    default:
-        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.Opcode());
+        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.GetOpcode());
    }
 }
 
@@ -184,7 +184,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
    // This address is expected to either be a PackUint2x32 or a IAdd64
    IR::Inst* addr_inst{addr.InstRecursive()};
    s32 imm_offset{0};
-    if (addr_inst->Opcode() == IR::Opcode::IAdd64) {
+    if (addr_inst->GetOpcode() == IR::Opcode::IAdd64) {
        // If it's an IAdd64, get the immediate offset it is applying and grab the address
        // instruction. This expects for the instruction to be canonicalized having the address on
        // the first argument and the immediate offset on the second one.
@@ -200,7 +200,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
        addr_inst = iadd_addr.Inst();
    }
    // With IAdd64 handled, now PackUint2x32 is expected without exceptions
-    if (addr_inst->Opcode() != IR::Opcode::PackUint2x32) {
+    if (addr_inst->GetOpcode() != IR::Opcode::PackUint2x32) {
        return std::nullopt;
    }
    // PackUint2x32 is expected to be generated from a vector
@@ -210,20 +210,20 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
    }
    // This vector is expected to be a CompositeConstructU32x2
    IR::Inst* const vector_inst{vector.InstRecursive()};
-    if (vector_inst->Opcode() != IR::Opcode::CompositeConstructU32x2) {
+    if (vector_inst->GetOpcode() != IR::Opcode::CompositeConstructU32x2) {
        return std::nullopt;
    }
    // Grab the first argument from the CompositeConstructU32x2, this is the low address.
    return LowAddrInfo{
        .value{IR::U32{vector_inst->Arg(0)}},
-        .imm_offset{imm_offset},
+        .imm_offset = imm_offset,
    };
 }
 
 /// Tries to track the storage buffer address used by a global memory instruction
 std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias) {
    const auto pred{[bias](const IR::Inst* inst) -> std::optional<StorageBufferAddr> {
-        if (inst->Opcode() != IR::Opcode::GetCbufU32) {
+        if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
            return std::nullopt;
        }
        const IR::Value index{inst->Arg(0)};
@@ -256,9 +256,9 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
    // NVN puts storage buffers in a specific range, we have to bias towards these addresses to
    // avoid getting false positives
    static constexpr Bias nvn_bias{
-        .index{0},
-        .offset_begin{0x110},
-        .offset_end{0x610},
+        .index = 0,
+        .offset_begin = 0x110,
+        .offset_end = 0x610,
    };
    // Track the low address of the instruction
    const std::optional<LowAddrInfo> low_addr_info{TrackLowAddress(&inst)};
@@ -286,8 +286,8 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
    info.set.insert(*storage_buffer);
    info.to_replace.push_back(StorageInst{
        .storage_buffer{*storage_buffer},
-        .inst{&inst},
-        .block{&block},
+        .inst = &inst,
+        .block = &block,
    });
 }
 
@@ -312,7 +312,7 @@ IR::U32 StorageOffset(IR::Block& block, IR::Inst& inst, StorageBufferAddr buffer
 /// Replace a global memory load instruction with its storage buffer equivalent
 void ReplaceLoad(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
                  const IR::U32& offset) {
-    const IR::Opcode new_opcode{GlobalToStorage(inst.Opcode())};
+    const IR::Opcode new_opcode{GlobalToStorage(inst.GetOpcode())};
    const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
    const IR::Value value{&*block.PrependNewInst(it, new_opcode, {storage_index, offset})};
    inst.ReplaceUsesWith(value);
@@ -321,7 +321,7 @@ void ReplaceLoad(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
 /// Replace a global memory write instruction with its storage buffer equivalent
 void ReplaceWrite(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
                   const IR::U32& offset) {
-    const IR::Opcode new_opcode{GlobalToStorage(inst.Opcode())};
+    const IR::Opcode new_opcode{GlobalToStorage(inst.GetOpcode())};
    const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
    block.PrependNewInst(it, new_opcode, {storage_index, offset, inst.Arg(1)});
    inst.Invalidate();
@@ -330,7 +330,7 @@ void ReplaceWrite(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index
 /// Replace a global memory instruction with its storage buffer equivalent
 void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
              const IR::U32& offset) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::LoadGlobalS8:
    case IR::Opcode::LoadGlobalU8:
    case IR::Opcode::LoadGlobalS16:
@@ -348,7 +348,7 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
    case IR::Opcode::WriteGlobal128:
        return ReplaceWrite(block, inst, storage_index, offset);
    default:
-        throw InvalidArgument("Invalid global memory opcode {}", inst.Opcode());
+        throw InvalidArgument("Invalid global memory opcode {}", inst.GetOpcode());
    }
 }
 } // Anonymous namespace
@@ -366,9 +366,9 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program) {
    u32 storage_index{};
    for (const StorageBufferAddr& storage_buffer : info.set) {
        program.info.storage_buffers_descriptors.push_back({
-            .cbuf_index{storage_buffer.index},
-            .cbuf_offset{storage_buffer.offset},
-            .count{1},
+            .cbuf_index = storage_buffer.index,
+            .cbuf_offset = storage_buffer.offset,
+            .count = 1,
            .is_written{info.writes.contains(storage_buffer)},
        });
        ++storage_index;
diff --git a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
index 8790b48f2..38af72dfe 100644
--- a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
+++ b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
@@ -22,7 +22,8 @@ void IdentityRemovalPass(IR::Program& program) {
                    inst->SetArg(i, arg.Inst()->Arg(0));
                }
            }
-            if (inst->Opcode() == IR::Opcode::Identity || inst->Opcode() == IR::Opcode::Void) {
+            if (inst->GetOpcode() == IR::Opcode::Identity ||
+                inst->GetOpcode() == IR::Opcode::Void) {
                to_invalidate.push_back(&*inst);
                inst = block->Instructions().erase(inst);
            } else {
diff --git a/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp b/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
index 0d2c91ed6..52576b07f 100644
--- a/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
+++ b/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
@@ -123,7 +123,7 @@ IR::Opcode Replace(IR::Opcode op) {
 void LowerFp16ToFp32(IR::Program& program) {
    for (IR::Block* const block : program.blocks) {
        for (IR::Inst& inst : block->Instructions()) {
-            inst.ReplaceOpcode(Replace(inst.Opcode()));
+            inst.ReplaceOpcode(Replace(inst.GetOpcode()));
        }
    }
 }
diff --git a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
index ca36253d1..346fcc377 100644
--- a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
+++ b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
@@ -116,7 +116,7 @@ IR::Opcode UndefOpcode(IndirectBranchVariable) noexcept {
 }
 
 [[nodiscard]] bool IsPhi(const IR::Inst& inst) noexcept {
-    return inst.Opcode() == IR::Opcode::Phi;
+    return inst.GetOpcode() == IR::Opcode::Phi;
 }
 
 enum class Status {
@@ -278,7 +278,7 @@ private:
 };
 
 void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::SetRegister:
        if (const IR::Reg reg{inst.Arg(0).Reg()}; reg != IR::Reg::RZ) {
            pass.WriteVariable(reg, block, inst.Arg(1));
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 290ce4179..c8aee3d3d 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -30,7 +30,7 @@ struct TextureInst {
 using TextureInstVector = boost::container::small_vector<TextureInst, 24>;
 
 IR::Opcode IndexedInstruction(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::BindlessImageSampleImplicitLod:
    case IR::Opcode::BoundImageSampleImplicitLod:
        return IR::Opcode::ImageSampleImplicitLod;
@@ -67,7 +67,7 @@ IR::Opcode IndexedInstruction(const IR::Inst& inst) {
 }
 
 bool IsBindless(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
    case IR::Opcode::BindlessImageSampleImplicitLod:
    case IR::Opcode::BindlessImageSampleExplicitLod:
    case IR::Opcode::BindlessImageSampleDrefImplicitLod:
@@ -91,7 +91,7 @@ bool IsBindless(const IR::Inst& inst) {
    case IR::Opcode::BoundImageGradient:
        return false;
    default:
-        throw InvalidArgument("Invalid opcode {}", inst.Opcode());
+        throw InvalidArgument("Invalid opcode {}", inst.GetOpcode());
    }
 }
 
@@ -100,7 +100,7 @@ bool IsTextureInstruction(const IR::Inst& inst) {
 }
 
 std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
-    if (inst->Opcode() != IR::Opcode::GetCbufU32) {
+    if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
        return std::nullopt;
    }
    const IR::Value index{inst->Arg(0)};
@@ -134,14 +134,14 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
        addr = *track_addr;
    } else {
        addr = ConstBufferAddr{
-            .index{env.TextureBoundBuffer()},
-            .offset{inst.Arg(0).U32()},
+            .index = env.TextureBoundBuffer(),
+            .offset = inst.Arg(0).U32(),
        };
    }
    return TextureInst{
        .cbuf{addr},
-        .inst{&inst},
-        .block{block},
+        .inst = &inst,
+        .block = block,
    };
 }
 
@@ -211,7 +211,7 @@ void TexturePass(Environment& env, IR::Program& program) {
 
        const auto& cbuf{texture_inst.cbuf};
        auto flags{inst->Flags<IR::TextureInstInfo>()};
-        switch (inst->Opcode()) {
+        switch (inst->GetOpcode()) {
        case IR::Opcode::ImageQueryDimensions:
            flags.type.Assign(env.ReadTextureType(cbuf.index, cbuf.offset));
            inst->SetFlags(flags);
@@ -235,16 +235,16 @@ void TexturePass(Environment& env, IR::Program& program) {
        u32 index;
        if (flags.type == TextureType::Buffer) {
            index = descriptors.Add(TextureBufferDescriptor{
-                .cbuf_index{cbuf.index},
-                .cbuf_offset{cbuf.offset},
-                .count{1},
+                .cbuf_index = cbuf.index,
+                .cbuf_offset = cbuf.offset,
+                .count = 1,
            });
        } else {
            index = descriptors.Add(TextureDescriptor{
-                .type{flags.type},
-                .cbuf_index{cbuf.index},
-                .cbuf_offset{cbuf.offset},
-                .count{1},
+                .type = flags.type,
+                .cbuf_index = cbuf.index,
+                .cbuf_offset = cbuf.offset,
+                .count = 1,
            });
        }
        inst->SetArg(0, IR::Value{index});
diff --git a/src/shader_recompiler/ir_opt/verification_pass.cpp b/src/shader_recompiler/ir_opt/verification_pass.cpp
index 4080b37cc..dbec96d84 100644
--- a/src/shader_recompiler/ir_opt/verification_pass.cpp
+++ b/src/shader_recompiler/ir_opt/verification_pass.cpp
@@ -14,14 +14,14 @@ namespace Shader::Optimization {
 static void ValidateTypes(const IR::Program& program) {
    for (const auto& block : program.blocks) {
        for (const IR::Inst& inst : *block) {
-            if (inst.Opcode() == IR::Opcode::Phi) {
+            if (inst.GetOpcode() == IR::Opcode::Phi) {
                // Skip validation on phi nodes
                continue;
            }
            const size_t num_args{inst.NumArgs()};
            for (size_t i = 0; i < num_args; ++i) {
                const IR::Type t1{inst.Arg(i).Type()};
-                const IR::Type t2{IR::ArgTypeOf(inst.Opcode(), i)};
+                const IR::Type t2{IR::ArgTypeOf(inst.GetOpcode(), i)};
                if (!IR::AreTypesCompatible(t1, t2)) {
                    throw LogicError("Invalid types in block:\n{}", IR::DumpBlock(*block));
                }
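The message's final item, "Don't use list initializers in control flow", is addressed in files outside ir_opt, so none of the hunks above show it. A hypothetical before/after of the pattern it appears to describe, with invented names:

// Hypothetical illustration only; the real occurrences live outside ir_opt.
int Compute();

void Example() {
    // Before: brace-initialized declaration inside the condition.
    // if (const int value{Compute()}) { /* ... */ }
    // After: plain `=` initialization, matching the rest of the cleanup.
    if (const int value = Compute()) {
        static_cast<void>(value);
    }
}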