path: root/src/shader_recompiler/backend/glsl
author    ameerj 2021-05-30 17:27:00 -0400
committer ameerj 2021-07-22 21:51:37 -0400
commit    1269a0cf8b3844c1a9bb06c843a7698b0a9643d5 (patch)
tree      a0716589fa3952bdeb0f1d19b4bb455d9cdd86e5 /src/shader_recompiler/backend/glsl
parent    glsl: Fix ATOM and implement ATOMS (diff)
glsl: Rework variable allocator to allow for variable reuse
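
In short (a reading of the diff below, not the author's own commit description): the old RegAlloc declared one register per SSA definition (R0, R1, ...) in the shader header, typed by whichever instruction defined it, and never reclaimed a slot. The new VarAlloc keeps a separate pool per GLSL type, names each variable with a type prefix plus a pool index, and returns a variable to its pool once its last IR use is consumed (VarAlloc::ConsumeInst below), so later definitions of the same type can reuse the slot. Illustrative header output only, with assumed counts:

    // before the rework: one declaration per definition
    uint R0;float R1;uint R2;
    // after: typed, zero-initialized pools sized to peak simultaneous use
    uint u_0=uint(0);uint u_1=uint(0);float f_0=float(0);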
Diffstat (limited to 'src/shader_recompiler/backend/glsl')
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_context.h                   |  36
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl.cpp                    |  29
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp             |  11
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp |   2
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp    |  14
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp              |  26
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp            |  10
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp             |  28
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp    |  10
-rw-r--r-- src/shader_recompiler/backend/glsl/reg_alloc.cpp                    | 191
-rw-r--r-- src/shader_recompiler/backend/glsl/reg_alloc.h                      |  84
-rw-r--r-- src/shader_recompiler/backend/glsl/var_alloc.cpp                    | 290
-rw-r--r-- src/shader_recompiler/backend/glsl/var_alloc.h                      | 100
13 files changed, 480 insertions(+), 351 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_context.h b/src/shader_recompiler/backend/glsl/emit_context.h
index 7ae7c8766..2f1062954 100644
--- a/src/shader_recompiler/backend/glsl/emit_context.h
+++ b/src/shader_recompiler/backend/glsl/emit_context.h
@@ -10,7 +10,7 @@
 
 #include <fmt/format.h>
 
-#include "shader_recompiler/backend/glsl/reg_alloc.h"
+#include "shader_recompiler/backend/glsl/var_alloc.h"
 #include "shader_recompiler/stage.h"
 
 namespace Shader {
@@ -35,81 +35,81 @@ public:
     explicit EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
                          const RuntimeInfo& runtime_info_);
 
-    template <Type type, typename... Args>
+    template <GlslVarType type, typename... Args>
     void Add(const char* format_str, IR::Inst& inst, Args&&... args) {
-        code += fmt::format(format_str, reg_alloc.Define(inst, type), std::forward<Args>(args)...);
+        code += fmt::format(format_str, var_alloc.Define(inst, type), std::forward<Args>(args)...);
         // TODO: Remove this
         code += '\n';
     }
 
     template <typename... Args>
     void AddU1(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U1>(format_str, inst, args...);
+        Add<GlslVarType::U1>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF16x2(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F16x2>(format_str, inst, args...);
+        Add<GlslVarType::F16x2>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32>(format_str, inst, args...);
+        Add<GlslVarType::U32>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddS32(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::S32>(format_str, inst, args...);
+        Add<GlslVarType::S32>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32>(format_str, inst, args...);
+        Add<GlslVarType::F32>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddS64(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::S64>(format_str, inst, args...);
+        Add<GlslVarType::S64>(format_str, inst, args...);
    }
 
     template <typename... Args>
     void AddU64(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U64>(format_str, inst, args...);
+        Add<GlslVarType::U64>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF64(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F64>(format_str, inst, args...);
+        Add<GlslVarType::F64>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32x2(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32x2>(format_str, inst, args...);
+        Add<GlslVarType::U32x2>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32x2(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32x2>(format_str, inst, args...);
+        Add<GlslVarType::F32x2>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32x3(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32x3>(format_str, inst, args...);
+        Add<GlslVarType::U32x3>(format_str, inst, args...);
    }
 
     template <typename... Args>
     void AddF32x3(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32x3>(format_str, inst, args...);
+        Add<GlslVarType::F32x3>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddU32x4(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::U32x4>(format_str, inst, args...);
+        Add<GlslVarType::U32x4>(format_str, inst, args...);
     }
 
     template <typename... Args>
     void AddF32x4(const char* format_str, IR::Inst& inst, Args&&... args) {
-        Add<Type::F32x4>(format_str, inst, args...);
+        Add<GlslVarType::F32x4>(format_str, inst, args...);
     }
 
     template <typename... Args>
@@ -121,7 +121,7 @@ public:
 
     std::string header;
     std::string code;
-    RegAlloc reg_alloc;
+    VarAlloc var_alloc;
     const Info& info;
     const Profile& profile;
     const RuntimeInfo& runtime_info;
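
To make the renamed template parameter concrete, here is a minimal, self-contained C++ sketch (not code from this commit) of the naming scheme the Add<GlslVarType> helpers rely on, mirroring TypePrefix() and Representation() from var_alloc.cpp further down; the truncated enum and the x_ fallback are placeholders:

    #include <cstdint>
    #include <iostream>
    #include <string>

    enum class GlslVarType : uint32_t { U1, F16x2, U32, S32, F32 /* ... */ };

    // Per-type prefix, as in TypePrefix() below.
    std::string Prefix(GlslVarType type) {
        switch (type) {
        case GlslVarType::U1:
            return "b_";
        case GlslVarType::U32:
            return "u_";
        case GlslVarType::F32:
            return "f_";
        default:
            return "x_"; // placeholder for the remaining cases
        }
    }

    // Prefix plus pool index, as in Representation() below.
    std::string Representation(uint32_t index, GlslVarType type) {
        return Prefix(type) + std::to_string(index);
    }

    int main() {
        std::cout << Representation(1, GlslVarType::U32) << '\n'; // u_1
        std::cout << Representation(2, GlslVarType::U32) << '\n'; // u_2
        std::cout << Representation(1, GlslVarType::F32) << '\n'; // f_1
    }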
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl.cpp b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
index bac4b830d..4304ee4d5 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
@@ -33,7 +33,7 @@ void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
 template <typename ArgType>
 auto Arg(EmitContext& ctx, const IR::Value& arg) {
     if constexpr (std::is_same_v<ArgType, std::string_view>) {
-        return ctx.reg_alloc.Consume(arg);
+        return ctx.var_alloc.Consume(arg);
     } else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
         return arg;
     } else if constexpr (std::is_same_v<ArgType, u32>) {
@@ -131,7 +131,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
             }
             break;
         case IR::AbstractSyntaxNode::Type::If:
-            ctx.Add("if ({}){{", ctx.reg_alloc.Consume(node.data.if_node.cond));
+            ctx.Add("if ({}){{", ctx.var_alloc.Consume(node.data.if_node.cond));
             break;
         case IR::AbstractSyntaxNode::Type::EndIf:
             ctx.Add("}}");
@@ -142,7 +142,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
                     ctx.Add("break;");
                 }
             } else {
-                ctx.Add("if({}){{break;}}", ctx.reg_alloc.Consume(node.data.break_node.cond));
+                ctx.Add("if({}){{break;}}", ctx.var_alloc.Consume(node.data.break_node.cond));
             }
             break;
         case IR::AbstractSyntaxNode::Type::Return:
@@ -153,7 +153,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
             ctx.Add("for(;;){{");
             break;
         case IR::AbstractSyntaxNode::Type::Repeat:
-            ctx.Add("if({}){{", ctx.reg_alloc.Consume(node.data.repeat.cond));
+            ctx.Add("if({}){{", ctx.var_alloc.Consume(node.data.repeat.cond));
             ctx.Add("continue;\n}}else{{");
             ctx.Add("break;\n}}\n}}");
             break;
@@ -171,6 +171,23 @@ std::string GlslVersionSpecifier(const EmitContext& ctx) {
     }
     return "";
 }
+
+void DefineVariables(const EmitContext& ctx, std::string& header) {
+    for (u32 i = 0; i < static_cast<u32>(GlslVarType::Void); ++i) {
+        const auto type{static_cast<GlslVarType>(i)};
+        const auto& tracker{ctx.var_alloc.GetUseTracker(type)};
+        const auto type_name{ctx.var_alloc.GetGlslType(type)};
+        // Temps/return types that are never used are stored at index 0
+        if (tracker.uses_temp) {
+            header += fmt::format("{}{}={}(0);", type_name, ctx.var_alloc.Representation(0, type),
+                                  type_name);
+        }
+        for (u32 index = 1; index <= tracker.num_used; ++index) {
+            header += fmt::format("{}{}={}(0);", type_name,
+                                  ctx.var_alloc.Representation(index, type), type_name);
+        }
+    }
+}
 } // Anonymous namespace
 
 std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR::Program& program,
@@ -190,9 +207,7 @@ std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR
     if (program.stage == Stage::VertexA || program.stage == Stage::VertexB) {
         ctx.header += "gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);";
     }
-    for (size_t index = 0; index < ctx.reg_alloc.num_used_registers; ++index) {
-        ctx.header += fmt::format("{} R{};", ctx.reg_alloc.reg_types[index], index);
-    }
+    DefineVariables(ctx, ctx.header);
     if (ctx.uses_cc_carry) {
         ctx.header += "uint carry;";
     }
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index 16791be84..918f90058 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -20,14 +20,14 @@ for (;;){{
 
 void SharedCasFunction(EmitContext& ctx, IR::Inst& inst, std::string_view offset,
                        std::string_view value, std::string_view function) {
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     const std::string smem{fmt::format("smem[{}/4]", offset)};
     ctx.Add(cas_loop.data(), ret, smem, ret, smem, function, smem, value, ret);
 }
 
 void SsboCasFunction(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                      const IR::Value& offset, std::string_view value, std::string_view function) {
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     const std::string ssbo{fmt::format("ssbo{}[{}]", binding.U32(), offset.U32())};
     ctx.Add(cas_loop.data(), ret, ssbo, ret, ssbo, function, ssbo, value, ret);
 }
@@ -36,7 +36,7 @@ void SsboCasFunctionF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& bindi
                         const IR::Value& offset, std::string_view value,
                         std::string_view function) {
     const std::string ssbo{fmt::format("ssbo{}[{}]", binding.U32(), offset.U32())};
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add(cas_loop.data(), ret, ssbo, ret, ssbo, function, ssbo, value, ret);
     ctx.AddF32("{}=uintBitsToFloat({});", inst, ret);
 }
@@ -102,9 +102,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
     // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
-    const auto ret{ctx.reg_alloc.Define(inst, Type::U64)};
-    ctx.Add("{}=packUint2x32(uvec2(smem[{}/4],smem[({}+4)/4]));", ret, pointer_offset,
-            pointer_offset);
+    ctx.AddU64("{}=packUint2x32(uvec2(smem[{}/4],smem[({}+4)/4]));", inst, pointer_offset,
+               pointer_offset);
     ctx.Add("smem[{}/4]=unpackUint2x32({}).x;smem[({}+4)/4]=unpackUint2x32({}).y;", pointer_offset,
             value, pointer_offset, value);
 }
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
index 3e8c648b1..2b08aa593 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
@@ -26,7 +26,7 @@ void EmitIdentity(EmitContext&, IR::Inst& inst, const IR::Value& value) {
 }
 
 void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value) {
-    ctx.AddU1("{}={};", inst, ctx.reg_alloc.Consume(value));
+    ctx.AddU1("{}={};", inst, ctx.var_alloc.Consume(value));
 }
 
 void EmitBitCastU16F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) {
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 46ce413bf..8688686e8 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -29,7 +29,7 @@ void EmitGetCbufU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst&
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    (offset.U32() % 4) * 8);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32(
             "{}=bitfieldExtract(floatBitsToUint({}_cbuf{}[{}/16][({}/4)%4]),int(({}%4)*8),8);",
             inst, ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -44,7 +44,7 @@ void EmitGetCbufS8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst&
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    (offset.U32() % 4) * 8);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32(
             "{}=bitfieldExtract(floatBitsToInt({}_cbuf{}[{}/16][({}/4)%4]),int(({}%4)*8),8);", inst,
             ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -59,7 +59,7 @@ void EmitGetCbufU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    ((offset.U32() / 2) % 2) * 16);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32("{}=bitfieldExtract(floatBitsToUint({}_cbuf{}[{}/16][({}/4)%4]),int((({}/"
                    "2)%2)*16),16);",
                    inst, ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -74,7 +74,7 @@ void EmitGetCbufS16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
                    ctx.stage_name, binding.U32(), offset.U32() / 16, OffsetSwizzle(offset.U32()),
                    ((offset.U32() / 2) % 2) * 16);
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32(
             "{}=bitfieldExtract(floatBitsToInt({}_cbuf{}[{}/16][({}/4)%4]),int((({}/2)%2)*16),16);",
             inst, ctx.stage_name, binding.U32(), offset_var, offset_var, offset_var);
@@ -87,7 +87,7 @@ void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
         ctx.AddU32("{}=floatBitsToUint({}_cbuf{}[{}].{});", inst, ctx.stage_name, binding.U32(),
                    offset.U32() / 16, OffsetSwizzle(offset.U32()));
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32("{}=floatBitsToUint({}_cbuf{}[{}/16][({}/4)%4]);", inst, ctx.stage_name,
                    binding.U32(), offset_var, offset_var);
     }
@@ -99,7 +99,7 @@ void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
         ctx.AddF32("{}={}_cbuf{}[{}].{};", inst, ctx.stage_name, binding.U32(), offset.U32() / 16,
                    OffsetSwizzle(offset.U32()));
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddF32("{}={}_cbuf{}[{}/16][({}/4)%4];", inst, ctx.stage_name, binding.U32(),
                    offset_var, offset_var);
     }
@@ -114,7 +114,7 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
                      ctx.stage_name, binding.U32(), (offset.U32() + 4) / 16,
                      OffsetSwizzle(offset.U32() + 4));
     } else {
-        const auto offset_var{ctx.reg_alloc.Consume(offset)};
+        const auto offset_var{ctx.var_alloc.Consume(offset)};
         ctx.AddU32x2("{}=uvec2(floatBitsToUint({}_cbuf{}[{}/16][({}/"
                      "4)%4]),floatBitsToUint({}_cbuf{}[({}+4)/16][(({}+4)/4)%4]));",
                      inst, ctx.stage_name, binding.U32(), offset_var, offset_var, ctx.stage_name,
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index e63e3f2bd..eb427d8b5 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -104,12 +104,12 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
     }
     const auto texture{Texture(ctx, info, index)};
     const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (!offset.IsEmpty()) {
             ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info), bias);
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info), bias);
         } else {
             if (ctx.stage == Stage::Fragment) {
                 ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias);
@@ -122,7 +122,7 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
     // TODO: Query sparseTexels extension support
     if (!offset.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));",
-                  *sparse_inst, texture, coords, CastToIntVec(ctx.reg_alloc.Consume(offset), info),
+                  *sparse_inst, texture, coords, CastToIntVec(ctx.var_alloc.Consume(offset), info),
                   texel, bias);
     } else {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst,
@@ -143,12 +143,12 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
         throw NotImplementedException("Lod clamp samples");
     }
     const auto texture{Texture(ctx, info, index)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (!offset.IsEmpty()) {
             ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info));
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info));
         } else {
             ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc);
         }
@@ -158,7 +158,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse
     if (!offset.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));",
                   *sparse_inst, texture, CastToIntVec(coords, info), lod_lc,
-                  CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel);
+                  CastToIntVec(ctx.var_alloc.Consume(offset), info), texel);
     } else {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst,
                   texture, coords, lod_lc, texel);
@@ -232,7 +232,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins
                      [[maybe_unused]] const IR::Value& offset2) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     const auto texture{Texture(ctx, info, index)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (offset.IsEmpty()) {
@@ -242,7 +242,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins
         }
         if (offset2.IsEmpty()) {
             ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info), info.gather_component);
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info), info.gather_component);
             return;
         }
         // PTP
@@ -259,7 +259,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins
     if (offset2.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));",
                   *sparse_inst, texture, CastToIntVec(coords, info),
-                  CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel, info.gather_component);
+                  CastToIntVec(ctx.var_alloc.Consume(offset), info), texel, info.gather_component);
     }
     // PTP
     const auto offsets{PtpOffsets(offset, offset2)};
@@ -276,7 +276,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR:
                          [[maybe_unused]] std::string_view dref) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     const auto texture{Texture(ctx, info, index)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
         if (offset.IsEmpty()) {
@@ -285,7 +285,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR:
         }
         if (offset2.IsEmpty()) {
             ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref,
-                    CastToIntVec(ctx.reg_alloc.Consume(offset), info));
+                    CastToIntVec(ctx.var_alloc.Consume(offset), info));
             return;
         }
         // PTP
@@ -301,7 +301,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR:
     if (offset2.IsEmpty()) {
         ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));",
                   *sparse_inst, texture, CastToIntVec(coords, info), dref,
-                  CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel);
+                  CastToIntVec(ctx.var_alloc.Consume(offset), info), texel);
     }
     // PTP
     const auto offsets{PtpOffsets(offset, offset2)};
@@ -323,7 +323,7 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst
     }
     const auto texture{Texture(ctx, info, index)};
     const auto sparse_inst{PrepareSparse(inst)};
-    const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)};
+    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     if (!sparse_inst) {
         if (!offset.empty()) {
             ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture,
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
index 4a3d66c90..1c7413cd4 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
@@ -29,7 +29,7 @@ void SetSignFlag(EmitContext& ctx, IR::Inst& inst, std::string_view result) {
 }
 } // Anonymous namespace
 void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     if (IR::Inst* const carry{inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp)}) {
         ctx.uses_cc_carry = true;
         ctx.Add("{}=uaddCarry({},{},carry);", result, a, b);
@@ -130,7 +130,7 @@ void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, std::string_view base,
 
 void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
                           std::string_view offset, std::string_view count) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=uint(bitfieldExtract(int({}),int({}),int({})));", result, base, offset, count);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
@@ -138,7 +138,7 @@ void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, std::string_view bas
 
 void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
                           std::string_view offset, std::string_view count) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=uint(bitfieldExtract(uint({}),int({}),int({})));", result, base, offset, count);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
@@ -184,7 +184,7 @@ void EmitUMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::strin
 
 void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
                   std::string_view max) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=clamp(int({}),int({}),int({}));", result, value, min, max);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
@@ -192,7 +192,7 @@ void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std:
 
 void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
                   std::string_view max) {
-    const auto result{ctx.reg_alloc.Define(inst, Type::U32)};
+    const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     ctx.Add("{}=clamp(uint({}),uint({}),uint({}));", result, value, min, max);
     SetZeroFlag(ctx, inst, result);
     SetSignFlag(ctx, inst, result);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp
index c66b7b788..8ce186733 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_memory.cpp
@@ -12,7 +12,7 @@ namespace Shader::Backend::GLSL {
 void EmitLoadStorageU8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                        [[maybe_unused]] const IR::Value& binding,
                        [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32("{}=bitfieldExtract(ssbo{}[{}/4],int({}%4)*8,8);", inst, binding.U32(), offset_var,
                offset_var);
 }
@@ -20,7 +20,7 @@ void EmitLoadStorageU8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
 void EmitLoadStorageS8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                        [[maybe_unused]] const IR::Value& binding,
                        [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddS32("{}=bitfieldExtract(int(ssbo{}[{}/4]),int({}%4)*8,8);", inst, binding.U32(),
                offset_var, offset_var);
 }
@@ -28,7 +28,7 @@ void EmitLoadStorageS8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
 void EmitLoadStorageU16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32("{}=bitfieldExtract(ssbo{}[{}/4],int(({}/2)%2)*16,16);", inst, binding.U32(),
                offset_var, offset_var);
 }
@@ -36,27 +36,27 @@ void EmitLoadStorageU16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
 void EmitLoadStorageS16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddS32("{}=bitfieldExtract(int(ssbo{}[{}/4]),int(({}/2)%2)*16,16);", inst, binding.U32(),
                offset_var, offset_var);
 }
 
 void EmitLoadStorage32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                        const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32("{}=ssbo{}[{}/4];", inst, binding.U32(), offset_var);
 }
 
 void EmitLoadStorage64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                        const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32x2("{}=uvec2(ssbo{}[{}/4],ssbo{}[({}+4)/4]);", inst, binding.U32(), offset_var,
                  binding.U32(), offset_var);
 }
 
 void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                         const IR::Value& offset) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.AddU32x4("{}=uvec4(ssbo{}[{}/4],ssbo{}[({}+4)/4],ssbo{}[({}+8)/4],ssbo{}[({}+12)/4]);",
                  inst, binding.U32(), offset_var, binding.U32(), offset_var, binding.U32(),
                  offset_var, binding.U32(), offset_var);
@@ -66,7 +66,7 @@ void EmitWriteStorageU8([[maybe_unused]] EmitContext& ctx,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset,
                         [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int({}%4)*8,8);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
@@ -75,7 +75,7 @@ void EmitWriteStorageS8([[maybe_unused]] EmitContext& ctx,
                         [[maybe_unused]] const IR::Value& binding,
                         [[maybe_unused]] const IR::Value& offset,
                         [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int({}%4)*8,8);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
@@ -84,7 +84,7 @@ void EmitWriteStorageU16([[maybe_unused]] EmitContext& ctx,
                          [[maybe_unused]] const IR::Value& binding,
                          [[maybe_unused]] const IR::Value& offset,
                          [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int(({}/2)%2)*16,16);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
@@ -93,20 +93,20 @@ void EmitWriteStorageS16([[maybe_unused]] EmitContext& ctx,
                          [[maybe_unused]] const IR::Value& binding,
                          [[maybe_unused]] const IR::Value& offset,
                          [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]=bitfieldInsert(ssbo{}[{}/4],{},int(({}/2)%2)*16,16);", binding.U32(),
             offset_var, binding.U32(), offset_var, value, offset_var);
 }
 
 void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]={};", binding.U32(), offset_var, value);
 }
 
 void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]={}.x;", binding.U32(), offset_var, value);
     ctx.Add("ssbo{}[({}+4)/4]={}.y;", binding.U32(), offset_var, value);
 }
@@ -115,7 +115,7 @@ void EmitWriteStorage128([[maybe_unused]] EmitContext& ctx,
                          [[maybe_unused]] const IR::Value& binding,
                          [[maybe_unused]] const IR::Value& offset,
                          [[maybe_unused]] std::string_view value) {
-    const auto offset_var{ctx.reg_alloc.Consume(offset)};
+    const auto offset_var{ctx.var_alloc.Consume(offset)};
     ctx.Add("ssbo{}[{}/4]={}.x;", binding.U32(), offset_var, value);
     ctx.Add("ssbo{}[({}+4)/4]={}.y;", binding.U32(), offset_var, value);
     ctx.Add("ssbo{}[({}+8)/4]={}.z;", binding.U32(), offset_var, value);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
index 22ea9c9b1..806c4777b 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_not_implemented.cpp
@@ -21,11 +21,11 @@ static void NotImplemented() {
 void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
     const size_t num_args{phi.NumArgs()};
     for (size_t i = 0; i < num_args; ++i) {
-        ctx.reg_alloc.Consume(phi.Arg(i));
+        ctx.var_alloc.Consume(phi.Arg(i));
     }
     if (!phi.Definition<Id>().is_valid) {
         // The phi node wasn't forward defined
-        ctx.Add("{};", ctx.reg_alloc.Define(phi, phi.Arg(0).Type()));
+        ctx.Add("{};", ctx.var_alloc.Define(phi, phi.Arg(0).Type()));
     }
 }
 
@@ -42,10 +42,10 @@ void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value&
     const auto phi_type{phi.Arg(0).Type()};
     if (!phi.Definition<Id>().is_valid) {
         // The phi node wasn't forward defined
-        ctx.Add("{};", ctx.reg_alloc.Define(phi, phi_type));
+        ctx.Add("{};", ctx.var_alloc.Define(phi, phi_type));
     }
-    const auto phi_reg{ctx.reg_alloc.Consume(IR::Value{&phi})};
-    const auto val_reg{ctx.reg_alloc.Consume(value)};
+    const auto phi_reg{ctx.var_alloc.Consume(IR::Value{&phi})};
+    const auto val_reg{ctx.var_alloc.Consume(value)};
     if (phi_reg == val_reg) {
         return;
     }
diff --git a/src/shader_recompiler/backend/glsl/reg_alloc.cpp b/src/shader_recompiler/backend/glsl/reg_alloc.cpp
deleted file mode 100644
index b1de022d4..000000000
--- a/src/shader_recompiler/backend/glsl/reg_alloc.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <string>
-#include <string_view>
-
-#include <fmt/format.h>
-
-#include "shader_recompiler/backend/glsl/reg_alloc.h"
-#include "shader_recompiler/exception.h"
-#include "shader_recompiler/frontend/ir/value.h"
-
-namespace Shader::Backend::GLSL {
-namespace {
-std::string Representation(Id id) {
-    if (id.is_condition_code != 0) {
-        throw NotImplementedException("Condition code");
-    }
-    if (id.is_spill != 0) {
-        throw NotImplementedException("Spilling");
-    }
-    const u32 index{static_cast<u32>(id.index)};
-    return fmt::format("R{}", index);
-}
-
-std::string FormatFloat(std::string_view value, IR::Type type) {
-    // TODO: Confirm FP64 nan/inf
-    if (type == IR::Type::F32) {
-        if (value == "nan") {
-            return "uintBitsToFloat(0x7fc00000)";
-        }
-        if (value == "inf") {
-            return "uintBitsToFloat(0x7f800000)";
-        }
-        if (value == "-inf") {
-            return "uintBitsToFloat(0xff800000)";
-        }
-    }
-    if (value.find_first_of('e') != std::string_view::npos) {
-        // scientific notation
-        const auto cast{type == IR::Type::F32 ? "float" : "double"};
-        return fmt::format("{}({})", cast, value);
-    }
-    const bool needs_dot{value.find_first_of('.') == std::string_view::npos};
-    const bool needs_suffix{!value.ends_with('f')};
-    const auto suffix{type == IR::Type::F32 ? "f" : "lf"};
-    return fmt::format("{}{}{}", value, needs_dot ? "." : "", needs_suffix ? suffix : "");
-}
-
-std::string MakeImm(const IR::Value& value) {
-    switch (value.Type()) {
-    case IR::Type::U1:
-        return fmt::format("{}", value.U1() ? "true" : "false");
-    case IR::Type::U32:
-        return fmt::format("{}u", value.U32());
-    case IR::Type::F32:
-        return FormatFloat(fmt::format("{}", value.F32()), IR::Type::F32);
-    case IR::Type::U64:
-        return fmt::format("{}ul", value.U64());
-    case IR::Type::F64:
-        return FormatFloat(fmt::format("{}", value.F64()), IR::Type::F64);
-    case IR::Type::Void:
-        return "";
-    default:
-        throw NotImplementedException("Immediate type {}", value.Type());
-    }
-}
-} // Anonymous namespace
-
-std::string RegAlloc::Define(IR::Inst& inst) {
-    const Id id{Alloc()};
-    inst.SetDefinition<Id>(id);
-    return Representation(id);
-}
-
-std::string RegAlloc::Define(IR::Inst& inst, Type type) {
-    const Id id{Alloc()};
-    std::string type_str = "";
-    if (!register_defined[id.index]) {
-        register_defined[id.index] = true;
-        // type_str = GetGlslType(type);
-        reg_types.push_back(GetGlslType(type));
-        ++num_used_registers;
-    }
-    inst.SetDefinition<Id>(id);
-    return type_str + Representation(id);
-}
-
-std::string RegAlloc::Define(IR::Inst& inst, IR::Type type) {
-    return Define(inst, RegType(type));
-}
-
-std::string RegAlloc::Consume(const IR::Value& value) {
-    return value.IsImmediate() ? MakeImm(value) : Consume(*value.InstRecursive());
-}
-
-std::string RegAlloc::Consume(IR::Inst& inst) {
-    inst.DestructiveRemoveUsage();
-    // TODO: reuse variables of same type if possible
-    // if (!inst.HasUses()) {
-    //     Free(id);
-    // }
-    return Representation(inst.Definition<Id>());
-}
-
-Type RegAlloc::RegType(IR::Type type) {
-    switch (type) {
-    case IR::Type::U1:
-        return Type::U1;
-    case IR::Type::U32:
-        return Type::U32;
-    case IR::Type::F32:
-        return Type::F32;
-    case IR::Type::U64:
-        return Type::U64;
-    case IR::Type::F64:
-        return Type::F64;
-    default:
-        throw NotImplementedException("IR type {}", type);
-    }
-}
-
-std::string RegAlloc::GetGlslType(Type type) {
-    switch (type) {
-    case Type::U1:
-        return "bool ";
-    case Type::F16x2:
-        return "f16vec2 ";
-    case Type::U32:
-        return "uint ";
-    case Type::S32:
-        return "int ";
-    case Type::F32:
-        return "float ";
-    case Type::S64:
-        return "int64_t ";
-    case Type::U64:
-        return "uint64_t ";
-    case Type::F64:
-        return "double ";
-    case Type::U32x2:
-        return "uvec2 ";
-    case Type::F32x2:
-        return "vec2 ";
-    case Type::U32x3:
-        return "uvec3 ";
-    case Type::F32x3:
-        return "vec3 ";
-    case Type::U32x4:
-        return "uvec4 ";
-    case Type::F32x4:
-        return "vec4 ";
-    case Type::Void:
-        return "";
-    default:
-        throw NotImplementedException("Type {}", type);
-    }
-}
-
-std::string RegAlloc::GetGlslType(IR::Type type) {
-    return GetGlslType(RegType(type));
-}
-
-Id RegAlloc::Alloc() {
-    if (num_used_registers < NUM_REGS) {
-        for (size_t reg = 0; reg < NUM_REGS; ++reg) {
-            if (register_use[reg]) {
-                continue;
-            }
-            register_use[reg] = true;
-            Id ret{};
-            ret.is_valid.Assign(1);
-            ret.is_long.Assign(0);
-            ret.is_spill.Assign(0);
-            ret.is_condition_code.Assign(0);
-            ret.index.Assign(static_cast<u32>(reg));
-            return ret;
-        }
-    }
-    throw NotImplementedException("Register spilling");
-}
-
-void RegAlloc::Free(Id id) {
-    if (id.is_spill != 0) {
-        throw NotImplementedException("Free spill");
-    }
-    register_use[id.index] = false;
-}
-
-} // namespace Shader::Backend::GLSL
diff --git a/src/shader_recompiler/backend/glsl/reg_alloc.h b/src/shader_recompiler/backend/glsl/reg_alloc.h
deleted file mode 100644
index 6c293f9d1..000000000
--- a/src/shader_recompiler/backend/glsl/reg_alloc.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <bitset>
-#include <vector>
-
-#include "common/bit_field.h"
-#include "common/common_types.h"
-
-namespace Shader::IR {
-class Inst;
-class Value;
-enum class Type;
-} // namespace Shader::IR
-
-namespace Shader::Backend::GLSL {
-enum class Type : u32 {
-    U1,
-    F16x2,
-    S32,
-    U32,
-    F32,
-    S64,
-    U64,
-    F64,
-    U32x2,
-    F32x2,
-    U32x3,
-    F32x3,
-    U32x4,
-    F32x4,
-    Void,
-};
-
-struct Id {
-    union {
-        u32 raw;
-        BitField<0, 1, u32> is_valid;
-        BitField<1, 1, u32> is_long;
-        BitField<2, 1, u32> is_spill;
-        BitField<3, 1, u32> is_condition_code;
-        BitField<4, 1, u32> is_null;
-        BitField<5, 27, u32> index;
-    };
-
-    bool operator==(Id rhs) const noexcept {
-        return raw == rhs.raw;
-    }
-    bool operator!=(Id rhs) const noexcept {
-        return !operator==(rhs);
-    }
-};
-static_assert(sizeof(Id) == sizeof(u32));
-
-class RegAlloc {
-public:
-    std::string Define(IR::Inst& inst);
-    std::string Define(IR::Inst& inst, Type type);
-    std::string Define(IR::Inst& inst, IR::Type type);
-
-    std::string Consume(const IR::Value& value);
-    std::string Consume(IR::Inst& inst);
-
-    std::string GetGlslType(Type type);
-    std::string GetGlslType(IR::Type type);
-
-    size_t num_used_registers{};
-    std::vector<std::string> reg_types;
-
-private:
-    static constexpr size_t NUM_REGS = 4096;
-
-    Type RegType(IR::Type type);
-    Id Alloc();
-    void Free(Id id);
-
-    std::bitset<NUM_REGS> register_use{};
-    std::bitset<NUM_REGS> register_defined{};
-};
-
-} // namespace Shader::Backend::GLSL
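
A minimal, self-contained C++ illustration (not the commit's code; VarAlloc's actual storage lives in var_alloc.h, outside this section) of the reuse discipline that replaces the deleted one-register-per-definition scheme above:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // One pool per GLSL type; slot 0 is kept for never-used temporaries,
    // mirroring the comment in DefineVariables() earlier in this commit.
    struct Pool {
        std::vector<bool> in_use{false}; // slot 0 pre-created
        uint32_t Alloc() {
            for (uint32_t i = 1; i < in_use.size(); ++i) {
                if (!in_use[i]) { in_use[i] = true; return i; } // reuse a freed slot
            }
            in_use.push_back(true);
            return static_cast<uint32_t>(in_use.size() - 1);
        }
        void Free(uint32_t index) { in_use[index] = false; }
    };

    int main() {
        Pool u32_pool;
        const uint32_t a = u32_pool.Alloc(); // would print as u_1
        u32_pool.Free(a);                    // a's last IR use was consumed
        const uint32_t b = u32_pool.Alloc(); // u_1 again: the slot is reused
        std::cout << "u_" << a << " reused as u_" << b << '\n';
    }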
diff --git a/src/shader_recompiler/backend/glsl/var_alloc.cpp b/src/shader_recompiler/backend/glsl/var_alloc.cpp
new file mode 100644
index 000000000..8c6944f07
--- /dev/null
+++ b/src/shader_recompiler/backend/glsl/var_alloc.cpp
@@ -0,0 +1,290 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <string>
6#include <string_view>
7
8#include <fmt/format.h>
9
10#include "shader_recompiler/backend/glsl/var_alloc.h"
11#include "shader_recompiler/exception.h"
12#include "shader_recompiler/frontend/ir/value.h"
13
14namespace Shader::Backend::GLSL {
15namespace {
16std::string TypePrefix(GlslVarType type) {
17 switch (type) {
18 case GlslVarType::U1:
19 return "b_";
20 case GlslVarType::F16x2:
21 return "f16x2_";
22 case GlslVarType::U32:
23 return "u_";
24 case GlslVarType::S32:
25 return "s_";
26 case GlslVarType::F32:
27 return "f_";
28 case GlslVarType::S64:
29 return "s64_";
30 case GlslVarType::U64:
31 return "u64_";
32 case GlslVarType::F64:
33 return "d_";
34 case GlslVarType::U32x2:
35 return "u2_";
36 case GlslVarType::F32x2:
37 return "f2_";
38 case GlslVarType::U32x3:
39 return "u3_";
40 case GlslVarType::F32x3:
41 return "f3_";
42 case GlslVarType::U32x4:
43 return "u4_";
44 case GlslVarType::F32x4:
45 return "f4_";
46 case GlslVarType::Void:
47 return "";
48 default:
49 throw NotImplementedException("Type {}", type);
50 }
51}
52
53std::string FormatFloat(std::string_view value, IR::Type type) {
54 // TODO: Confirm FP64 nan/inf
55 if (type == IR::Type::F32) {
56 if (value == "nan") {
57 return "uintBitsToFloat(0x7fc00000)";
58 }
59 if (value == "inf") {
60 return "uintBitsToFloat(0x7f800000)";
61 }
62 if (value == "-inf") {
63 return "uintBitsToFloat(0xff800000)";
64 }
65 }
66 if (value.find_first_of('e') != std::string_view::npos) {
67 // scientific notation
68 const auto cast{type == IR::Type::F32 ? "float" : "double"};
69 return fmt::format("{}({})", cast, value);
70 }
71 const bool needs_dot{value.find_first_of('.') == std::string_view::npos};
72 const bool needs_suffix{!value.ends_with('f')};
73 const auto suffix{type == IR::Type::F32 ? "f" : "lf"};
74 return fmt::format("{}{}{}", value, needs_dot ? "." : "", needs_suffix ? suffix : "");
75}
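// Worked examples of FormatFloat's rules above (a sketch; the literal digits
// assume fmt's default float-to-string output):
//   FormatFloat("nan", IR::Type::F32)  -> "uintBitsToFloat(0x7fc00000)"
//   FormatFloat("1e-5", IR::Type::F32) -> "float(1e-5)"   (scientific notation)
//   FormatFloat("1", IR::Type::F32)    -> "1.f"           (dot and suffix added)
//   FormatFloat("0.5", IR::Type::F64)  -> "0.5lf"         (dot already present)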
76
77std::string MakeImm(const IR::Value& value) {
78 switch (value.Type()) {
79 case IR::Type::U1:
80 return fmt::format("{}", value.U1() ? "true" : "false");
81 case IR::Type::U32:
82 return fmt::format("{}u", value.U32());
83 case IR::Type::F32:
84 return FormatFloat(fmt::format("{}", value.F32()), IR::Type::F32);
85 case IR::Type::U64:
86 return fmt::format("{}ul", value.U64());
87 case IR::Type::F64:
88 return FormatFloat(fmt::format("{}", value.F64()), IR::Type::F64);
89 case IR::Type::Void:
90 return "";
91 default:
92 throw NotImplementedException("Immediate type {}", value.Type());
93 }
94}
95} // Anonymous namespace
96
97std::string VarAlloc::Representation(u32 index, GlslVarType type) const {
98 const auto prefix{TypePrefix(type)};
99 return fmt::format("{}{}", prefix, index);
100}
101
102std::string VarAlloc::Representation(Id id) const {
103 return Representation(id.index, id.type);
104}
105
106std::string VarAlloc::Define(IR::Inst& inst, GlslVarType type) {
107 if (inst.HasUses()) {
108 inst.SetDefinition<Id>(Alloc(type));
109 return Representation(inst.Definition<Id>());
110 } else {
111 Id id{};
112 id.type.Assign(type);
113 // No uses: leave is_valid clear so the definition falls back to the
 // type's shared temporary (index 0, which Alloc never hands out).
114 GetUseTracker(type).uses_temp = true;
115 inst.SetDefinition<Id>(id);
116 }
117 return Representation(inst.Definition<Id>());
118}
119
120std::string VarAlloc::Define(IR::Inst& inst, IR::Type type) {
121 return Define(inst, RegType(type));
122}
123
124std::string VarAlloc::Consume(const IR::Value& value) {
125 return value.IsImmediate() ? MakeImm(value) : ConsumeInst(*value.InstRecursive());
126}
127
128std::string VarAlloc::ConsumeInst(IR::Inst& inst) {
129 inst.DestructiveRemoveUsage();
130 if (!inst.HasUses()) {
131 Free(inst.Definition<Id>());
132 }
133 return Representation(inst.Definition<Id>());
134}
135
136std::string VarAlloc::GetGlslType(IR::Type type) const {
137 return GetGlslType(RegType(type));
138}
139
140Id VarAlloc::Alloc(GlslVarType type) {
141 auto& use_tracker{GetUseTracker(type)};
142 if (use_tracker.num_used < NUM_VARS) {
143 for (size_t var = 1; var < NUM_VARS; ++var) {
144 if (use_tracker.var_use[var]) {
145 continue;
146 }
147 use_tracker.num_used = std::max(use_tracker.num_used, var + 1);
148 use_tracker.var_use[var] = true;
149 Id ret{};
150 ret.is_valid.Assign(1);
151 ret.type.Assign(type);
152 ret.index.Assign(static_cast<u32>(var));
153 return ret;
154 }
155 }
156 throw NotImplementedException("Variable spilling");
157}
158
159void VarAlloc::Free(Id id) {
160 if (id.is_valid == 0) {
161 // Ids with is_valid clear are unused definitions sharing the
 // type's temporary; there is no per-variable slot to release.
162 return;
163 }
164 auto& use_tracker{GetUseTracker(id.type)};
165 use_tracker.var_use[id.index] = false;
166}
167
168GlslVarType VarAlloc::RegType(IR::Type type) const {
169 switch (type) {
170 case IR::Type::U1:
171 return GlslVarType::U1;
172 case IR::Type::U32:
173 return GlslVarType::U32;
174 case IR::Type::F32:
175 return GlslVarType::F32;
176 case IR::Type::U64:
177 return GlslVarType::U64;
178 case IR::Type::F64:
179 return GlslVarType::F64;
180 default:
181 throw NotImplementedException("IR type {}", type);
182 }
183}
184
185std::string VarAlloc::GetGlslType(GlslVarType type) const {
186 switch (type) {
187 case GlslVarType::U1:
188 return "bool ";
189 case GlslVarType::F16x2:
190 return "f16vec2 ";
191 case GlslVarType::U32:
192 return "uint ";
193 case GlslVarType::S32:
194 return "int ";
195 case GlslVarType::F32:
196 return "float ";
197 case GlslVarType::S64:
198 return "int64_t ";
199 case GlslVarType::U64:
200 return "uint64_t ";
201 case GlslVarType::F64:
202 return "double ";
203 case GlslVarType::U32x2:
204 return "uvec2 ";
205 case GlslVarType::F32x2:
206 return "vec2 ";
207 case GlslVarType::U32x3:
208 return "uvec3 ";
209 case GlslVarType::F32x3:
210 return "vec3 ";
211 case GlslVarType::U32x4:
212 return "uvec4 ";
213 case GlslVarType::F32x4:
214 return "vec4 ";
215 case GlslVarType::Void:
216 return "";
217 default:
218 throw NotImplementedException("Type {}", type);
219 }
220}
221
222VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) {
223 switch (type) {
224 case GlslVarType::U1:
225 return var_bool;
226 case GlslVarType::U32:
227 return var_u32;
228 case GlslVarType::S32:
229 return var_s32;
230 case GlslVarType::F32:
231 return var_f32;
232 case GlslVarType::S64:
233 return var_s64;
234 case GlslVarType::U64:
235 return var_u64;
236 case GlslVarType::F64:
237 return var_f64;
238 case GlslVarType::U32x2:
239 return var_u32x2;
240 case GlslVarType::F32x2:
241 return var_f32x2;
242 case GlslVarType::U32x3:
243 return var_u32x3;
244 case GlslVarType::F32x3:
245 return var_f32x3;
246 case GlslVarType::U32x4:
247 return var_u32x4;
248 case GlslVarType::F32x4:
249 return var_f32x4;
250 default:
251 throw NotImplementedException("Type {}", type);
252 }
253}
254
255const VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) const {
256 switch (type) {
257 case GlslVarType::U1:
258 return var_bool;
259 case GlslVarType::F16x2:
260 return var_f16x2;
261 case GlslVarType::U32:
262 return var_u32;
263 case GlslVarType::S32:
264 return var_s32;
265 case GlslVarType::F32:
266 return var_f32;
267 case GlslVarType::S64:
268 return var_s64;
269 case GlslVarType::U64:
270 return var_u64;
271 case GlslVarType::F64:
272 return var_f64;
273 case GlslVarType::U32x2:
274 return var_u32x2;
275 case GlslVarType::F32x2:
276 return var_f32x2;
277 case GlslVarType::U32x3:
278 return var_u32x3;
279 case GlslVarType::F32x3:
280 return var_f32x3;
281 case GlslVarType::U32x4:
282 return var_u32x4;
283 case GlslVarType::F32x4:
284 return var_f32x4;
285 default:
286 throw NotImplementedException("Type {}", type);
287 }
288}
289
290} // namespace Shader::Backend::GLSL
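
Note that the mutable GetUseTracker overload above omits the GlslVarType::F16x2 case that its const counterpart handles, so allocating an F16x2 variable would fall through to the NotImplementedException default. One way to keep the two overloads in sync is the standard const_cast forwarding idiom (a sketch, assuming var_f16x2 is the intended tracker, as in the const overload):

#include <utility>

// Forwarding the mutable overload to the const one removes the duplicated
// switch and picks up the F16x2 case the const version already handles.
// The const_cast is safe here because *this is known to be non-const.
VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) {
    return const_cast<UseTracker&>(std::as_const(*this).GetUseTracker(type));
}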
diff --git a/src/shader_recompiler/backend/glsl/var_alloc.h b/src/shader_recompiler/backend/glsl/var_alloc.h
new file mode 100644
index 000000000..29d78a571
--- /dev/null
+++ b/src/shader_recompiler/backend/glsl/var_alloc.h
@@ -0,0 +1,100 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <bitset>
8#include <string>
9#include <vector>
10
11#include "common/bit_field.h"
12#include "common/common_types.h"
13
14namespace Shader::IR {
15class Inst;
16class Value;
17enum class Type;
18} // namespace Shader::IR
19
20namespace Shader::Backend::GLSL {
21enum class GlslVarType : u32 {
22 U1,
23 F16x2,
24 S32,
25 U32,
26 F32,
27 S64,
28 U64,
29 F64,
30 U32x2,
31 F32x2,
32 U32x3,
33 F32x3,
34 U32x4,
35 F32x4,
36 Void,
37};
38
39struct Id {
40 union {
41 u32 raw;
42 BitField<0, 1, u32> is_valid;
43 BitField<1, 4, GlslVarType> type;
44 BitField<5, 27, u32> index;
45 };
46
47 bool operator==(Id rhs) const noexcept {
48 return raw == rhs.raw;
49 }
50 bool operator!=(Id rhs) const noexcept {
51 return !operator==(rhs);
52 }
53};
54static_assert(sizeof(Id) == sizeof(u32));
55
56class VarAlloc {
57public:
58 static constexpr size_t NUM_VARS = 511;
59 struct UseTracker {
60 size_t num_used{};
61 std::bitset<NUM_VARS> var_use{};
62 bool uses_temp{};
63 };
64
65 std::string Define(IR::Inst& inst, GlslVarType type);
66 std::string Define(IR::Inst& inst, IR::Type type);
67
68 std::string Consume(const IR::Value& value);
69 std::string ConsumeInst(IR::Inst& inst);
70
71 std::string GetGlslType(GlslVarType type) const;
72 std::string GetGlslType(IR::Type type) const;
73
74 const UseTracker& GetUseTracker(GlslVarType type) const;
75 std::string Representation(u32 index, GlslVarType type) const;
76
77private:
78 GlslVarType RegType(IR::Type type) const;
79 Id Alloc(GlslVarType type);
80 void Free(Id id);
81 UseTracker& GetUseTracker(GlslVarType type);
82 std::string Representation(Id id) const;
83
84 UseTracker var_bool{};
85 UseTracker var_f16x2{};
86 UseTracker var_s32{};
87 UseTracker var_u32{};
88 UseTracker var_u32x2{};
89 UseTracker var_u32x3{};
90 UseTracker var_u32x4{};
91 UseTracker var_f32{};
92 UseTracker var_f32x2{};
93 UseTracker var_f32x3{};
94 UseTracker var_f32x4{};
95 UseTracker var_u64{};
96 UseTracker var_s64{};
97 UseTracker var_f64{};
98};
99
100} // namespace Shader::Backend::GLSL
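
Taken together, the two new files implement per-type variable pools: Define claims the lowest free index for the result's type, ConsumeInst frees the slot once the instruction's last use is consumed, and index 0 is reserved for the type's shared temporary. A self-contained sketch of that reuse scheme (MiniVarAlloc and its members are illustrative names, not part of the backend):

#include <algorithm>
#include <array>
#include <bitset>
#include <cstddef>
#include <stdexcept>
#include <string>

enum class VarType : std::size_t { U32, F32, Count };

class MiniVarAlloc {
public:
    static constexpr std::size_t NUM_VARS = 511;

    // Lowest-free-index allocation per type; the scan starts at 1 so that
    // index 0 stays reserved, mirroring the Alloc loop above.
    std::string Define(VarType type) {
        Tracker& t = trackers[static_cast<std::size_t>(type)];
        for (std::size_t i = 1; i < NUM_VARS; ++i) {
            if (!t.in_use[i]) {
                t.in_use[i] = true;
                t.num_used = std::max(t.num_used, i + 1);
                return Name(type, i);
            }
        }
        throw std::runtime_error("variable spilling");
    }

    void Free(VarType type, std::size_t index) {
        trackers[static_cast<std::size_t>(type)].in_use[index] = false;
    }

private:
    struct Tracker {
        std::size_t num_used{};
        std::bitset<NUM_VARS> in_use{};
    };

    static std::string Name(VarType type, std::size_t index) {
        return (type == VarType::U32 ? "u_" : "f_") + std::to_string(index);
    }

    std::array<Tracker, static_cast<std::size_t>(VarType::Count)> trackers{};
};

With this scheme, Define(VarType::U32) yields "u_1"; after Free(VarType::U32, 1), the next U32 definition yields "u_1" again. That is the reuse the commit title describes, and num_used bounds how many declarations of each type the shader prologue has to emit.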