path: root/src/shader_recompiler/backend/glsl/emit_glsl.cpp
author bunnei 2021-07-25 11:39:04 -0700
committer GitHub 2021-07-25 11:39:04 -0700
commit 98b26b6e126d4775fdf3f773fe8a8ac808a8ff8f
tree 816faa96c2c4d291825063433331a8ea4b3d08f1
parent Merge pull request #6699 from lat9nq/common-threads
parent shader: Support out of bound local memory reads and immediate writes
Merge pull request #6585 from ameerj/hades
Shader Decompiler Rewrite
Diffstat (limited to 'src/shader_recompiler/backend/glsl/emit_glsl.cpp')
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl.cpp | 252
1 file changed, 252 insertions(+), 0 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl.cpp b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
new file mode 100644
index 000000000..8a430d573
--- /dev/null
+++ b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
@@ -0,0 +1,252 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <string>
#include <tuple>
#include <type_traits>

#include "common/div_ceil.h"
#include "common/settings.h"
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"

namespace Shader::Backend::GLSL {
namespace {
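// FuncTraits extracts the return type, argument count, and per-index argument types
// from an Emit* function pointer so Invoke can marshal IR operands to match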
template <class Func>
struct FuncTraits {};

template <class ReturnType_, class... Args>
struct FuncTraits<ReturnType_ (*)(Args...)> {
    using ReturnType = ReturnType_;

    static constexpr size_t NUM_ARGS = sizeof...(Args);

    template <size_t I>
    using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
};

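// Invokes func and stores the returned Id as the instruction's definition, letting
// later instructions consume the allocated GLSL variable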
template <auto func, typename... Args>
void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
    inst->SetDefinition<Id>(func(ctx, std::forward<Args>(args)...));
}

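// Unwraps an IR::Value into the concrete argument type the callee expects:
// string_view arguments consume a GLSL variable name from the allocator, while
// immediates and enumerations are extracted directly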
template <typename ArgType>
auto Arg(EmitContext& ctx, const IR::Value& arg) {
    if constexpr (std::is_same_v<ArgType, std::string_view>) {
        return ctx.var_alloc.Consume(arg);
    } else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
        return arg;
    } else if constexpr (std::is_same_v<ArgType, u32>) {
        return arg.U32();
    } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
        return arg.Attribute();
    } else if constexpr (std::is_same_v<ArgType, IR::Patch>) {
        return arg.Patch();
    } else if constexpr (std::is_same_v<ArgType, IR::Reg>) {
        return arg.Reg();
    }
}

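// Expands the instruction's operands into a call to func. ArgType indices skip the
// leading EmitContext parameter, plus the IR::Inst& when the callee takes one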
template <auto func, bool is_first_arg_inst, size_t... I>
void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
    using Traits = FuncTraits<decltype(func)>;
    if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
        if constexpr (is_first_arg_inst) {
            SetDefinition<func>(
                ctx, inst, *inst,
                Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
        } else {
            SetDefinition<func>(
                ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
        }
    } else {
        if constexpr (is_first_arg_inst) {
            func(ctx, *inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
        } else {
            func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
        }
    }
}

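// Dispatch helper: inspects func's signature to decide whether it receives the
// IR::Inst& itself and how many IR operands to unpack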
template <auto func>
void Invoke(EmitContext& ctx, IR::Inst* inst) {
    using Traits = FuncTraits<decltype(func)>;
    static_assert(Traits::NUM_ARGS >= 1, "Insufficient arguments");
    if constexpr (Traits::NUM_ARGS == 1) {
        Invoke<func, false>(ctx, inst, std::make_index_sequence<0>{});
    } else {
        using FirstArgType = typename Traits::template ArgType<1>;
        static constexpr bool is_first_arg_inst = std::is_same_v<FirstArgType, IR::Inst&>;
        using Indices = std::make_index_sequence<Traits::NUM_ARGS - (is_first_arg_inst ? 2 : 1)>;
        Invoke<func, is_first_arg_inst>(ctx, inst, Indices{});
    }
}

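// Routes an instruction to its Emit* handler; the OPCODE X-macro expands one case
// per opcode listed in opcodes.inc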
void EmitInst(EmitContext& ctx, IR::Inst* inst) {
    switch (inst->GetOpcode()) {
#define OPCODE(name, result_type, ...)                                                            \
    case IR::Opcode::name:                                                                         \
        return Invoke<&Emit##name>(ctx, inst);
#include "shader_recompiler/frontend/ir/opcodes.inc"
#undef OPCODE
    }
    throw LogicError("Invalid opcode {}", inst->GetOpcode());
}

bool IsReference(IR::Inst& inst) {
    return inst.GetOpcode() == IR::Opcode::Reference;
}

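// Lowers one phi node by writing each incoming value into the phi's storage at the
// end of its predecessor block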
void PrecolorInst(IR::Inst& phi) {
    // Insert phi moves before references to avoid overwriting other phis
    const size_t num_args{phi.NumArgs()};
    for (size_t i = 0; i < num_args; ++i) {
        IR::Block& phi_block{*phi.PhiBlock(i)};
        auto it{std::find_if_not(phi_block.rbegin(), phi_block.rend(), IsReference).base()};
        IR::IREmitter ir{phi_block, it};
        const IR::Value arg{phi.Arg(i)};
        if (arg.IsImmediate()) {
            ir.PhiMove(phi, arg);
        } else {
            ir.PhiMove(phi, IR::Value{arg.InstRecursive()});
        }
    }
    for (size_t i = 0; i < num_args; ++i) {
        IR::IREmitter{*phi.PhiBlock(i)}.Reference(IR::Value{&phi});
    }
}

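// Lowers every phi in the program; phis always lead a block, so scanning stops at
// the first non-phi instruction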
void Precolor(const IR::Program& program) {
    for (IR::Block* const block : program.blocks) {
        for (IR::Inst& phi : block->Instructions()) {
            if (!IR::IsPhi(phi)) {
                break;
            }
            PrecolorInst(phi);
        }
    }
}

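// Walks the structured control-flow syntax list, emitting GLSL statements for each
// node; plain blocks dispatch their instructions through EmitInst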
void EmitCode(EmitContext& ctx, const IR::Program& program) {
    for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
        switch (node.type) {
        case IR::AbstractSyntaxNode::Type::Block:
            for (IR::Inst& inst : node.data.block->Instructions()) {
                EmitInst(ctx, &inst);
            }
            break;
        case IR::AbstractSyntaxNode::Type::If:
            ctx.Add("if({}){{", ctx.var_alloc.Consume(node.data.if_node.cond));
            break;
        case IR::AbstractSyntaxNode::Type::EndIf:
            ctx.Add("}}");
            break;
        case IR::AbstractSyntaxNode::Type::Break:
            if (node.data.break_node.cond.IsImmediate()) {
                if (node.data.break_node.cond.U1()) {
                    ctx.Add("break;");
                }
            } else {
                ctx.Add("if({}){{break;}}", ctx.var_alloc.Consume(node.data.break_node.cond));
            }
            break;
        case IR::AbstractSyntaxNode::Type::Return:
        case IR::AbstractSyntaxNode::Type::Unreachable:
            ctx.Add("return;");
            break;
        case IR::AbstractSyntaxNode::Type::Loop:
            ctx.Add("for(;;){{");
            break;
        case IR::AbstractSyntaxNode::Type::Repeat:
            if (Settings::values.disable_shader_loop_safety_checks) {
                ctx.Add("if(!{}){{break;}}}}", ctx.var_alloc.Consume(node.data.repeat.cond));
            } else {
                ctx.Add("if(--loop{}<0 || !{}){{break;}}}}", ctx.num_safety_loop_vars++,
                        ctx.var_alloc.Consume(node.data.repeat.cond));
            }
            break;
        default:
            throw NotImplementedException("AbstractSyntaxNode Type {}", node.type);
        }
    }
}

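// Legacy attribute accesses and Y-direction queries are implemented with
// compatibility-profile built-ins, so request the compatibility variant when used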
std::string GlslVersionSpecifier(const EmitContext& ctx) {
    if (ctx.uses_y_direction || ctx.info.stores.Legacy() || ctx.info.loads.Legacy()) {
        return " compatibility";
    }
    return "";
}

bool IsPreciseType(GlslVarType type) {
    switch (type) {
    case GlslVarType::PrecF32:
    case GlslVarType::PrecF64:
        return true;
    default:
        return false;
    }
}

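// Declares every variable handed out by the allocator, zero-initialized and marked
// precise where supported, plus one counter per safety-checked loop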
void DefineVariables(const EmitContext& ctx, std::string& header) {
    for (u32 i = 0; i < static_cast<u32>(GlslVarType::Void); ++i) {
        const auto type{static_cast<GlslVarType>(i)};
        const auto& tracker{ctx.var_alloc.GetUseTracker(type)};
        const auto type_name{ctx.var_alloc.GetGlslType(type)};
        const bool has_precise_bug{ctx.stage == Stage::Fragment && ctx.profile.has_gl_precise_bug};
        const auto precise{!has_precise_bug && IsPreciseType(type) ? "precise " : ""};
        // Temps/return types that are never used are stored at index 0
        if (tracker.uses_temp) {
            header += fmt::format("{}{} t{}={}(0);", precise, type_name,
                                  ctx.var_alloc.Representation(0, type), type_name);
        }
        for (u32 index = 0; index < tracker.num_used; ++index) {
            header += fmt::format("{}{} {}={}(0);", precise, type_name,
                                  ctx.var_alloc.Representation(index, type), type_name);
        }
    }
    for (u32 i = 0; i < ctx.num_safety_loop_vars; ++i) {
        header += fmt::format("int loop{}=0x2000;", i);
    }
}
} // Anonymous namespace

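// Backend entry point: lowers phis, emits the shader body, then prepends the
// version directive and variable declarations to form the final source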
std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR::Program& program,
                     Bindings& bindings) {
    EmitContext ctx{program, bindings, profile, runtime_info};
    Precolor(program);
    EmitCode(ctx, program);
    const std::string version{fmt::format("#version 450{}\n", GlslVersionSpecifier(ctx))};
    ctx.header.insert(0, version);
    if (program.shared_memory_size > 0) {
        const auto requested_size{program.shared_memory_size};
        const auto max_size{profile.gl_max_compute_smem_size};
        const bool needs_clamp{requested_size > max_size};
        if (needs_clamp) {
            LOG_WARNING(Shader_GLSL, "Requested shared memory size ({}) exceeds device limit ({})",
                        requested_size, max_size);
        }
        const auto smem_size{needs_clamp ? max_size : requested_size};
        ctx.header += fmt::format("shared uint smem[{}];", Common::DivCeil(smem_size, 4U));
    }
    ctx.header += "void main(){\n";
    if (program.local_memory_size > 0) {
        ctx.header += fmt::format("uint lmem[{}];", Common::DivCeil(program.local_memory_size, 4U));
    }
    DefineVariables(ctx, ctx.header);
    if (ctx.uses_cc_carry) {
        ctx.header += "uint carry;";
    }
    if (program.info.uses_subgroup_shuffles) {
        ctx.header += "bool shfl_in_bounds;";
    }
    ctx.code.insert(0, ctx.header);
    ctx.code += '}';
    return ctx.code;
}

} // namespace Shader::Backend::GLSL