Diffstat (limited to 'src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp')
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp | 294
1 file changed, 294 insertions(+), 0 deletions(-)
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp
new file mode 100644
index 000000000..f55c26b76
--- /dev/null
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp
@@ -0,0 +1,294 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {
namespace {
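// Shared helper for the 32-bit bitwise ops (AND/OR/XOR): emits the scalar
// instruction and, when the IR also consumes the zero or sign flag of the
// result, materializes those flags with explicit SEQ/SLT compares rather than
// relying on condition codes.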
void BitwiseLogicalOp(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b,
                      std::string_view lop) {
    const auto zero = inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp);
    const auto sign = inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp);
    if (zero) {
        zero->Invalidate();
    }
    if (sign) {
        sign->Invalidate();
    }
    if (zero || sign) {
        ctx.reg_alloc.InvalidateConditionCodes();
    }
    const auto ret{ctx.reg_alloc.Define(inst)};
    ctx.Add("{}.S {}.x,{},{};", lop, ret, a, b);
    if (zero) {
        ctx.Add("SEQ.S {},{},0;", *zero, ret);
    }
    if (sign) {
        ctx.Add("SLT.S {},{},0;", *sign, ret);
    }
}
} // Anonymous namespace

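// ADD.S with the .CC modifier updates the condition code register; the sign,
// carry, and overflow pseudo-flags requested by the IR are read back out of it.
// The zero flag is instead recomputed with SEQ against the result register,
// which is still at hand.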
void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    const std::array flags{
        inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp),
        inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp),
        inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp),
        inst.GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp),
    };
    for (IR::Inst* const flag_inst : flags) {
        if (flag_inst) {
            flag_inst->Invalidate();
        }
    }
    const bool cc{inst.HasAssociatedPseudoOperation()};
    const std::string_view cc_mod{cc ? ".CC" : ""};
    if (cc) {
        ctx.reg_alloc.InvalidateConditionCodes();
    }
    const auto ret{ctx.reg_alloc.Define(inst)};
    ctx.Add("ADD.S{} {}.x,{},{};", cc_mod, ret, a, b);
    if (!cc) {
        return;
    }
    static constexpr std::array<std::string_view, 4> masks{"", "SF", "CF", "OF"};
    for (size_t flag_index = 0; flag_index < flags.size(); ++flag_index) {
        if (!flags[flag_index]) {
            continue;
        }
        const auto flag_ret{ctx.reg_alloc.Define(*flags[flag_index])};
        if (flag_index == 0) {
            ctx.Add("SEQ.S {}.x,{}.x,0;", flag_ret, ret);
        } else {
            // We could use conditional execution here, but it's broken on Nvidia's compiler
            ctx.Add("IF {}.x;"
                    "MOV.S {}.x,-1;"
                    "ELSE;"
                    "MOV.S {}.x,0;"
                    "ENDIF;",
                    masks[flag_index], flag_ret, flag_ret);
        }
    }
}

void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, Register a, Register b) {
    ctx.LongAdd("ADD.S64 {}.x,{}.x,{}.x;", inst, a, b);
}

void EmitISub32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("SUB.S {}.x,{},{};", inst, a, b);
}

void EmitISub64(EmitContext& ctx, IR::Inst& inst, Register a, Register b) {
    ctx.LongAdd("SUB.S64 {}.x,{}.x,{}.x;", inst, a, b);
}

void EmitIMul32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("MUL.S {}.x,{},{};", inst, a, b);
}

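// For a negative immediate, fold the negation into the constant itself;
// formatting it as "-{}" would otherwise print a double minus sign in the
// generated GLASM source.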
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    if (value.type != Type::Register && static_cast<s32>(value.imm_u32) < 0) {
        ctx.Add("MOV.S {},{};", inst, -static_cast<s32>(value.imm_u32));
    } else {
        ctx.Add("MOV.S {},-{};", inst, value);
    }
}

void EmitINeg64(EmitContext& ctx, IR::Inst& inst, Register value) {
    ctx.LongAdd("MOV.S64 {},-{};", inst, value);
}

void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("ABS.S {},{};", inst, value);
}

void EmitShiftLeftLogical32(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 shift) {
    ctx.Add("SHL.U {}.x,{},{};", inst, base, shift);
}

void EmitShiftLeftLogical64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
                            ScalarU32 shift) {
    ctx.LongAdd("SHL.U64 {}.x,{},{};", inst, base, shift);
}

void EmitShiftRightLogical32(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 shift) {
    ctx.Add("SHR.U {}.x,{},{};", inst, base, shift);
}

void EmitShiftRightLogical64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
                             ScalarU32 shift) {
    ctx.LongAdd("SHR.U64 {}.x,{},{};", inst, base, shift);
}

void EmitShiftRightArithmetic32(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 shift) {
    ctx.Add("SHR.S {}.x,{},{};", inst, base, shift);
}

void EmitShiftRightArithmetic64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
                                ScalarS32 shift) {
    ctx.LongAdd("SHR.S64 {}.x,{},{};", inst, base, shift);
}

void EmitBitwiseAnd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    BitwiseLogicalOp(ctx, inst, a, b, "AND");
}

void EmitBitwiseOr32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    BitwiseLogicalOp(ctx, inst, a, b, "OR");
}

void EmitBitwiseXor32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    BitwiseLogicalOp(ctx, inst, a, b, "XOR");
}

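// BFI/BFE take the field width in the .x component and the offset in the .y
// component of their first operand. When both are immediates they can be packed
// into a literal vector; otherwise they are staged through the RC scratch
// register.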
void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 insert,
                        ScalarS32 offset, ScalarS32 count) {
    const Register ret{ctx.reg_alloc.Define(inst)};
    if (count.type != Type::Register && offset.type != Type::Register) {
        ctx.Add("BFI.S {},{{{},{},0,0}},{},{};", ret, count, offset, insert, base);
    } else {
        ctx.Add("MOV.S RC.x,{};"
                "MOV.S RC.y,{};"
                "BFI.S {},RC,{},{};",
                count, offset, ret, insert, base);
    }
}

void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 offset,
                          ScalarS32 count) {
    const Register ret{ctx.reg_alloc.Define(inst)};
    if (count.type != Type::Register && offset.type != Type::Register) {
        ctx.Add("BFE.S {},{{{},{},0,0}},{};", ret, count, offset, base);
    } else {
        ctx.Add("MOV.S RC.x,{};"
                "MOV.S RC.y,{};"
                "BFE.S {},RC,{};",
                count, offset, ret, base);
    }
}

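// Same BFE lowering as the signed variant above, plus the optional zero/sign
// flag materialization used by the bitwise helper.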
void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 offset,
                          ScalarU32 count) {
    const auto zero = inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp);
    const auto sign = inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp);
    if (zero) {
        zero->Invalidate();
    }
    if (sign) {
        sign->Invalidate();
    }
    if (zero || sign) {
        ctx.reg_alloc.InvalidateConditionCodes();
    }
    const Register ret{ctx.reg_alloc.Define(inst)};
    if (count.type != Type::Register && offset.type != Type::Register) {
        ctx.Add("BFE.U {},{{{},{},0,0}},{};", ret, count, offset, base);
    } else {
        ctx.Add("MOV.U RC.x,{};"
                "MOV.U RC.y,{};"
                "BFE.U {},RC,{};",
                count, offset, ret, base);
    }
    if (zero) {
        ctx.Add("SEQ.S {},{},0;", *zero, ret);
    }
    if (sign) {
        ctx.Add("SLT.S {},{},0;", *sign, ret);
    }
}

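// Bit manipulation queries: BFR reverses the bit order, BTC counts set bits,
// and BTFM locates the most significant set bit (for the signed variant, the
// highest bit differing from the sign bit), per NV_gpu_program5.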
void EmitBitReverse32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("BFR {},{};", inst, value);
}

void EmitBitCount32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("BTC {},{};", inst, value);
}

void EmitBitwiseNot32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("NOT.S {},{};", inst, value);
}

void EmitFindSMsb32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("BTFM.S {},{};", inst, value);
}

void EmitFindUMsb32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value) {
    ctx.Add("BTFM.U {},{};", inst, value);
}

void EmitSMin32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("MIN.S {},{},{};", inst, a, b);
}

void EmitUMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b) {
    ctx.Add("MIN.U {},{},{};", inst, a, b);
}

void EmitSMax32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("MAX.S {},{},{};", inst, a, b);
}

void EmitUMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b) {
    ctx.Add("MAX.U {},{},{};", inst, a, b);
}

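// clamp(value, min, max) is composed as MAX(MIN(max, value), min), staging the
// intermediate result in the RC scratch register.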
void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value, ScalarS32 min, ScalarS32 max) {
    const Register ret{ctx.reg_alloc.Define(inst)};
    ctx.Add("MIN.S RC.x,{},{};"
            "MAX.S {}.x,RC.x,{};",
            max, value, ret, min);
}

void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 min, ScalarU32 max) {
    const Register ret{ctx.reg_alloc.Define(inst)};
    ctx.Add("MIN.U RC.x,{},{};"
            "MAX.U {}.x,RC.x,{};",
            max, value, ret, min);
}

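// Integer set-on compares. Per NV_gpu_program4, on integer types these write
// -1 (all bits set) for true and 0 for false, matching the boolean encoding
// used for the pseudo-flags above.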
void EmitSLessThan(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
    ctx.Add("SLT.S {}.x,{},{};", inst, lhs, rhs);
}

void EmitULessThan(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
    ctx.Add("SLT.U {}.x,{},{};", inst, lhs, rhs);
}

void EmitIEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
    ctx.Add("SEQ.S {}.x,{},{};", inst, lhs, rhs);
}

void EmitSLessThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
    ctx.Add("SLE.S {}.x,{},{};", inst, lhs, rhs);
}

void EmitULessThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
    ctx.Add("SLE.U {}.x,{},{};", inst, lhs, rhs);
}

void EmitSGreaterThan(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
    ctx.Add("SGT.S {}.x,{},{};", inst, lhs, rhs);
}

void EmitUGreaterThan(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
    ctx.Add("SGT.U {}.x,{},{};", inst, lhs, rhs);
}

void EmitINotEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
    ctx.Add("SNE.U {}.x,{},{};", inst, lhs, rhs);
}

void EmitSGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
    ctx.Add("SGE.S {}.x,{},{};", inst, lhs, rhs);
}

void EmitUGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
    ctx.Add("SGE.U {}.x,{},{};", inst, lhs, rhs);
}

} // namespace Shader::Backend::GLASM