| field | value |
|---|---|
| author | 2021-05-19 16:32:21 -0300 |
| committer | 2021-07-22 21:51:32 -0400 |
| commit | 91a3c2c1c095880a3582f9362943db84b40064f7 (patch) |
| tree | 06518558b413bdea89ad867864b6915b61d48b05 /src/shader_recompiler |
| parent | glasm: Implement stores to gl_ViewportIndex (diff) |
glasm: Implement PREC on relevant instructions
Diffstat (limited to 'src/shader_recompiler')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/shader_recompiler/backend/glasm/emit_glasm_floating_point.cpp | 18 |

1 file changed, 12 insertions, 6 deletions
```diff
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_floating_point.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_floating_point.cpp
index d2c324ad6..4ed58619d 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_floating_point.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_floating_point.cpp
@@ -6,6 +6,7 @@
 
 #include "shader_recompiler/backend/glasm/emit_context.h"
 #include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
 #include "shader_recompiler/frontend/ir/value.h"
 
 namespace Shader::Backend::GLASM {
@@ -42,6 +43,11 @@ void Clamp(EmitContext& ctx, Register ret, InputType value, InputType min_value,
             "MIN.{} {}.x,RC.x,{};",
             type, min_value, value, type, ret, max_value);
 }
+
+std::string_view Precise(IR::Inst& inst) {
+    const bool precise{inst.Flags<IR::FpControl>().no_contraction};
+    return precise ? ".PREC" : "";
+}
 } // Anonymous namespace
 
 void EmitFPAbs16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
@@ -63,11 +69,11 @@ void EmitFPAdd16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& i
 }
 
 void EmitFPAdd32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
-    ctx.Add("ADD.F {}.x,{},{};", inst, a, b);
+    ctx.Add("ADD.F{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.Define(inst), a, b);
 }
 
 void EmitFPAdd64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b) {
-    ctx.LongAdd("ADD.F64 {}.x,{},{};", inst, a, b);
+    ctx.Add("ADD.F64{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.LongDefine(inst), a, b);
 }
 
 void EmitFPFma16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
@@ -77,11 +83,11 @@ void EmitFPFma16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& i
 }
 
 void EmitFPFma32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b, ScalarF32 c) {
-    ctx.Add("MAD.F {}.x,{},{},{};", inst, a, b, c);
+    ctx.Add("MAD.F{} {}.x,{},{},{};", Precise(inst), ctx.reg_alloc.Define(inst), a, b, c);
 }
 
 void EmitFPFma64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b, ScalarF64 c) {
-    ctx.LongAdd("MAD.F64 {}.x,{},{},{};", inst, a, b, c);
+    ctx.Add("MAD.F64{} {}.x,{},{},{};", Precise(inst), ctx.reg_alloc.LongDefine(inst), a, b, c);
 }
 
 void EmitFPMax32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
@@ -106,11 +112,11 @@ void EmitFPMul16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& i
 }
 
 void EmitFPMul32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
-    ctx.Add("MUL.F {}.x,{},{};", inst, a, b);
+    ctx.Add("MUL.F{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.Define(inst), a, b);
 }
 
 void EmitFPMul64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b) {
-    ctx.LongAdd("MUL.F64 {}.x,{},{};", inst, a, b);
+    ctx.Add("MUL.F64{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.LongDefine(inst), a, b);
 }
 
 void EmitFPNeg16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
```
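
For readers unfamiliar with the modifier: `.PREC` is the "precise" opcode suffix in the NVIDIA assembly dialect the GLASM backend targets, and the new `Precise()` helper appends it whenever the IR's `FpControl` flags request `no_contraction`, so the driver must not contract or reorder the operation (for example, fusing a separate MUL and ADD into a MAD). Below is a minimal standalone sketch, not yuzu code, of the instruction text this produces; it reuses the `EmitFPAdd32` format string from the diff, and the register names `R0`, `R1`, `R2` are made up for illustration.

```cpp
// Illustration only: prints the GLASM text the format string above would
// produce with and without the .PREC suffix. R0/R1/R2 are hypothetical.
#include <fmt/core.h>
#include <string_view>

int main() {
    const auto emit_add_f32 = [](std::string_view prec) {
        // Same format string as EmitFPAdd32: "ADD.F{} {}.x,{},{};"
        fmt::print("ADD.F{} {}.x,{},{};\n", prec, "R0", "R1", "R2");
    };
    emit_add_f32("");       // contraction allowed: ADD.F R0.x,R1,R2;
    emit_add_f32(".PREC");  // no_contraction set:  ADD.F.PREC R0.x,R1,R2;
}
```

Note also that the 64-bit paths switch from `ctx.LongAdd` to plain `ctx.Add` with `ctx.reg_alloc.LongDefine(inst)`, presumably so the wide destination register is still allocated explicitly while the instruction text is built the same way as in the 32-bit case.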