diff options
| author | 2021-06-14 11:32:28 -0400 | |
|---|---|---|
| committer | 2021-07-22 21:51:38 -0400 | |
| commit | ae4e452759573d145738688d9284077934e61ae4 (patch) | |
| tree | 843caa97bc872322d3e80739492805062e6aee5e /src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp | |
| parent | glsl: Add LoopSafety instructions (diff) | |
| download | yuzu-ae4e452759573d145738688d9284077934e61ae4.tar.gz yuzu-ae4e452759573d145738688d9284077934e61ae4.tar.xz yuzu-ae4e452759573d145738688d9284077934e61ae4.zip | |
glsl: Add Shader_GLSL logging
Diffstat (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp')
| -rw-r--r-- | src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp | 13 |
1 file changed, 6 insertions, 7 deletions
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp index 850eee1e1..9152ace98 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp | |||
| @@ -98,7 +98,7 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi | |||
| 98 | 98 | ||
| 99 | void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset, | 99 | void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset, |
| 100 | std::string_view value) { | 100 | std::string_view value) { |
| 101 | // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic"); | 101 | LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic"); |
| 102 | ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset, | 102 | ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset, |
| 103 | pointer_offset); | 103 | pointer_offset); |
| 104 | ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;", | 104 | ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;", |
| @@ -171,7 +171,7 @@ void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Val | |||
| 171 | 171 | ||
| 172 | void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | 172 | void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, |
| 173 | const IR::Value& offset, std::string_view value) { | 173 | const IR::Value& offset, std::string_view value) { |
| 174 | // LOG_WARNING(..., "Op falling to non-atomic"); | 174 | LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic"); |
| 175 | ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, | 175 | ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, |
| 176 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, | 176 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, |
| 177 | binding.U32(), ctx.var_alloc.Consume(offset)); | 177 | binding.U32(), ctx.var_alloc.Consume(offset)); |
| @@ -182,7 +182,7 @@ void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& | |||
| 182 | 182 | ||
| 183 | void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | 183 | void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, |
| 184 | const IR::Value& offset, std::string_view value) { | 184 | const IR::Value& offset, std::string_view value) { |
| 185 | // LOG_WARNING(..., "Op falling to non-atomic"); | 185 | LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic"); |
| 186 | ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, | 186 | ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, |
| 187 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, | 187 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, |
| 188 | binding.U32(), ctx.var_alloc.Consume(offset)); | 188 | binding.U32(), ctx.var_alloc.Consume(offset)); |
| @@ -195,7 +195,7 @@ void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& | |||
| 195 | 195 | ||
| 196 | void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | 196 | void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, |
| 197 | const IR::Value& offset, std::string_view value) { | 197 | const IR::Value& offset, std::string_view value) { |
| 198 | // LOG_WARNING(..., "Op falling to non-atomic"); | 198 | LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic"); |
| 199 | ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, | 199 | ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, |
| 200 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, | 200 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, |
| 201 | binding.U32(), ctx.var_alloc.Consume(offset)); | 201 | binding.U32(), ctx.var_alloc.Consume(offset)); |
| @@ -207,7 +207,7 @@ void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& | |||
| 207 | 207 | ||
| 208 | void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | 208 | void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, |
| 209 | const IR::Value& offset, std::string_view value) { | 209 | const IR::Value& offset, std::string_view value) { |
| 210 | // LOG_WARNING(..., "Op falling to non-atomic"); | 210 | LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic"); |
| 211 | ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, | 211 | ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, |
| 212 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, | 212 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, |
| 213 | binding.U32(), ctx.var_alloc.Consume(offset)); | 213 | binding.U32(), ctx.var_alloc.Consume(offset)); |
| @@ -220,8 +220,7 @@ void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& | |||
| 220 | 220 | ||
| 221 | void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | 221 | void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, |
| 222 | const IR::Value& offset, std::string_view value) { | 222 | const IR::Value& offset, std::string_view value) { |
| 223 | // LOG_WARNING(..., "Op falling to non-atomic"); | 223 | LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic"); |
| 224 | |||
| 225 | ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, | 224 | ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst, |
| 226 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, | 225 | ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, |
| 227 | binding.U32(), ctx.var_alloc.Consume(offset)); | 226 | binding.U32(), ctx.var_alloc.Consume(offset)); |