Diffstat (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp')
 src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index 850eee1e1..9152ace98 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -98,7 +98,7 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
 
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset,
                pointer_offset);
     ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;",
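
The warning being enabled above reflects what the emitted strings actually do: the 64-bit exchange is emulated as a plain read of two adjacent 32-bit shared-memory words followed by two plain stores. A minimal sketch of the GLSL these format strings expand to, using placeholder names off (byte offset), val (64-bit operand), and result (all assumed, not from the source):

    // Non-atomic emulation of a 64-bit shared-memory exchange (hypothetical names).
    uint64_t result = packUint2x32(uvec2(smem[off >> 2], smem[(off + 4) >> 2])); // old value
    smem[off >> 2] = unpackUint2x32(val).x;       // store low 32 bits
    smem[(off + 4) >> 2] = unpackUint2x32(val).y; // store high 32 bits

The read and the two stores are separate operations, hence the warning: another invocation can interleave between them.
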
@@ -171,7 +171,7 @@ void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Val
 
 void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -182,7 +182,7 @@ void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -195,7 +195,7 @@ void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -207,7 +207,7 @@ void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -220,8 +220,7 @@ void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
-
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
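
The storage-buffer variants follow the same non-atomic pattern as the shared-memory path, differing only in addressing (the high word lives at (off >> 2) + 1 rather than (off + 4) >> 2) and in the signed functions packing through packInt2x32(ivec2(...)). A hypothetical expansion of the read shown in these hunks, assuming a compute stage name cs, binding 0, and placeholder names off and result:

    // Unsigned variants (IAdd64/UMin64/UMax64): read two adjacent 32-bit SSBO words.
    uint64_t result = packUint2x32(uvec2(cs_ssbo0[off >> 2], cs_ssbo0[(off >> 2) + 1]));
    // Signed variants (SMin64/SMax64): pack through the signed view instead.
    int64_t sresult = packInt2x32(ivec2(cs_ssbo0[off >> 2], cs_ssbo0[(off >> 2) + 1]));
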