summaryrefslogtreecommitdiff
path: root/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
diff options
context:
space:
mode:
authorGravatar bunnei2022-02-02 10:57:22 -0700
committerGravatar GitHub2022-02-02 10:57:22 -0700
commit09400e4f4e34d1feed9314cee734107eb40442c2 (patch)
treeec77b4867a832ae3df18707cc53da7cd9a5a9987 /src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
parentMerge pull request #7807 from german77/moar-buttons (diff)
parentemit_glsl_atomic: Implement 32x2 fallback atomic ops (diff)
downloadyuzu-09400e4f4e34d1feed9314cee734107eb40442c2.tar.gz
yuzu-09400e4f4e34d1feed9314cee734107eb40442c2.tar.xz
yuzu-09400e4f4e34d1feed9314cee734107eb40442c2.zip
Merge pull request #7806 from ameerj/atomic64-fallbacks
shaders: Implement U32x2 atomic fallbacks when device does not support int64
Diffstat (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp')
-rw-r--r--src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp142
1 file changed, 142 insertions, 0 deletions
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index dc377b053..a409a7ab3 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -105,6 +105,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_vi
105 pointer_offset, value, pointer_offset, value); 105 pointer_offset, value, pointer_offset, value);
106} 106}
107 107
// Emulates a 64-bit shared-memory exchange as two 32-bit word accesses.
// NOTE(review): this is NOT atomic — the old pair is read and the new pair written in two
// separate steps; a concurrent invocation can observe a torn value (hence the warning).
// Assumes `smem` is the byte-addressed shared-memory uint array declared by EmitContext
// (indexed as smem[byte_offset >> 2]) — TODO confirm against emit_context.
void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                  std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
    // Result of the exchange: the previous low word and high word (low at offset, high at +4).
    ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, pointer_offset, pointer_offset);
    // Store the new pair after the read, completing the (non-atomic) exchange.
    ctx.Add("smem[{}>>2]={}.x;smem[({}+4)>>2]={}.y;", pointer_offset, value, pointer_offset, value);
}
114
108void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 115void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
109 const IR::Value& offset, std::string_view value) { 116 const IR::Value& offset, std::string_view value) {
110 ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(), 117 ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
@@ -265,6 +272,97 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
265 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value); 272 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
266} 273}
267 274
// Emulates a 64-bit SSBO atomic add as two independent 32-bit adds.
// NOT atomic, and the carry out of the low word is not propagated into the high
// word (each word is incremented with a plain `+=`), so results can differ from a
// true 64-bit add when the low word overflows.
void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                               const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
    // Return value: the pre-add pair (low word at offset>>2, high word at the next element).
    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
                 ctx.var_alloc.Consume(offset));
    // Non-atomic read-modify-write of each 32-bit word.
    ctx.Add("{}_ssbo{}[{}>>2]+={}.x;{}_ssbo{}[({}>>2)+1]+={}.y;", ctx.stage_name, binding.U32(),
            ctx.var_alloc.Consume(offset), value, ctx.stage_name, binding.U32(),
            ctx.var_alloc.Consume(offset), value);
}
285
// Emulates a 64-bit signed-minimum SSBO atomic as two per-word signed minimums.
// NOT atomic, and taking min independently per 32-bit word is not equivalent to a
// true 64-bit signed min — this is a best-effort fallback when int64 atomics are
// unavailable.
void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                               const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
    // Return value: the pre-op pair (constructed as ivec2; stored into the U32x2 result).
    ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
                 ctx.var_alloc.Consume(offset));
    // Per-word: reinterpret as signed, take min against value[i], store back as uint.
    ctx.Add("for(int "
            "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(min(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
            binding.U32(), ctx.var_alloc.Consume(offset), value);
}
297
// Emulates a 64-bit unsigned-minimum SSBO atomic as two per-word unsigned minimums.
// NOT atomic; per-word min is not equivalent to a true 64-bit min. Best-effort
// fallback when int64 atomics are unavailable.
void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                               const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
    // Return value: the pre-op pair (low word, high word).
    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
                 ctx.var_alloc.Consume(offset));
    // Per-word unsigned min against value[i], written back in place.
    ctx.Add("for(int i=0;i<2;++i){{ "
            "{}_ssbo{}[({}>>2)+i]=min({}_ssbo{}[({}>>2)+i],{}[i]);}}",
            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
            binding.U32(), ctx.var_alloc.Consume(offset), value);
}
309
// Emulates a 64-bit signed-maximum SSBO atomic as two per-word signed maximums.
// NOT atomic; per-word max is not equivalent to a true 64-bit signed max.
// Best-effort fallback when int64 atomics are unavailable.
void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                               const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
    // Return value: the pre-op pair (constructed as ivec2; stored into the U32x2 result).
    ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
                 ctx.var_alloc.Consume(offset));
    // Per-word: reinterpret as signed, take max against value[i], store back as uint.
    ctx.Add("for(int "
            "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(max(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
            binding.U32(), ctx.var_alloc.Consume(offset), value);
}
321
// Emulates a 64-bit unsigned-maximum SSBO atomic as two per-word unsigned maximums.
// NOT atomic; per-word max is not equivalent to a true 64-bit max. Best-effort
// fallback when int64 atomics are unavailable.
void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                               const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
    // Return value: the pre-op pair (low word, high word).
    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
                 ctx.var_alloc.Consume(offset));
    // Per-word unsigned max against value[i], written back in place.
    ctx.Add("for(int i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=max({}_ssbo{}[({}>>2)+i],{}[i]);}}",
            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
            binding.U32(), ctx.var_alloc.Consume(offset), value);
}
332
// Lowers a 64-bit atomic AND to two independent 32-bit atomicAnd operations.
// Each word IS atomic (bitwise AND has no cross-word interaction), but the pair
// is not updated as a single atomic unit — another thread may observe one word
// updated and the other not.
void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
    // Return value: the pre-op words returned by each atomicAnd (low, high).
    ctx.AddU32x2("{}=uvec2(atomicAnd({}_ssbo{}[{}>>2],{}.x),atomicAnd({}_ssbo{}[({}>>2)+1],{}.y));",
                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
340
// Lowers a 64-bit atomic OR to two independent 32-bit atomicOr operations.
// Each word IS atomic (bitwise OR has no cross-word interaction), but the pair
// is not updated as a single atomic unit.
void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                             const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
    // Return value: the pre-op words returned by each atomicOr (low, high).
    ctx.AddU32x2("{}=uvec2(atomicOr({}_ssbo{}[{}>>2],{}.x),atomicOr({}_ssbo{}[({}>>2)+1],{}.y));",
                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
348
// Lowers a 64-bit atomic XOR to two independent 32-bit atomicXor operations.
// Each word IS atomic (bitwise XOR has no cross-word interaction), but the pair
// is not updated as a single atomic unit.
void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
    // Return value: the pre-op words returned by each atomicXor (low, high).
    ctx.AddU32x2("{}=uvec2(atomicXor({}_ssbo{}[{}>>2],{}.x),atomicXor({}_ssbo{}[({}>>2)+1],{}.y));",
                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
356
// Lowers a 64-bit atomic exchange to two independent 32-bit atomicExchange calls.
// Each word is exchanged atomically, but the pair is not exchanged as one atomic
// unit — a concurrent 64-bit access can observe a mix of old and new words.
void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                                   const IR::Value& offset, std::string_view value) {
    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
    // Return value: the previous words returned by each atomicExchange (low, high).
    ctx.AddU32x2("{}=uvec2(atomicExchange({}_ssbo{}[{}>>2],{}.x),atomicExchange({}_ssbo{}[({}>>2)+"
                 "1],{}.y));",
                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
365
268void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 366void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
269 const IR::Value& offset, std::string_view value) { 367 const IR::Value& offset, std::string_view value) {
270 SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd"); 368 SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd");
@@ -388,6 +486,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
388 throw NotImplementedException("GLSL Instrucion"); 486 throw NotImplementedException("GLSL Instrucion");
389} 487}
390 488
489void EmitGlobalAtomicIAdd32x2(EmitContext&) {
490 throw NotImplementedException("GLSL Instrucion");
491}
492
493void EmitGlobalAtomicSMin32x2(EmitContext&) {
494 throw NotImplementedException("GLSL Instrucion");
495}
496
497void EmitGlobalAtomicUMin32x2(EmitContext&) {
498 throw NotImplementedException("GLSL Instrucion");
499}
500
501void EmitGlobalAtomicSMax32x2(EmitContext&) {
502 throw NotImplementedException("GLSL Instrucion");
503}
504
505void EmitGlobalAtomicUMax32x2(EmitContext&) {
506 throw NotImplementedException("GLSL Instrucion");
507}
508
509void EmitGlobalAtomicInc32x2(EmitContext&) {
510 throw NotImplementedException("GLSL Instrucion");
511}
512
513void EmitGlobalAtomicDec32x2(EmitContext&) {
514 throw NotImplementedException("GLSL Instrucion");
515}
516
517void EmitGlobalAtomicAnd32x2(EmitContext&) {
518 throw NotImplementedException("GLSL Instrucion");
519}
520
521void EmitGlobalAtomicOr32x2(EmitContext&) {
522 throw NotImplementedException("GLSL Instrucion");
523}
524
525void EmitGlobalAtomicXor32x2(EmitContext&) {
526 throw NotImplementedException("GLSL Instrucion");
527}
528
529void EmitGlobalAtomicExchange32x2(EmitContext&) {
530 throw NotImplementedException("GLSL Instrucion");
531}
532
391void EmitGlobalAtomicAddF32(EmitContext&) { 533void EmitGlobalAtomicAddF32(EmitContext&) {
392 throw NotImplementedException("GLSL Instrucion"); 534 throw NotImplementedException("GLSL Instrucion");
393} 535}