| author | 2021-07-04 00:34:53 -0400 |
|---|---|
| committer | 2021-07-22 21:51:40 -0400 |
| commit | 11f04f1022d0820a1fdba38221ecd38f19d86d9e (patch) |
| tree | c30e87d0a66b0100cb3f7b3ad2fb3bd769654a7a /src/shader_recompiler/backend/spirv |
| parent | vulkan_device: Add missing include algorithm (diff) |
shader: Ignore global memory ops on devices lacking int64 support
Diffstat (limited to 'src/shader_recompiler/backend/spirv')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/shader_recompiler/backend/spirv/emit_context.cpp | 2 |
| -rw-r--r-- | src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp | 36 |
2 files changed, 31 insertions, 7 deletions
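
The "int64 support" referred to here is 64-bit integer arithmetic in shaders, which the SPIR-V backend relies on to handle the 64-bit addresses used by global memory accesses. On Vulkan, that capability is advertised by the `shaderInt64` device feature bit. As a rough, hypothetical sketch (the `Profile` struct and `MakeProfile` helper are invented for illustration; only the `support_int64` flag name comes from the diff below), a frontend might populate the flag like this:

```cpp
#include <vulkan/vulkan.h>

// Hypothetical profile type; only the support_int64 flag name appears in the diff.
struct Profile {
    bool support_int64{};
};

// Sketch: query the device's 64-bit integer shader support and record it in the
// profile that the SPIR-V backend later consults.
Profile MakeProfile(VkPhysicalDevice physical_device) {
    VkPhysicalDeviceFeatures features{};
    vkGetPhysicalDeviceFeatures(physical_device, &features);

    Profile profile{};
    profile.support_int64 = features.shaderInt64 == VK_TRUE;
    return profile;
}
```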
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index 865f34291..2d29d8c14 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -830,7 +830,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
 }
 
 void EmitContext::DefineGlobalMemoryFunctions(const Info& info) {
-    if (!info.uses_global_memory) {
+    if (!info.uses_global_memory || !profile.support_int64) {
         return;
     }
     using DefPtr = Id StorageDefinitions::*;
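
With this guard, the global-memory helper functions are never even defined when the target device lacks int64 support, since they operate on 64-bit addresses. A stripped-down, self-contained sketch of the same early-out pattern (all types here are reduced placeholders, not yuzu's actual declarations):

```cpp
// Minimal sketch of the early-out added above; Info, Profile and EmitContext are
// placeholder stand-ins, not the real shader_recompiler types.
struct Info {
    bool uses_global_memory{};
};

struct Profile {
    bool support_int64{};
};

class EmitContext {
public:
    explicit EmitContext(const Profile& profile_) : profile{profile_} {}

    void DefineGlobalMemoryFunctions(const Info& info) {
        // Skip emitting the global-memory helpers if the shader never uses them
        // or the device cannot do the 64-bit address arithmetic they require.
        if (!info.uses_global_memory || !profile.support_int64) {
            return;
        }
        // ... the real backend emits the load/store helper functions here ...
    }

private:
    const Profile& profile;
};
```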
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
index ccebf170d..679ee2684 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
@@ -84,15 +84,27 @@ void EmitLoadGlobalS16(EmitContext&) {
 }
 
 Id EmitLoadGlobal32(EmitContext& ctx, Id address) {
-    return ctx.OpFunctionCall(ctx.U32[1], ctx.load_global_func_u32, address);
+    if (ctx.profile.support_int64) {
+        return ctx.OpFunctionCall(ctx.U32[1], ctx.load_global_func_u32, address);
+    }
+    LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+    return ctx.Const(0u);
 }
 
 Id EmitLoadGlobal64(EmitContext& ctx, Id address) {
-    return ctx.OpFunctionCall(ctx.U32[2], ctx.load_global_func_u32x2, address);
+    if (ctx.profile.support_int64) {
+        return ctx.OpFunctionCall(ctx.U32[2], ctx.load_global_func_u32x2, address);
+    }
+    LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+    return ctx.Const(0u, 0u);
 }
 
 Id EmitLoadGlobal128(EmitContext& ctx, Id address) {
-    return ctx.OpFunctionCall(ctx.U32[4], ctx.load_global_func_u32x4, address);
+    if (ctx.profile.support_int64) {
+        return ctx.OpFunctionCall(ctx.U32[4], ctx.load_global_func_u32x4, address);
+    }
+    LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+    return ctx.Const(0u, 0u, 0u, 0u);
 }
 
 void EmitWriteGlobalU8(EmitContext&) {
@@ -112,15 +124,27 @@ void EmitWriteGlobalS16(EmitContext&) {
 }
 
 void EmitWriteGlobal32(EmitContext& ctx, Id address, Id value) {
-    ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32, address, value);
+    if (ctx.profile.support_int64) {
+        ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32, address, value);
+        return;
+    }
+    LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
 }
 
 void EmitWriteGlobal64(EmitContext& ctx, Id address, Id value) {
-    ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32x2, address, value);
+    if (ctx.profile.support_int64) {
+        ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32x2, address, value);
+        return;
+    }
+    LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
 }
 
 void EmitWriteGlobal128(EmitContext& ctx, Id address, Id value) {
-    ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32x4, address, value);
+    if (ctx.profile.support_int64) {
+        ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32x4, address, value);
+        return;
+    }
+    LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
 }
 
 Id EmitLoadStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {