Diffstat (limited to 'src/shader_recompiler/backend/spirv')
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.cpp           8
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp    6
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp     4
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_special.cpp   4
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index cba420cda..14a99750d 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -294,7 +294,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
                         Id main_func) {
     const Info& info{program.info};
     if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
-        // LOG_ERROR(HW_GPU, "Fp32 denorm flush and preserve on the same shader");
+        LOG_ERROR(Shader_SPIRV, "Fp32 denorm flush and preserve on the same shader");
     } else if (info.uses_fp32_denorms_flush) {
         if (profile.support_fp32_denorm_flush) {
             ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -307,7 +307,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
             ctx.AddCapability(spv::Capability::DenormPreserve);
             ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 32U);
         } else {
-            // LOG_WARNING(HW_GPU, "Fp32 denorm preserve used in shader without host support");
+            LOG_WARNING(Shader_SPIRV, "Fp32 denorm preserve used in shader without host support");
         }
     }
     if (!profile.support_separate_denorm_behavior) {
@@ -315,7 +315,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
         return;
     }
     if (info.uses_fp16_denorms_flush && info.uses_fp16_denorms_preserve) {
-        // LOG_ERROR(HW_GPU, "Fp16 denorm flush and preserve on the same shader");
+        LOG_ERROR(Shader_SPIRV, "Fp16 denorm flush and preserve on the same shader");
     } else if (info.uses_fp16_denorms_flush) {
         if (profile.support_fp16_denorm_flush) {
             ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -328,7 +328,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
             ctx.AddCapability(spv::Capability::DenormPreserve);
             ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 16U);
         } else {
-            // LOG_WARNING(HW_GPU, "Fp16 denorm preserve used in shader without host support");
+            LOG_WARNING(Shader_SPIRV, "Fp16 denorm preserve used in shader without host support");
         }
     }
 }
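
Note: the branching above pairs each IEEE denormal mode with a SPIR-V capability (DenormFlushToZero / DenormPreserve) plus an execution mode parameterized by bit width (32U / 16U). Below is a minimal standalone sketch of the fp32 selection logic; the Profile and Info structs are simplified stand-ins mirroring the field names in the diff, not the project's real definitions.

    #include <cstdio>

    // Hypothetical mirrors of the fields SetupDenormControl consults.
    struct Profile {
        bool support_fp32_denorm_flush;
        bool support_fp32_denorm_preserve;
    };
    struct Info {
        bool uses_fp32_denorms_flush;
        bool uses_fp32_denorms_preserve;
    };

    // Pick the fp32 denormal execution mode, logging the conflict and
    // unsupported-preserve cases the same way the patched code does.
    const char* Fp32DenormMode(const Profile& profile, const Info& info) {
        if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
            std::fprintf(stderr, "Fp32 denorm flush and preserve on the same shader\n");
            return "none";
        }
        if (info.uses_fp32_denorms_flush) {
            return profile.support_fp32_denorm_flush ? "DenormFlushToZero" : "none";
        }
        if (info.uses_fp32_denorms_preserve) {
            if (!profile.support_fp32_denorm_preserve) {
                std::fprintf(stderr, "Fp32 denorm preserve used in shader without host support\n");
                return "none";
            }
            return "DenormPreserve";
        }
        return "none";
    }

    int main() {
        std::puts(Fp32DenormMode({true, false}, {true, false})); // DenormFlushToZero
    }
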
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 053800eb7..9af8bb9e1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -73,7 +73,7 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                     binding, offset, sizeof(u32[2]))};
     const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
@@ -140,7 +140,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer_1{SharedPointer(ctx, offset, 0)};
     const Id pointer_2{SharedPointer(ctx, offset, 1)};
     const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -266,7 +266,7 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                     binding, offset, sizeof(u32[2]))};
     const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
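
All three fallbacks above take the same shape: reinterpret the 64-bit location as two 32-bit words (the OpBitcast between U64 and U32[2]), read and write those words with plain loads and stores, and return the old value. A self-contained sketch of that reinterpretation in ordinary C++ follows; memcpy stands in for OpBitcast, and the point of the last comment is exactly why the new log calls this path non-atomic.

    #include <cstdint>
    #include <cstring>

    // Reinterpret two 32-bit words as one 64-bit value, as the fallback's
    // OpBitcast(U64, OpLoad(U32[2], ...)) does. memcpy is the portable bitcast.
    std::uint64_t PackU64(std::uint32_t lo, std::uint32_t hi) {
        const std::uint32_t words[2]{lo, hi};
        std::uint64_t out;
        std::memcpy(&out, words, sizeof(out));
        return out;
    }

    // The reverse split, used when storing the exchanged value back.
    void UnpackU64(std::uint64_t value, std::uint32_t words[2]) {
        std::memcpy(words, &value, sizeof(value));
    }

    // Non-atomic exchange over the two words: load old, store new, return old.
    // Unlike OpAtomicExchange, another thread can observe the halves separately.
    std::uint64_t NonAtomicExchange(std::uint32_t storage[2], std::uint64_t desired) {
        const std::uint64_t original{PackU64(storage[0], storage[1])};
        UnpackU64(desired, storage);
        return original;
    }

    int main() {
        std::uint32_t storage[2]{0x12345678u, 0x9abcdef0u};
        const std::uint64_t old{NonAtomicExchange(storage, 42)};
        return old == PackU64(0x12345678u, 0x9abcdef0u) ? 0 : 1;
    }
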
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index cf842e1e0..647804814 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -39,7 +39,7 @@ public:
         }
         const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
         if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-            // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+            LOG_WARNING(Shader_SPIRV, "Not all arguments in PTP are immediate, ignoring");
             return;
         }
         const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -442,7 +442,7 @@ Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
 Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
     if (info.image_format == ImageFormat::Typeless && !ctx.profile.support_typeless_image_loads) {
-        // LOG_WARNING(..., "Typeless image read not supported by host");
+        LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
         return ctx.ConstantNull(ctx.U32[4]);
     }
     return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],
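
On the read path above, a Typeless (format=Unknown) storage image can only be loaded when the host advertises support (in Vulkan terms, roughly the shaderStorageImageReadWithoutFormat feature); otherwise the emitter substitutes a null constant for the texel. A simplified standalone illustration of that guard, with stand-in types for the profile flag and format tag:

    #include <array>
    #include <cstdint>

    // Stand-ins for the profile flag and format tag used in EmitImageRead above.
    enum class ImageFormat { Typeless, R32_UINT };
    struct Profile {
        bool support_typeless_image_loads; // e.g. shaderStorageImageReadWithoutFormat
    };

    // If the host cannot load from a typeless image, return a null constant
    // instead of performing the read, as the patched code does.
    std::array<std::uint32_t, 4> ReadOrNull(const Profile& profile, ImageFormat format,
                                            std::array<std::uint32_t, 4> (*read)()) {
        if (format == ImageFormat::Typeless && !profile.support_typeless_image_loads) {
            return {0, 0, 0, 0}; // the ConstantNull(U32[4]) equivalent
        }
        return read();
    }

    int main() {
        const auto texel{ReadOrNull({false}, ImageFormat::Typeless,
                                    [] { return std::array<std::uint32_t, 4>{1, 2, 3, 4}; })};
        return texel[0]; // 0: the null-constant path was taken
    }
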
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
index 072a3b1bd..9e7eb3cb1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
@@ -131,7 +131,7 @@ void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
     if (stream.IsImmediate()) {
         ctx.OpEmitStreamVertex(ctx.Def(stream));
     } else {
-        // LOG_WARNING(..., "EmitVertex's stream is not constant");
+        LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
         ctx.OpEmitStreamVertex(ctx.u32_zero_value);
     }
     // Restore fixed pipeline point size after emitting the vertex
@@ -142,7 +142,7 @@ void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
     if (stream.IsImmediate()) {
         ctx.OpEndStreamPrimitive(ctx.Def(stream));
     } else {
-        // LOG_WARNING(..., "EndPrimitive's stream is not constant");
+        LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
         ctx.OpEndStreamPrimitive(ctx.u32_zero_value);
     }
 }
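
SPIR-V requires the Stream operand of OpEmitStreamVertex and OpEndStreamPrimitive to be a constant instruction, which is why a non-immediate stream index is replaced with the constant 0 (ctx.u32_zero_value) rather than passed through. A small illustration of the same decision, with std::optional standing in for IR::Value::IsImmediate():

    #include <cstdio>
    #include <optional>

    // OpEmitStreamVertex requires the stream to be a constant, so a dynamic
    // stream index is clamped to stream 0, matching the fallback above.
    void EmitVertexToStream(std::optional<unsigned> immediate_stream) {
        if (immediate_stream) {
            std::printf("OpEmitStreamVertex %%stream_%u\n", *immediate_stream);
        } else {
            std::fprintf(stderr, "Stream is not immediate\n");
            std::printf("OpEmitStreamVertex %%u32_zero\n");
        }
    }

    int main() {
        EmitVertexToStream(1);            // constant stream: emitted as-is
        EmitVertexToStream(std::nullopt); // dynamic stream: falls back to 0
    }
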