Diffstat (limited to 'src/shader_recompiler')
-rw-r--r--  src/shader_recompiler/CMakeLists.txt                                              2
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_composite.cpp                      2
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_image.cpp                            2
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp                         39
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp                  15
-rw-r--r--  src/shader_recompiler/backend/spirv/spirv_emit_context.cpp                        3
-rw-r--r--  src/shader_recompiler/backend/spirv/spirv_emit_context.h                          1
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp   3
8 files changed, 47 insertions, 20 deletions
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index 07e75f9d8..83b763447 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -245,8 +245,6 @@ target_link_libraries(shader_recompiler PUBLIC common fmt::fmt sirit)
 
 if (MSVC)
     target_compile_options(shader_recompiler PRIVATE
-        /W4
-
         /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
         /we4800 # Implicit conversion from 'type' to bool. Possible information loss
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_composite.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_composite.cpp
index d508ee567..4e8ba4ae6 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_composite.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_composite.cpp
@@ -55,7 +55,7 @@ void CompositeInsert(EmitContext& ctx, IR::Inst& inst, Register composite, Objec
55 "MOV.{} {}.{},{};", 55 "MOV.{} {}.{},{};",
56 type, ret, composite, type, ret, swizzle, object); 56 type, ret, composite, type, ret, swizzle, object);
57 } else { 57 } else {
58 // The return value is alised so we can just insert the object, it doesn't matter if it's 58 // The return value is aliased so we can just insert the object, it doesn't matter if it's
59 // aliased 59 // aliased
60 ctx.Add("MOV.{} {}.{},{};", type, ret, swizzle, object); 60 ctx.Add("MOV.{} {}.{},{};", type, ret, swizzle, object);
61 } 61 }
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index 3ad668a47..d9872ecc2 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -558,7 +558,7 @@ void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     if (multi_component) {
         if (info.num_derivates >= 3) {
             const auto offset_vec{ctx.var_alloc.Consume(offset)};
-            ctx.Add("{}=textureGrad({},{},vec3({}.xz, {}.x),vec3({}.yz, {}.y));", texel, texture,
+            ctx.Add("{}=textureGrad({},{},vec3({}.xz, {}.x),vec3({}.yw, {}.y));", texel, texture,
                     coords, derivatives_vec, offset_vec, derivatives_vec, offset_vec);
             return;
         }
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 7d901c04b..8decdf399 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -91,6 +91,34 @@ public:
         }
     }
 
+    explicit ImageOperands(EmitContext& ctx, bool has_lod_clamp, Id derivates_1, Id derivates_2,
+                           Id offset, Id lod_clamp) {
+        if (!Sirit::ValidId(derivates_1) || !Sirit::ValidId(derivates_2)) {
+            throw LogicError("Derivates must be present");
+        }
+        boost::container::static_vector<Id, 3> deriv_1_accum{
+            ctx.OpCompositeExtract(ctx.F32[1], derivates_1, 0),
+            ctx.OpCompositeExtract(ctx.F32[1], derivates_1, 2),
+            ctx.OpCompositeExtract(ctx.F32[1], derivates_2, 0),
+        };
+        boost::container::static_vector<Id, 3> deriv_2_accum{
+            ctx.OpCompositeExtract(ctx.F32[1], derivates_1, 1),
+            ctx.OpCompositeExtract(ctx.F32[1], derivates_1, 3),
+            ctx.OpCompositeExtract(ctx.F32[1], derivates_2, 1),
+        };
+        const Id derivates_id1{ctx.OpCompositeConstruct(
+            ctx.F32[3], std::span{deriv_1_accum.data(), deriv_1_accum.size()})};
+        const Id derivates_id2{ctx.OpCompositeConstruct(
+            ctx.F32[3], std::span{deriv_2_accum.data(), deriv_2_accum.size()})};
+        Add(spv::ImageOperandsMask::Grad, derivates_id1, derivates_id2);
+        if (Sirit::ValidId(offset)) {
+            Add(spv::ImageOperandsMask::Offset, offset);
+        }
+        if (has_lod_clamp) {
+            Add(spv::ImageOperandsMask::MinLod, lod_clamp);
+        }
+    }
+
     std::span<const Id> Span() const noexcept {
         return std::span{operands.data(), operands.size()};
     }
@@ -176,9 +204,7 @@ Id TextureImage(EmitContext& ctx, IR::TextureInstInfo info, const IR::Value& ind
         if (def.count > 1) {
             throw NotImplementedException("Indirect texture sample");
         }
-        const Id sampler_id{def.id};
-        const Id id{ctx.OpLoad(ctx.sampled_texture_buffer_type, sampler_id)};
-        return ctx.OpImage(ctx.image_buffer_type, id);
+        return ctx.OpLoad(ctx.image_buffer_type, def.id);
     } else {
         const TextureDefinition& def{ctx.textures.at(info.descriptor_index)};
         if (def.count > 1) {
@@ -524,8 +550,11 @@ Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
 Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                      Id derivates, Id offset, Id lod_clamp) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
-    const ImageOperands operands(ctx, info.has_lod_clamp != 0, derivates, info.num_derivates,
-                                 offset, lod_clamp);
+    const auto operands =
+        info.num_derivates == 3
+            ? ImageOperands(ctx, info.has_lod_clamp != 0, derivates, offset, {}, lod_clamp)
+            : ImageOperands(ctx, info.has_lod_clamp != 0, derivates, info.num_derivates, offset,
+                            lod_clamp);
     return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
                 &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
                 Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp
index 2a12feddc..dde0f6e9c 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp
@@ -7,15 +7,12 @@
 
 namespace Shader::Backend::SPIRV {
 namespace {
-Id Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) {
-    if (!index.IsImmediate()) {
-        throw NotImplementedException("Indirect image indexing");
-    }
+Id Image(EmitContext& ctx, IR::TextureInstInfo info) {
     if (info.type == TextureType::Buffer) {
-        const ImageBufferDefinition def{ctx.image_buffers.at(index.U32())};
+        const ImageBufferDefinition def{ctx.image_buffers.at(info.descriptor_index)};
         return def.id;
     } else {
-        const ImageDefinition def{ctx.images.at(index.U32())};
+        const ImageDefinition def{ctx.images.at(info.descriptor_index)};
         return def.id;
     }
 }
@@ -28,8 +25,12 @@ std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
 
 Id ImageAtomicU32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id value,
                   Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
+    if (!index.IsImmediate() || index.U32() != 0) {
+        // TODO: handle layers
+        throw NotImplementedException("Image indexing");
+    }
     const auto info{inst->Flags<IR::TextureInstInfo>()};
-    const Id image{Image(ctx, index, info)};
+    const Id image{Image(ctx, info)};
     const Id pointer{ctx.OpImageTexelPointer(ctx.image_u32, image, coords, ctx.Const(0U))};
     const auto [scope, semantics]{AtomicArgs(ctx)};
     return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
index bec5db173..57df6fc34 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
@@ -1242,9 +1242,8 @@ void EmitContext::DefineTextureBuffers(const Info& info, u32& binding) {
     }
     const spv::ImageFormat format{spv::ImageFormat::Unknown};
     image_buffer_type = TypeImage(F32[1], spv::Dim::Buffer, 0U, false, false, 1, format);
-    sampled_texture_buffer_type = TypeSampledImage(image_buffer_type);
 
-    const Id type{TypePointer(spv::StorageClass::UniformConstant, sampled_texture_buffer_type)};
+    const Id type{TypePointer(spv::StorageClass::UniformConstant, image_buffer_type)};
     texture_buffers.reserve(info.texture_buffer_descriptors.size());
     for (const TextureBufferDescriptor& desc : info.texture_buffer_descriptors) {
         if (desc.count != 1) {
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
index e63330f11..7c49fd504 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
@@ -206,7 +206,6 @@ public:
     Id output_u32{};
 
     Id image_buffer_type{};
-    Id sampled_texture_buffer_type{};
     Id image_u32{};
 
     std::array<UniformDefinitions, Info::MAX_CBUFS> cbufs{};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
index 753c62098..e593132e6 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
@@ -161,7 +161,8 @@ enum class SpecialRegister : u64 {
         LOG_WARNING(Shader, "(STUBBED) SR_AFFINITY");
         return ir.Imm32(0); // This is the default value hardware returns.
     default:
-        throw NotImplementedException("S2R special register {}", special_register);
+        LOG_CRITICAL(Shader, "(STUBBED) Special register {}", special_register);
+        return ir.Imm32(0); // This is the default value hardware returns.
     }
 }
 } // Anonymous namespace