summaryrefslogtreecommitdiff
path: root/src/shader_recompiler/backend/spirv
diff options
context:
space:
mode:
authorGravatar Wollnashorn2023-04-05 01:29:46 +0200
committerGravatar Wollnashorn2023-04-08 16:12:30 +0200
commit780240e6979b198e7bd10feaad5399b8b4b63762 (patch)
treec145f48c66cf003e618cefc63496e3f5b7637f56 /src/shader_recompiler/backend/spirv
parentMerge pull request #10024 from german77/crysis (diff)
downloadyuzu-780240e6979b198e7bd10feaad5399b8b4b63762.tar.gz
yuzu-780240e6979b198e7bd10feaad5399b8b4b63762.tar.xz
yuzu-780240e6979b198e7bd10feaad5399b8b4b63762.zip
shader_recompiler: Add subpixel offset for correct rounding at `ImageGather`
On AMD, a subpixel offset of 1/512 of the texel size is applied to the texture coordinates at an `ImageGather` call to ensure the rounding at the texel centers is done the same way as on Maxwell or other Nvidia architectures. See https://www.reedbeta.com/blog/texture-gathers-and-coordinate-precision/ for more details on why this might be necessary. This should fix shadow artifacts at object edges in Zelda: Breath of the Wild (#9957, #6956).
Diffstat (limited to 'src/shader_recompiler/backend/spirv')
-rw-r--r--src/shader_recompiler/backend/spirv/emit_spirv_image.cpp39
1 files changed, 39 insertions, 0 deletions
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 02073c420..968901d42 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -261,6 +261,39 @@ Id BitTest(EmitContext& ctx, Id mask, Id bit) {
261 const Id bit_value{ctx.OpBitwiseAnd(ctx.U32[1], shifted, ctx.Const(1u))}; 261 const Id bit_value{ctx.OpBitwiseAnd(ctx.U32[1], shifted, ctx.Const(1u))};
262 return ctx.OpINotEqual(ctx.U1, bit_value, ctx.u32_zero_value); 262 return ctx.OpINotEqual(ctx.U1, bit_value, ctx.u32_zero_value);
263} 263}
264
// NOTE(review): this is a cgit-rendered diff listing; the leading integers on each
// line below are the patch's new-file line numbers, not part of the C++ source.
//
// Returns `coords` with +1/512-of-a-texel added to the X and Y components so that
// gather rounding at texel centers on AMD hardware matches Maxwell/other Nvidia
// GPUs (see the commit message and the linked reedbeta.com article). Texture
// types other than the 2D-like ones handled below are returned unchanged.
265Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info, Id texture,
266 Id coords)
267 // Apply a subpixel offset of 1/512 the texel size of the texture to ensure same rounding on
268 // AMD hardware as on Maxwell or other Nvidia architectures.
    // calculate_offset queries the level-0 image size (with `dim` components) and
    // yields the nudged {x, y} pair: coord + (1/512) / size  per axis.
269 const auto calculate_offset{[&](size_t dim) -> std::array<Id, 2> {
        // 0x1p-9f is the hex-float literal for 1/512.
270 const Id nudge{ctx.Const(0x1p-9f)};
271 const Id image_size{ctx.OpImageQuerySizeLod(ctx.U32[dim], texture, ctx.u32_zero_value)};
272 const Id offset_x{ctx.OpFDiv(
273 ctx.F32[1], nudge,
274 ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 0)))};
275 const Id offset_y{ctx.OpFDiv(
276 ctx.F32[1], nudge,
277 ctx.OpConvertUToF(ctx.F32[1], ctx.OpCompositeExtract(ctx.U32[1], image_size, 1)))};
278 return {ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 0), offset_x),
279 ctx.OpFAdd(ctx.F32[1], ctx.OpCompositeExtract(ctx.F32[1], coords, 1), offset_y)};
280 }};
    // Only the first two coordinate components are nudged; for array/cube textures
    // the third component (layer / face coordinate) is passed through untouched.
281 switch (info.type) {
282 case TextureType::Color2D:
283 case TextureType::Color2DRect: {
284 const auto offset{calculate_offset(2)};
285 return ctx.OpCompositeConstruct(ctx.F32[2], offset[0], offset[1]);
286 }
287 case TextureType::ColorArray2D:
288 case TextureType::ColorCube: {
289 const auto offset{calculate_offset(3)};
290 return ctx.OpCompositeConstruct(ctx.F32[3], offset[0], offset[1],
291 ctx.OpCompositeExtract(ctx.F32[1], coords, 2));
292 }
        // Other texture types (1D, 3D, buffer, ...): no offset is applied.
293 default:
294 return coords;
295 }
296}
264} // Anonymous namespace 297} // Anonymous namespace
265 298
266Id EmitBindlessImageSampleImplicitLod(EmitContext&) { 299Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
@@ -423,6 +456,9 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
423 const IR::Value& offset, const IR::Value& offset2) { 456 const IR::Value& offset, const IR::Value& offset2) {
424 const auto info{inst->Flags<IR::TextureInstInfo>()}; 457 const auto info{inst->Flags<IR::TextureInstInfo>()};
425 const ImageOperands operands(ctx, offset, offset2); 458 const ImageOperands operands(ctx, offset, offset2);
459 if (ctx.profile.need_gather_subpixel_offset) {
460 coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
461 }
426 return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst, 462 return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
427 ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component), 463 ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
428 operands.MaskOptional(), operands.Span()); 464 operands.MaskOptional(), operands.Span());
@@ -432,6 +468,9 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
432 const IR::Value& offset, const IR::Value& offset2, Id dref) { 468 const IR::Value& offset, const IR::Value& offset2, Id dref) {
433 const auto info{inst->Flags<IR::TextureInstInfo>()}; 469 const auto info{inst->Flags<IR::TextureInstInfo>()};
434 const ImageOperands operands(ctx, offset, offset2); 470 const ImageOperands operands(ctx, offset, offset2);
471 if (ctx.profile.need_gather_subpixel_offset) {
472 coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
473 }
435 return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst, 474 return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
436 ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(), 475 ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(),
437 operands.Span()); 476 operands.Span());