Diffstat (limited to 'src/shader_recompiler/backend')
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm.cpp              3
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_image.cpp      282
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_instructions.h   6
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp       2
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp        8
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_instructions.h   4
6 files changed, 258 insertions, 47 deletions
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
index 0c591f73c..d7a08e4b3 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
@@ -283,6 +283,9 @@ void SetupOptions(std::string& header, Info info) {
     if (info.uses_subgroup_shuffles) {
         header += "OPTION NV_shader_thread_shuffle;";
     }
+    if (info.uses_sparse_residency) {
+        header += "OPTION EXT_sparse_texture2;";
+    }
     const auto non_zero_frag_colors{info.stores_frag_color | std::views::drop(1)};
     if (std::ranges::find(non_zero_frag_colors, true) != non_zero_frag_colors.end()) {
         header += "OPTION ARB_draw_buffers;";
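
Note on the option added above: EXT_sparse_texture2 is what lets the GLASM emitted later in this diff attach a .SPARSE modifier to sample instructions and read residency back through a NONRESIDENT conditional move. A small illustrative C++ fragment, reusing the emitter's format strings (the register names and texture binding are invented here, not taken from generated output):

    #include <fmt/format.h>
    #include <string>

    // Illustrative only: shows the shape of the GLASM that the sparse path below produces.
    std::string SparseSampleExample() {
        std::string code{"OPTION EXT_sparse_texture2;\n"};
        code += fmt::format("TEX.F{} {},{},{},{}{};\n", ".SPARSE", "R0", "R1", "texture[0]", "2D", "");
        // -1 marks the fetch as resident; the NONRESIDENT condition rewrites it to 0 otherwise.
        code += fmt::format("MOV.S {},-1;\n"
                            "MOV.S {}(NONRESIDENT),0;\n",
                            "R2", "R2");
        return code;
    }
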
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
index 2af5483d9..3d76ab315 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
@@ -9,6 +9,34 @@
 
 namespace Shader::Backend::GLASM {
 namespace {
+struct ScopedRegister {
+    ScopedRegister() = default;
+    ScopedRegister(RegAlloc& reg_alloc_) : reg_alloc{&reg_alloc_}, reg{reg_alloc->AllocReg()} {}
+
+    ~ScopedRegister() {
+        if (reg_alloc) {
+            reg_alloc->FreeReg(reg);
+        }
+    }
+
+    ScopedRegister& operator=(ScopedRegister&& rhs) noexcept {
+        if (reg_alloc) {
+            reg_alloc->FreeReg(reg);
+        }
+        reg_alloc = std::exchange(rhs.reg_alloc, nullptr);
+        reg = rhs.reg;
+    }
+
+    ScopedRegister(ScopedRegister&& rhs) noexcept
+        : reg_alloc{std::exchange(rhs.reg_alloc, nullptr)}, reg{rhs.reg} {}
+
+    ScopedRegister& operator=(const ScopedRegister&) = delete;
+    ScopedRegister(const ScopedRegister&) = delete;
+
+    RegAlloc* reg_alloc{};
+    Register reg;
+};
+
 std::string Texture([[maybe_unused]] EmitContext& ctx, IR::TextureInstInfo info,
                     [[maybe_unused]] const IR::Value& index) {
     // FIXME
@@ -36,6 +64,61 @@ std::string_view TextureType(IR::TextureInstInfo info) {
     }
     throw InvalidArgument("Invalid texture type {}", info.type.Value());
 }
+
+std::string_view ShadowTextureType(IR::TextureInstInfo info) {
+    switch (info.type) {
+    case TextureType::Color1D:
+        return "SHADOW1D";
+    case TextureType::ColorArray1D:
+        return "SHADOWARRAY1D";
+    case TextureType::Color2D:
+        return "SHADOW2D";
+    case TextureType::ColorArray2D:
+        return "SHADOWARRAY2D";
+    case TextureType::Color3D:
+        return "SHADOW3D";
+    case TextureType::ColorCube:
+        return "SHADOWCUBE";
+    case TextureType::ColorArrayCube:
+        return "SHADOWARRAYCUBE";
+    case TextureType::Buffer:
+        return "SHADOWBUFFER";
+    }
+    throw InvalidArgument("Invalid texture type {}", info.type.Value());
+}
+
+std::string Offset(EmitContext& ctx, const IR::Value& offset) {
+    if (offset.IsEmpty()) {
+        return "";
+    }
+    return fmt::format(",offset({})", Register{ctx.reg_alloc.Consume(offset)});
+}
+
+std::pair<std::string, ScopedRegister> Coord(EmitContext& ctx, const IR::Value& coord) {
+    if (coord.IsImmediate()) {
+        ScopedRegister scoped_reg(ctx.reg_alloc);
+        return {fmt::to_string(scoped_reg.reg), std::move(scoped_reg)};
+    }
+    std::string coord_vec{fmt::to_string(Register{ctx.reg_alloc.Consume(coord)})};
+    if (coord.InstRecursive()->HasUses()) {
+        // Move non-dead coords to a separate register, although this should never happen because
+        // vectors are only assembled for immediate texture instructions
+        ctx.Add("MOV.F RC,{};", coord_vec);
+        coord_vec = "RC";
+    }
+    return {std::move(coord_vec), ScopedRegister{}};
+}
+
+void StoreSparse(EmitContext& ctx, IR::Inst* sparse_inst) {
+    if (!sparse_inst) {
+        return;
+    }
+    const Register sparse_ret{ctx.reg_alloc.Define(*sparse_inst)};
+    ctx.Add("MOV.S {},-1;"
+            "MOV.S {}(NONRESIDENT),0;",
+            sparse_ret, sparse_ret);
+    sparse_inst->Invalidate();
+}
 } // Anonymous namespace
 
 void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
@@ -46,17 +129,8 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const std::string_view lod_clamp_mod{info.has_lod_clamp ? ".LODCLAMP" : ""};
     const std::string_view type{TextureType(info)};
     const std::string texture{Texture(ctx, info, index)};
-    std::string offset_vec;
-    if (!offset.IsEmpty()) {
-        offset_vec = fmt::format(",offset({})", Register{ctx.reg_alloc.Consume(offset)});
-    }
-    std::string coord_vec{fmt::to_string(Register{ctx.reg_alloc.Consume(coord)})};
-    if (coord.InstRecursive()->HasUses()) {
-        // Move non-dead coords to a separate register, although this should never happen because
-        // vectors are only assembled for immediate texture instructions
-        ctx.Add("MOV.F RC,{};", coord_vec);
-        coord_vec = "RC";
-    }
+    const std::string offset_vec{Offset(ctx, offset)};
+    const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
     const Register ret{ctx.reg_alloc.Define(inst)};
     if (info.has_bias) {
         if (info.type == TextureType::ColorArrayCube) {
@@ -83,38 +157,172 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
                     type, offset_vec);
         }
     }
-    if (sparse_inst) {
-        const Register sparse_ret{ctx.reg_alloc.Define(*sparse_inst)};
-        ctx.Add("MOV.S {},-1;"
-                "MOV.S {}(NONRESIDENT),0;",
-                sparse_ret, sparse_ret);
-        sparse_inst->Invalidate();
-    }
+    StoreSparse(ctx, sparse_inst);
 }
 
-void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
-                                [[maybe_unused]] const IR::Value& index,
-                                [[maybe_unused]] Register coord, [[maybe_unused]] Register lod_lc,
-                                [[maybe_unused]] const IR::Value& offset) {
-    throw NotImplementedException("GLASM instruction");
+void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                                const IR::Value& coord, ScalarF32 lod, const IR::Value& offset) {
+    const auto info{inst.Flags<IR::TextureInstInfo>()};
+    const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
+    const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
+    const std::string_view type{TextureType(info)};
+    const std::string texture{Texture(ctx, info, index)};
+    const std::string offset_vec{Offset(ctx, offset)};
+    const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
+    const Register ret{ctx.reg_alloc.Define(inst)};
+    if (info.type == TextureType::ColorArrayCube) {
+        ctx.Add("TXL.F{} {},{},{},{},ARRAYCUBE{};", sparse_mod, ret, coord_vec, lod, texture,
+                offset_vec);
+    } else {
+        ctx.Add("MOV.F {}.w,{};"
+                "TXL.F{} {},{},{},{}{};",
+                coord_vec, lod, sparse_mod, ret, coord_vec, texture, type, offset_vec);
+    }
+    StoreSparse(ctx, sparse_inst);
 }
 
-void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx,
-                                    [[maybe_unused]] IR::Inst& inst,
-                                    [[maybe_unused]] const IR::Value& index,
-                                    [[maybe_unused]] Register coord, [[maybe_unused]] Register dref,
-                                    [[maybe_unused]] Register bias_lc,
-                                    [[maybe_unused]] const IR::Value& offset) {
-    throw NotImplementedException("GLASM instruction");
+void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                                    const IR::Value& coord, ScalarF32 dref, Register bias_lc,
+                                    const IR::Value& offset) {
+    const auto info{inst.Flags<IR::TextureInstInfo>()};
+    const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
+    const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
+    const std::string_view type{ShadowTextureType(info)};
+    const std::string texture{Texture(ctx, info, index)};
+    const std::string offset_vec{Offset(ctx, offset)};
+    const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
+    const Register ret{ctx.reg_alloc.Define(inst)};
+    if (info.has_bias) {
+        if (info.has_lod_clamp) {
+            switch (info.type) {
+            case TextureType::Color1D:
+            case TextureType::ColorArray1D:
+            case TextureType::Color2D:
+                ctx.Add("MOV.F {}.z,{};"
+                        "MOV.F {}.w,{}.x;"
+                        "TXB.F.LODCLAMP{} {},{},{}.y,{},{}{};",
+                        coord_vec, dref, coord_vec, bias_lc, sparse_mod, ret, coord_vec, bias_lc,
+                        texture, type, offset_vec);
+                break;
+            case TextureType::ColorArray2D:
+            case TextureType::ColorCube:
+                ctx.Add("MOV.F {}.w,{};"
+                        "TXB.F.LODCLAMP{} {},{},{},{},{}{};",
+                        coord_vec, dref, sparse_mod, ret, coord_vec, bias_lc, texture, type,
+                        offset_vec);
+                break;
+            default:
+                throw NotImplementedException("Invalid type {} with bias and lod clamp",
+                                              info.type.Value());
+            }
+        } else {
+            switch (info.type) {
+            case TextureType::Color1D:
+            case TextureType::ColorArray1D:
+            case TextureType::Color2D:
+                ctx.Add("MOV.F {}.z,{};"
+                        "MOV.F {}.w,{}.x;"
+                        "TXB.F{} {},{},{},{}{};",
+                        coord_vec, dref, coord_vec, bias_lc, sparse_mod, ret, coord_vec, texture,
+                        type, offset_vec);
+                break;
+            case TextureType::ColorArray2D:
+            case TextureType::ColorCube:
+                ctx.Add("MOV.F {}.w,{};"
+                        "TXB.F{} {},{},{},{},{}{};",
+                        coord_vec, dref, sparse_mod, ret, coord_vec, bias_lc, texture, type,
+                        offset_vec);
+                break;
+            case TextureType::ColorArrayCube: {
+                const ScopedRegister pair{ctx.reg_alloc};
+                ctx.Add("MOV.F {}.x,{};"
+                        "MOV.F {}.y,{}.x;"
+                        "TXB.F{} {},{},{},{},{}{};",
+                        pair.reg, dref, pair.reg, bias_lc, sparse_mod, ret, coord_vec, pair.reg,
+                        texture, type, offset_vec);
+                break;
+            }
+            default:
+                throw NotImplementedException("Invalid type {}", info.type.Value());
+            }
+        }
+    } else {
+        if (info.has_lod_clamp) {
+            if (info.type != TextureType::ColorArrayCube) {
+                const bool w_swizzle{info.type == TextureType::ColorArray2D ||
+                                     info.type == TextureType::ColorCube};
+                const char dref_swizzle{w_swizzle ? 'w' : 'z'};
+                ctx.Add("MOV.F {}.{},{};"
+                        "TEX.F.LODCLAMP{} {},{},{},{},{}{};",
+                        coord_vec, dref_swizzle, dref, sparse_mod, ret, coord_vec, bias_lc, texture,
+                        type, offset_vec);
+            } else {
+                const ScopedRegister pair{ctx.reg_alloc};
+                ctx.Add("MOV.F {}.x,{};"
+                        "MOV.F {}.y,{};"
+                        "TEX.F.LODCLAMP{} {},{},{},{},{}{};",
+                        pair.reg, dref, pair.reg, bias_lc, sparse_mod, ret, coord_vec, pair.reg,
+                        texture, type, offset_vec);
+            }
+        } else {
+            if (info.type != TextureType::ColorArrayCube) {
+                const bool w_swizzle{info.type == TextureType::ColorArray2D ||
+                                     info.type == TextureType::ColorCube};
+                const char dref_swizzle{w_swizzle ? 'w' : 'z'};
+                ctx.Add("MOV.F {}.{},{};"
+                        "TEX.F{} {},{},{},{}{};",
+                        coord_vec, dref_swizzle, dref, sparse_mod, ret, coord_vec, texture, type,
+                        offset_vec);
+            } else {
+                const ScopedRegister pair{ctx.reg_alloc};
+                ctx.Add("TEX.F{} {},{},{},{},{}{};", sparse_mod, ret, coord_vec, dref, texture,
+                        type, offset_vec);
+            }
+        }
+    }
+    StoreSparse(ctx, sparse_inst);
 }
 
-void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx,
-                                    [[maybe_unused]] IR::Inst& inst,
-                                    [[maybe_unused]] const IR::Value& index,
-                                    [[maybe_unused]] Register coord, [[maybe_unused]] Register dref,
-                                    [[maybe_unused]] Register lod_lc,
-                                    [[maybe_unused]] const IR::Value& offset) {
-    throw NotImplementedException("GLASM instruction");
+void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                                    const IR::Value& coord, ScalarF32 dref, ScalarF32 lod,
+                                    const IR::Value& offset) {
+    const auto info{inst.Flags<IR::TextureInstInfo>()};
+    const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
+    const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
+    const std::string_view type{ShadowTextureType(info)};
+    const std::string texture{Texture(ctx, info, index)};
+    const std::string offset_vec{Offset(ctx, offset)};
+    const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
+    const Register ret{ctx.reg_alloc.Define(inst)};
+    switch (info.type) {
+    case TextureType::Color1D:
+    case TextureType::ColorArray1D:
+    case TextureType::Color2D:
+        ctx.Add("MOV.F {}.z,{};"
+                "MOV.F {}.w,{};"
+                "TXL.F{} {},{},{},{}{};",
+                coord_vec, dref, coord_vec, lod, sparse_mod, ret, coord_vec, texture, type,
+                offset_vec);
+        break;
+    case TextureType::ColorArray2D:
+    case TextureType::ColorCube:
+        ctx.Add("MOV.F {}.w,{};"
+                "TXL.F{} {},{},{},{},{}{};",
+                coord_vec, dref, sparse_mod, ret, coord_vec, lod, texture, type, offset_vec);
+        break;
+    case TextureType::ColorArrayCube: {
+        const ScopedRegister pair{ctx.reg_alloc};
+        ctx.Add("MOV.F {}.x,{};"
+                "MOV.F {}.y,{};"
+                "TXL.F{} {},{},{},{},{}{};",
+                pair.reg, dref, pair.reg, lod, sparse_mod, ret, coord_vec, pair.reg, texture, type,
+                offset_vec);
+        break;
+    }
+    default:
+        throw NotImplementedException("Invalid type {}", info.type.Value());
+    }
+    StoreSparse(ctx, sparse_inst);
 }
 
 void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
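
The helpers introduced at the top of this file share one pattern: Coord() returns the coordinate register together with a ScopedRegister that owns any scratch allocation, and that scratch register is released automatically once the emit call that used it has been written. A self-contained C++ sketch of the RAII behaviour (FakeRegAlloc and ScopedReg are simplified stand-ins for the backend's RegAlloc and ScopedRegister; only the ownership semantics are modelled):

    #include <cstdio>
    #include <utility>

    struct FakeRegAlloc {            // stand-in: counts live registers instead of tracking GLASM state
        int live{};
        int AllocReg() { return live++; }
        void FreeReg(int) { --live; }
    };

    struct ScopedReg {               // same shape as the diff's ScopedRegister, minus backend types
        ScopedReg() = default;
        explicit ScopedReg(FakeRegAlloc& ra_) : ra{&ra_}, reg{ra->AllocReg()} {}
        ~ScopedReg() {
            if (ra) {
                ra->FreeReg(reg);
            }
        }
        ScopedReg(ScopedReg&& rhs) noexcept : ra{std::exchange(rhs.ra, nullptr)}, reg{rhs.reg} {}
        ScopedReg(const ScopedReg&) = delete;
        ScopedReg& operator=(const ScopedReg&) = delete;
        ScopedReg& operator=(ScopedReg&&) = delete;

        FakeRegAlloc* ra{};
        int reg{};
    };

    int main() {
        FakeRegAlloc ra;
        {
            ScopedReg scratch{ra};                              // like the scratch register Coord() may allocate
            std::printf("live while emitting: %d\n", ra.live);  // prints 1
        }                                                       // destructor frees it, as ScopedRegister does
        std::printf("live afterwards: %d\n", ra.live);          // prints 0
        return 0;
    }
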
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
index a128f9ac4..54e7fab3c 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
@@ -527,12 +527,12 @@ void EmitBoundImageWrite(EmitContext&);
 void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                 const IR::Value& coord, Register bias_lc, const IR::Value& offset);
 void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
-                                Register coord, Register lod_lc, const IR::Value& offset);
+                                const IR::Value& coord, ScalarF32 lod, const IR::Value& offset);
 void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
-                                    Register coord, Register dref, Register bias_lc,
+                                    const IR::Value& coord, ScalarF32 dref, Register bias_lc,
                                     const IR::Value& offset);
 void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
-                                    Register coord, Register dref, Register lod_lc,
+                                    const IR::Value& coord, ScalarF32 dref, ScalarF32 lod,
                                     const IR::Value& offset);
 void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
                      const IR::Value& offset, const IR::Value& offset2);
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
index 0c6a6e1c8..dd307a9a3 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
@@ -38,7 +38,7 @@ void Store(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset, ValueTy
 void Load(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset,
           std::string_view size) {
     const Register ret{ctx.reg_alloc.Define(inst)};
-    StorageOp(ctx, binding, offset, fmt::format("STORE.{} {},DC.x;", size, ret),
+    StorageOp(ctx, binding, offset, fmt::format("LOAD.{} {},DC.x;", size, ret),
               fmt::format("MOV.U {},{{0,0,0,0}};", ret));
 }
 } // Anonymous namespace
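
For reference, Load() appears to build the in-bounds path of a checked global-memory access, with the zero-fill fallback passed alongside; the fix above only changes the opcode text from STORE to LOAD. A minimal illustration of the corrected formatting call (fmt assumed, as elsewhere in the backend; the size suffix and register name are made-up values):

    #include <fmt/format.h>
    #include <string>

    int main() {
        const std::string size{"U32"};
        const std::string ret{"R0"};
        // Matches the corrected format string in Load(): read through the DC.x address instead of writing.
        const std::string body{fmt::format("LOAD.{} {},DC.x;", size, ret)};
        return body == "LOAD.U32 R0,DC.x;" ? 0 : 1;  // returns 0 on the expected output
    }
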
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 5832104df..99b883746 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -337,9 +337,9 @@ Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value&
 }
 
 Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
-                              Id lod_lc, const IR::Value& offset) {
+                              Id lod, const IR::Value& offset) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
-    const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod_lc, offset);
+    const ImageOperands operands(ctx, false, true, false, lod, offset);
     return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
                 &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
                 Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
@@ -356,9 +356,9 @@ Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Va
 }
 
 Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
-                                  Id coords, Id dref, Id lod_lc, const IR::Value& offset) {
+                                  Id coords, Id dref, Id lod, const IR::Value& offset) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
-    const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod_lc, offset);
+    const ImageOperands operands(ctx, false, true, false, lod, offset);
     return Emit(&EmitContext::OpImageSparseSampleDrefExplicitLod,
                 &EmitContext::OpImageSampleDrefExplicitLod, ctx, inst, ctx.F32[1],
                 Texture(ctx, info, index), coords, dref, operands.Mask(), operands.Span());
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 0a2b31772..22260d2a9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -491,11 +491,11 @@ Id EmitBoundImageWrite(EmitContext&);
 Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                               Id bias_lc, const IR::Value& offset);
 Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
-                              Id lod_lc, const IR::Value& offset);
+                              Id lod, const IR::Value& offset);
 Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                                   Id coords, Id dref, Id bias_lc, const IR::Value& offset);
 Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
-                                  Id coords, Id dref, Id lod_lc, const IR::Value& offset);
+                                  Id coords, Id dref, Id lod, const IR::Value& offset);
 Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                    const IR::Value& offset, const IR::Value& offset2);
 Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,