author    bunnei    2021-07-25 11:39:04 -0700
committer GitHub    2021-07-25 11:39:04 -0700
commit 98b26b6e126d4775fdf3f773fe8a8ac808a8ff8f (patch)
tree   816faa96c2c4d291825063433331a8ea4b3d08f1 /src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
parent Merge pull request #6699 from lat9nq/common-threads (diff)
parent shader: Support out of bound local memory reads and immediate writes (diff)
Merge pull request #6585 from ameerj/hades
Shader Decompiler Rewrite
Diffstat (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp')
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_image.cpp  799
1 file changed, 799 insertions, 0 deletions
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
new file mode 100644
index 000000000..447eb8e0a
--- /dev/null
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -0,0 +1,799 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <string_view>

#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"

namespace Shader::Backend::GLSL {
namespace {
std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) {
    const auto def{info.type == TextureType::Buffer ? ctx.texture_buffers.at(info.descriptor_index)
                                                    : ctx.textures.at(info.descriptor_index)};
    const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""};
    return fmt::format("tex{}{}", def.binding, index_offset);
}

std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) {
    const auto def{info.type == TextureType::Buffer ? ctx.image_buffers.at(info.descriptor_index)
                                                    : ctx.images.at(info.descriptor_index)};
    const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""};
    return fmt::format("img{}{}", def.binding, index_offset);
}
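// The two helpers above map a descriptor to the GLSL name the context declared for it:
// a sampler at binding 2 becomes "tex2" (an image, "img2"), with a dynamic "[index]"
// suffix appended only for arrayed descriptors (def.count > 1).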

std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info) {
    switch (info.type) {
    case TextureType::Color1D:
    case TextureType::Buffer:
        return fmt::format("int({})", value);
    case TextureType::ColorArray1D:
    case TextureType::Color2D:
    case TextureType::ColorArray2D:
        return fmt::format("ivec2({})", value);
    case TextureType::Color3D:
    case TextureType::ColorCube:
        return fmt::format("ivec3({})", value);
    case TextureType::ColorArrayCube:
        return fmt::format("ivec4({})", value);
    default:
        throw NotImplementedException("Integer cast for TextureType {}", info.type.Value());
    }
}

std::string CoordsCastToInt(std::string_view value, const IR::TextureInstInfo& info) {
    switch (info.type) {
    case TextureType::Color1D:
    case TextureType::Buffer:
        return fmt::format("int({})", value);
    case TextureType::ColorArray1D:
    case TextureType::Color2D:
        return fmt::format("ivec2({})", value);
    case TextureType::ColorArray2D:
    case TextureType::Color3D:
    case TextureType::ColorCube:
        return fmt::format("ivec3({})", value);
    case TextureType::ColorArrayCube:
        return fmt::format("ivec4({})", value);
    default:
        throw NotImplementedException("TexelFetchCast type {}", info.type.Value());
    }
}
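// The two casts above differ only in how array layers are counted: CoordsCastToInt treats
// the layer as an extra integer coordinate component (as texelFetch/imageLoad expect), so
// ColorArray2D widens to ivec3 here but only to ivec2 in CastToIntVec.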

bool NeedsShadowLodExt(TextureType type) {
    switch (type) {
    case TextureType::ColorArray2D:
    case TextureType::ColorCube:
    case TextureType::ColorArrayCube:
        return true;
    default:
        return false;
    }
}
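// Core GLSL lacks textureLod overloads for these shadow sampler types; explicit-LOD
// shadow lookups on them require GL_EXT_texture_shadow_lod.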

std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
    if (offset.IsImmediate()) {
        return fmt::format("int({})", offset.U32());
    }
    IR::Inst* const inst{offset.InstRecursive()};
    if (inst->AreAllArgsImmediates()) {
        switch (inst->GetOpcode()) {
        case IR::Opcode::CompositeConstructU32x2:
            return fmt::format("ivec2({},{})", inst->Arg(0).U32(), inst->Arg(1).U32());
        case IR::Opcode::CompositeConstructU32x3:
            return fmt::format("ivec3({},{},{})", inst->Arg(0).U32(), inst->Arg(1).U32(),
                               inst->Arg(2).U32());
        case IR::Opcode::CompositeConstructU32x4:
            return fmt::format("ivec4({},{},{},{})", inst->Arg(0).U32(), inst->Arg(1).U32(),
                               inst->Arg(2).U32(), inst->Arg(3).U32());
        default:
            break;
        }
    }
    const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi};
    if (!has_var_aoffi) {
        LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING");
    }
    const auto offset_str{has_var_aoffi ? ctx.var_alloc.Consume(offset) : "0"};
    switch (offset.Type()) {
    case IR::Type::U32:
        return fmt::format("int({})", offset_str);
    case IR::Type::U32x2:
        return fmt::format("ivec2({})", offset_str);
    case IR::Type::U32x3:
        return fmt::format("ivec3({})", offset_str);
    case IR::Type::U32x4:
        return fmt::format("ivec4({})", offset_str);
    default:
        throw NotImplementedException("Offset type {}", offset.Type());
    }
}
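// Offsets fold to literal ivecN constructors whenever every component is an immediate,
// since core GLSL requires texel offsets to be constant expressions. Drivers that accept
// non-constant offsets (support_gl_variable_aoffi) get the dynamic operand; anything else
// is stubbed to offset 0 with a warning.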

std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) {
    const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
    if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
        LOG_WARNING(Shader_GLSL, "Not all arguments in PTP are immediate, STUBBING");
        return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))";
    }
    const IR::Opcode opcode{values[0]->GetOpcode()};
    if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
        throw LogicError("Invalid PTP arguments");
    }
    auto read{[&](unsigned int a, unsigned int b) { return values[a]->Arg(b).U32(); }};

    return fmt::format("ivec2[](ivec2({},{}),ivec2({},{}),ivec2({},{}),ivec2({},{}))", read(0, 0),
                       read(0, 1), read(0, 2), read(0, 3), read(1, 0), read(1, 1), read(1, 2),
                       read(1, 3));
}
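// PTP (per-texel offset) gathers give each of the four gathered texels its own ivec2
// offset; the two u32x4 composites above carry the packed x/y pairs that become the
// ivec2[4] array expected by textureGatherOffsets.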

IR::Inst* PrepareSparse(IR::Inst& inst) {
    const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
    if (sparse_inst) {
        sparse_inst->Invalidate();
    }
    return sparse_inst;
}
} // Anonymous namespace
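// Pattern shared by the emitters below: PrepareSparse detaches the GetSparseFromOp
// pseudo-op so a lookup and its residency flag emit as one sparseTexture*ARB call wrapped
// in sparseTexelsResidentARB; without support_gl_sparse_textures, the flag is stubbed to
// true and the plain lookup is emitted instead.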

void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                std::string_view coords, std::string_view bias_lc,
                                const IR::Value& offset) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    if (info.has_lod_clamp) {
        throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples");
    }
    const auto texture{Texture(ctx, info, index)};
    const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
    const auto sparse_inst{PrepareSparse(inst)};
    const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
    if (sparse_inst && !supports_sparse) {
        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
        ctx.AddU1("{}=true;", *sparse_inst);
    }
    if (!sparse_inst || !supports_sparse) {
        if (!offset.IsEmpty()) {
            const auto offset_str{GetOffsetVec(ctx, offset)};
            if (ctx.stage == Stage::Fragment) {
                ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, offset_str, bias);
            } else {
                ctx.Add("{}=textureLodOffset({},{},0.0,{});", texel, texture, coords, offset_str);
            }
        } else {
            if (ctx.stage == Stage::Fragment) {
                ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias);
            } else {
                ctx.Add("{}=textureLod({},{},0.0);", texel, texture, coords);
            }
        }
        return;
    }
    if (!offset.IsEmpty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));",
                  *sparse_inst, texture, coords, GetOffsetVec(ctx, offset), texel, bias);
    } else {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst,
                  texture, coords, texel, bias);
    }
}
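// Implicit derivatives only exist in fragment shaders; other stages fall back to an
// explicit textureLod at LOD 0 and drop the bias. A typical fragment emission is
// "texel=texture(tex0,coords,bias);" for a sampler at binding 0.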

void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                std::string_view coords, std::string_view lod_lc,
                                const IR::Value& offset) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    if (info.has_bias) {
        throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples");
    }
    if (info.has_lod_clamp) {
        throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples");
    }
    const auto texture{Texture(ctx, info, index)};
    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
    const auto sparse_inst{PrepareSparse(inst)};
    const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
    if (sparse_inst && !supports_sparse) {
        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
        ctx.AddU1("{}=true;", *sparse_inst);
    }
    if (!sparse_inst || !supports_sparse) {
        if (!offset.IsEmpty()) {
            ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc,
                    GetOffsetVec(ctx, offset));
        } else {
            ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc);
        }
        return;
    }
    if (!offset.IsEmpty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodOffsetARB({},{},{},{},{}));",
                  *sparse_inst, texture, coords, lod_lc, GetOffsetVec(ctx, offset), texel);
    } else {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst,
                  texture, coords, lod_lc, texel);
    }
}
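// Explicit-LOD sampling is valid in every shader stage, so unlike the implicit-LOD path
// there is no per-stage split here; bias and LOD-clamp modifiers are rejected up front
// rather than silently dropped.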

void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                    std::string_view coords, std::string_view dref,
                                    std::string_view bias_lc, const IR::Value& offset) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto sparse_inst{PrepareSparse(inst)};
    if (sparse_inst) {
        throw NotImplementedException("EmitImageSampleDrefImplicitLod Sparse texture samples");
    }
    if (info.has_bias) {
        throw NotImplementedException("EmitImageSampleDrefImplicitLod Bias texture samples");
    }
    if (info.has_lod_clamp) {
        throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples");
    }
    const auto texture{Texture(ctx, info, index)};
    const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
    const bool needs_shadow_ext{NeedsShadowLodExt(info.type)};
    const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
    const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod &&
                        ctx.stage != Stage::Fragment && needs_shadow_ext};
    if (use_grad) {
        LOG_WARNING(Shader_GLSL,
                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
        if (info.type == TextureType::ColorArrayCube) {
            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
            ctx.AddF32("{}=0.0f;", inst);
            return;
        }
        const auto d_cast{info.type == TextureType::ColorArray2D ? "vec2" : "vec3"};
        ctx.AddF32("{}=textureGrad({},{}({},{}),{}(0),{}(0));", inst, texture, cast, coords, dref,
                   d_cast, d_cast);
        return;
    }
    if (!offset.IsEmpty()) {
        const auto offset_str{GetOffsetVec(ctx, offset)};
        if (ctx.stage == Stage::Fragment) {
            ctx.AddF32("{}=textureOffset({},{}({},{}),{}{});", inst, texture, cast, coords, dref,
                       offset_str, bias);
        } else {
            ctx.AddF32("{}=textureLodOffset({},{}({},{}),0.0,{});", inst, texture, cast, coords,
                       dref, offset_str);
        }
    } else {
        if (ctx.stage == Stage::Fragment) {
            if (info.type == TextureType::ColorArrayCube) {
                ctx.AddF32("{}=texture({},vec4({}),{});", inst, texture, coords, dref);
            } else {
                ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias);
            }
        } else {
            ctx.AddF32("{}=textureLod({},{}({},{}),0.0);", inst, texture, cast, coords, dref);
        }
    }
}
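// With zero derivatives, textureGrad samples the base level, which approximates an
// explicit LOD-0 shadow lookup when GL_EXT_texture_shadow_lod is unavailable;
// samplerCubeArrayShadow has no textureGrad overload at all, hence the 0.0 stub.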

void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                    std::string_view coords, std::string_view dref,
                                    std::string_view lod_lc, const IR::Value& offset) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto sparse_inst{PrepareSparse(inst)};
    if (sparse_inst) {
        throw NotImplementedException("EmitImageSampleDrefExplicitLod Sparse texture samples");
    }
    if (info.has_bias) {
        throw NotImplementedException("EmitImageSampleDrefExplicitLod Bias texture samples");
    }
    if (info.has_lod_clamp) {
        throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples");
    }
    const auto texture{Texture(ctx, info, index)};
    const bool needs_shadow_ext{NeedsShadowLodExt(info.type)};
    const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext};
    const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
    if (use_grad) {
        LOG_WARNING(Shader_GLSL,
                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
        if (info.type == TextureType::ColorArrayCube) {
            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
            ctx.AddF32("{}=0.0f;", inst);
            return;
        }
        const auto d_cast{info.type == TextureType::ColorArray2D ? "vec2" : "vec3"};
        ctx.AddF32("{}=textureGrad({},{}({},{}),{}(0),{}(0));", inst, texture, cast, coords, dref,
                   d_cast, d_cast);
        return;
    }
    if (!offset.IsEmpty()) {
        const auto offset_str{GetOffsetVec(ctx, offset)};
        if (info.type == TextureType::ColorArrayCube) {
            ctx.AddF32("{}=textureLodOffset({},{},{},{},{});", inst, texture, coords, dref, lod_lc,
                       offset_str);
        } else {
            ctx.AddF32("{}=textureLodOffset({},{}({},{}),{},{});", inst, texture, cast, coords,
                       dref, lod_lc, offset_str);
        }
    } else {
        if (info.type == TextureType::ColorArrayCube) {
            ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc);
        } else {
            ctx.AddF32("{}=textureLod({},{}({},{}),{});", inst, texture, cast, coords, dref,
                       lod_lc);
        }
    }
}

void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                     std::string_view coords, const IR::Value& offset, const IR::Value& offset2) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto texture{Texture(ctx, info, index)};
    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
    const auto sparse_inst{PrepareSparse(inst)};
    const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
    if (sparse_inst && !supports_sparse) {
        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
        ctx.AddU1("{}=true;", *sparse_inst);
    }
    if (!sparse_inst || !supports_sparse) {
        if (offset.IsEmpty()) {
            ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords,
                    info.gather_component);
            return;
        }
        if (offset2.IsEmpty()) {
            ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords,
                    GetOffsetVec(ctx, offset), info.gather_component);
            return;
        }
        // PTP
        const auto offsets{PtpOffsets(offset, offset2)};
        ctx.Add("{}=textureGatherOffsets({},{},{},int({}));", texel, texture, coords, offsets,
                info.gather_component);
        return;
    }
    if (offset.IsEmpty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},int({})));",
                  *sparse_inst, texture, coords, texel, info.gather_component);
        return;
    }
    if (offset2.IsEmpty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));",
                  *sparse_inst, texture, CastToIntVec(coords, info), GetOffsetVec(ctx, offset),
                  texel, info.gather_component);
        return;
    }
    // PTP
    const auto offsets{PtpOffsets(offset, offset2)};
    ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetsARB({},{},{},{},int({})));",
              *sparse_inst, texture, CastToIntVec(coords, info), offsets, texel,
              info.gather_component);
}
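// The gathered component comes from the instruction info rather than a runtime operand,
// e.g. "texel=textureGather(tex0,coords,int(1));" for a green-channel gather at binding 0.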

void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                         std::string_view coords, const IR::Value& offset, const IR::Value& offset2,
                         std::string_view dref) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto texture{Texture(ctx, info, index)};
    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
    const auto sparse_inst{PrepareSparse(inst)};
    const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
    if (sparse_inst && !supports_sparse) {
        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
        ctx.AddU1("{}=true;", *sparse_inst);
    }
    if (!sparse_inst || !supports_sparse) {
        if (offset.IsEmpty()) {
            ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref);
            return;
        }
        if (offset2.IsEmpty()) {
            ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref,
                    GetOffsetVec(ctx, offset));
            return;
        }
        // PTP
        const auto offsets{PtpOffsets(offset, offset2)};
        ctx.Add("{}=textureGatherOffsets({},{},{},{});", texel, texture, coords, dref, offsets);
        return;
    }
    if (offset.IsEmpty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},{}));", *sparse_inst,
                  texture, coords, dref, texel);
        return;
    }
    if (offset2.IsEmpty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},{}));",
                  *sparse_inst, texture, CastToIntVec(coords, info), dref,
                  GetOffsetVec(ctx, offset), texel);
        return;
    }
    // PTP
    const auto offsets{PtpOffsets(offset, offset2)};
    ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetsARB({},{},{},{},{}));",
              *sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel);
}

void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                    std::string_view coords, std::string_view offset, std::string_view lod,
                    [[maybe_unused]] std::string_view ms) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    if (info.has_bias) {
        throw NotImplementedException("EmitImageFetch Bias texture samples");
    }
    if (info.has_lod_clamp) {
        throw NotImplementedException("EmitImageFetch Lod clamp samples");
    }
    const auto texture{Texture(ctx, info, index)};
    const auto sparse_inst{PrepareSparse(inst)};
    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
    const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
    if (sparse_inst && !supports_sparse) {
        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
        ctx.AddU1("{}=true;", *sparse_inst);
    }
    if (!sparse_inst || !supports_sparse) {
        if (!offset.empty()) {
            ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture,
                    CoordsCastToInt(coords, info), lod, CoordsCastToInt(offset, info));
        } else {
            if (info.type == TextureType::Buffer) {
                ctx.Add("{}=texelFetch({},int({}));", texel, texture, coords);
            } else {
                ctx.Add("{}=texelFetch({},{},int({}));", texel, texture,
                        CoordsCastToInt(coords, info), lod);
            }
        }
        return;
    }
    if (!offset.empty()) {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));",
                  *sparse_inst, texture, CastToIntVec(coords, info), lod,
                  CastToIntVec(offset, info), texel);
    } else {
        ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchARB({},{},int({}),{}));",
                  *sparse_inst, texture, CastToIntVec(coords, info), lod, texel);
    }
}
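// Buffer textures have no mip chain, so they use the LOD-less texelFetch overload; all
// other types fetch with integer coordinates plus an explicit integer LOD.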

void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                              std::string_view lod) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto texture{Texture(ctx, info, index)};
    switch (info.type) {
    case TextureType::Color1D:
        return ctx.AddU32x4(
            "{}=uvec4(uint(textureSize({},int({}))),0u,0u,uint(textureQueryLevels({})));", inst,
            texture, lod, texture);
    case TextureType::ColorArray1D:
    case TextureType::Color2D:
    case TextureType::ColorCube:
        return ctx.AddU32x4(
            "{}=uvec4(uvec2(textureSize({},int({}))),0u,uint(textureQueryLevels({})));", inst,
            texture, lod, texture);
    case TextureType::ColorArray2D:
    case TextureType::Color3D:
    case TextureType::ColorArrayCube:
        return ctx.AddU32x4(
            "{}=uvec4(uvec3(textureSize({},int({}))),uint(textureQueryLevels({})));", inst, texture,
            lod, texture);
    case TextureType::Buffer:
        throw NotImplementedException("EmitImageQueryDimensions Texture buffers");
    }
    throw LogicError("Unspecified image type {}", info.type.Value());
}
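// Query results pack size and mip count into one uvec4: the low components hold
// textureSize at the requested LOD (zero-padded) and .w holds textureQueryLevels.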

void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                       std::string_view coords) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto texture{Texture(ctx, info, index)};
    return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords);
}

void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                       std::string_view coords, const IR::Value& derivatives,
                       const IR::Value& offset, [[maybe_unused]] const IR::Value& lod_clamp) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    if (info.has_lod_clamp) {
        throw NotImplementedException("EmitImageGradient Lod clamp samples");
    }
    const auto sparse_inst{PrepareSparse(inst)};
    if (sparse_inst) {
        throw NotImplementedException("EmitImageGradient Sparse");
    }
    if (!offset.IsEmpty()) {
        throw NotImplementedException("EmitImageGradient offset");
    }
    const auto texture{Texture(ctx, info, index)};
    const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
    const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp};
    const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)};
    if (multi_component) {
        ctx.Add("{}=textureGrad({},{},vec2({}.xz),vec2({}.yz));", texel, texture, coords,
                derivatives_vec, derivatives_vec);
    } else {
        ctx.Add("{}=textureGrad({},{},float({}.x),float({}.y));", texel, texture, coords,
                derivatives_vec, derivatives_vec);
    }
}
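// The derivatives operand packs both partial derivatives in a single vector; the
// multi-component form slices it into the dPdx/dPdy arguments textureGrad expects, while
// the scalar form casts .x/.y for single-derivative (1D) gradients.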

void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                   std::string_view coords) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto sparse_inst{PrepareSparse(inst)};
    if (sparse_inst) {
        throw NotImplementedException("EmitImageRead Sparse");
    }
    const auto image{Image(ctx, info, index)};
    ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, CoordsCastToInt(coords, info));
}

void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                    std::string_view coords, std::string_view color) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.Add("imageStore({},{},{});", image, CoordsCastToInt(coords, info), color);
}

void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}

void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, CoordsCastToInt(coords, info),
               value);
}

void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, CoordsCastToInt(coords, info),
               value);
}

void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, CoordsCastToInt(coords, info),
               value);
}

void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, CoordsCastToInt(coords, info),
               value);
}
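// GLSL selects the signed or unsigned imageAtomicMin/Max overload from the operand type,
// so the SMin/UMin and SMax/UMax emitters above differ only in the int()/uint() cast
// applied to value.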

void EmitImageAtomicInc32(EmitContext&, IR::Inst&, const IR::Value&, std::string_view,
                          std::string_view) {
    NotImplemented();
}

void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string_view,
                          std::string_view) {
    NotImplemented();
}

void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                          std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}

void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                         std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}

void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                          std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}

void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                               std::string_view coords, std::string_view value) {
    const auto info{inst.Flags<IR::TextureInstInfo>()};
    const auto image{Image(ctx, info, index)};
    ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, CoordsCastToInt(coords, info),
               value);
}

void EmitBindlessImageSampleImplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageSampleExplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageGather(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageGatherDref(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageFetch(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageQueryDimensions(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageQueryLod(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageGradient(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageRead(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageWrite(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageSampleImplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageSampleExplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageGather(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageGatherDref(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageFetch(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageQueryDimensions(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageQueryLod(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageGradient(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageRead(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageWrite(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicIAdd32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicSMin32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicUMin32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicSMax32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicUMax32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicInc32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicDec32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicAnd32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicOr32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicXor32(EmitContext&) {
    NotImplemented();
}

void EmitBindlessImageAtomicExchange32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicIAdd32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicSMin32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicUMin32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicSMax32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicUMax32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicInc32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicDec32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicAnd32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicOr32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicXor32(EmitContext&) {
    NotImplemented();
}

void EmitBoundImageAtomicExchange32(EmitContext&) {
    NotImplemented();
}

} // namespace Shader::Backend::GLSL