Diffstat (limited to 'src/shader_recompiler/backend/glsl')
 src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp          | 13
 src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp | 23
 src/shader_recompiler/backend/glsl/emit_glsl_image.cpp           | 24
 3 files changed, 32 insertions(+), 28 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index 850eee1e1..9152ace98 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -98,7 +98,7 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
 
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset,
                pointer_offset);
     ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;",
@@ -171,7 +171,7 @@ void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Val
 
 void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -182,7 +182,7 @@ void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -195,7 +195,7 @@ void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -207,7 +207,7 @@ void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -220,8 +220,7 @@ void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value&
 
 void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    // LOG_WARNING(..., "Op falling to non-atomic");
-
+    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
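
Note on the fallback paths above: each Int64 helper now logs the warning and then emits plain 32-bit reads and writes. As a rough illustration, the format strings in EmitSharedAtomicExchange64 expand to GLSL along these lines (the names result, offset and value are hypothetical; the real identifiers come from ctx.var_alloc):

    // Sketch of the generated GLSL, assuming GL_ARB_gpu_shader_int64 for
    // uint64_t, packUint2x32 and unpackUint2x32.
    uint64_t result = packUint2x32(uvec2(smem[offset >> 2], smem[(offset + 4) >> 2]));
    smem[offset >> 2] = unpackUint2x32(value).x;       // low 32 bits
    smem[(offset + 4) >> 2] = unpackUint2x32(value).y; // high 32 bits

The read and the two stores are separate shared-memory accesses, which is exactly why every one of these paths warns that the operation falls back to a non-atomic sequence.
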
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 3eeccfb3c..0d1e5ed7f 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -42,7 +42,7 @@ void GetCbuf(EmitContext& ctx, std::string_view ret, const IR::Value& binding,
     const s32 signed_offset{static_cast<s32>(offset.U32())};
     static constexpr u32 cbuf_size{4096 * 16};
     if (signed_offset < 0 || offset.U32() > cbuf_size) {
-        // LOG_WARNING(..., "Immediate constant buffer offset is out of bounds");
+        LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
         ctx.Add("{}=0u;", ret);
         return;
     }
@@ -144,7 +144,7 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
     const u32 u32_offset{offset.U32()};
     const s32 signed_offset{static_cast<s32>(offset.U32())};
     if (signed_offset < 0 || u32_offset > cbuf_size) {
-        // LOG_WARNING(..., "Immediate constant buffer offset is out of bounds");
+        LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
         ctx.AddU32x2("{}=uvec2(0u);", inst);
         return;
     }
@@ -184,7 +184,8 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
     }
     // GLSL only exposes 8 legacy texcoords
     if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        // LOG_WARNING(..., "GLSL does not allow access to gl_TexCoord[{}]", TexCoordIndex(attr));
+        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
+                    TexCoordIndex(attr));
         ctx.AddF32("{}=0.f;", inst);
         return;
     }
@@ -257,7 +258,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     const char swizzle{"xyzw"[element]};
     // GLSL only exposes 8 legacy texcoords
     if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        // LOG_WARNING(..., "GLSL does not allow access to gl_TexCoord[{}]", TexCoordIndex(attr));
+        LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
+                    TexCoordIndex(attr));
         return;
     }
     if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture7Q) {
@@ -269,8 +271,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::Layer:
         if (ctx.stage != Stage::Geometry &&
             !ctx.profile.support_viewport_index_layer_non_geometry) {
-            // LOG_WARNING(..., "Shader stores viewport layer but device does not support viewport
-            // layer extension");
+            LOG_WARNING(Shader_GLSL, "Shader stores viewport layer but device does not support "
+                                     "viewport layer extension");
             break;
         }
         ctx.Add("gl_Layer=ftoi({});", value);
@@ -278,16 +280,17 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::ViewportIndex:
         if (ctx.stage != Stage::Geometry &&
             !ctx.profile.support_viewport_index_layer_non_geometry) {
-            // LOG_WARNING(..., "Shader stores viewport index but device does not support viewport
-            // layer extension");
+            LOG_WARNING(Shader_GLSL, "Shader stores viewport index but device does not support "
+                                     "viewport layer extension");
             break;
         }
         ctx.Add("gl_ViewportIndex=ftoi({});", value);
         break;
     case IR::Attribute::ViewportMask:
         if (ctx.stage != Stage::Geometry && !ctx.profile.support_viewport_mask) {
-            // LOG_WARNING(..., "Shader stores viewport mask but device does not support viewport
-            // mask extension");
+            LOG_WARNING(
+                Shader_GLSL,
+                "Shader stores viewport mask but device does not support viewport mask extension");
             break;
         }
         ctx.Add("gl_ViewportMask[0]=ftoi({});", value);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index c6b3df9c9..447eb8e0a 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -96,7 +96,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
     }
     const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi};
     if (!has_var_aoffi) {
-        // LOG_WARNING("Device does not support variable texture offsets, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING");
     }
     const auto offset_str{has_var_aoffi ? ctx.var_alloc.Consume(offset) : "0"};
     switch (offset.Type()) {
@@ -116,7 +116,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
 std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) {
     const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
     if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-        // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Not all arguments in PTP are immediate, STUBBING");
         return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))";
     }
     const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -152,7 +152,7 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -196,7 +196,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -239,9 +239,10 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod &&
                         ctx.stage != Stage::Fragment && needs_shadow_ext};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
+                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
         if (info.type == TextureType::ColorArrayCube) {
-            // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing");
+            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
             ctx.AddF32("{}=0.0f;", inst);
             return;
         }
@@ -291,9 +292,10 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext};
     const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
+                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
         if (info.type == TextureType::ColorArrayCube) {
-            // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing");
+            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
             ctx.AddF32("{}=0.0f;", inst);
             return;
         }
@@ -329,7 +331,7 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -376,7 +378,7 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -426,7 +428,7 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
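
The sparse-texture hunks in this file all follow one stub pattern: when the IR asks for a residency code but the host lacks sparse texture support, the warning is logged, the residency boolean is forced to true, and the lookup continues down the ordinary non-sparse path emitted later in each function (outside these hunks). In GLSL terms the stub amounts to something like the sketch below (resident, color, tex and coords are placeholder names):

    // Sketch of the stubbed residency query on hosts without sparse
    // texture support: always report the texel as resident and go through
    // a regular texture()/texelFetch() lookup rather than a sparse query.
    bool resident = true;
    vec4 color = texture(tex, coords);
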