author     bunnei    2020-05-02 00:45:41 -0400
committer  GitHub    2020-05-02 00:45:41 -0400
commit     e6b4311178b4f87b67eb2383f2a64520c2a8dd25 (patch)
tree       066f25773f9db49747f26ddf94b23a5007502ff8 /src/video_core/shader
parent     Merge pull request #3859 from jbeich/clang (diff)
parent     shader/texture: Support multiple unknown sampler properties (diff)
download   yuzu-e6b4311178b4f87b67eb2383f2a64520c2a8dd25.tar.gz
           yuzu-e6b4311178b4f87b67eb2383f2a64520c2a8dd25.tar.xz
           yuzu-e6b4311178b4f87b67eb2383f2a64520c2a8dd25.zip
Merge pull request #3693 from ReinUsesLisp/clean-samplers
shader/texture: Support multiple unknown sampler properties
Diffstat (limited to 'src/video_core/shader')
-rw-r--r--  src/video_core/shader/decode.cpp          18
-rw-r--r--  src/video_core/shader/decode/image.cpp    18
-rw-r--r--  src/video_core/shader/decode/texture.cpp  192
-rw-r--r--  src/video_core/shader/node.h              129
-rw-r--r--  src/video_core/shader/shader_ir.h         34
5 files changed, 168 insertions, 223 deletions
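
The gist of this merge: instead of forcing every texture instruction to declare all four sampler properties up front, SamplerInfo now carries each property as an std::optional, and GetSamplerInfo only fills in whatever an instruction left unspecified, either from the registry (bound or bindless sampler tables) or from conservative Texture2D/false defaults when the registry knows nothing about the handle. A minimal sketch of that merge pattern follows, using simplified stand-in types and a hypothetical Merge/RegistryEntry rather than the exact yuzu declarations:

    // Sketch of the property-merging pattern this merge introduces.
    // Stand-in names; the real code lives in ShaderIR::GetSamplerInfo.
    #include <optional>

    enum class TextureType { Texture1D, Texture2D, Texture3D, TextureCube };

    struct SamplerInfo {
        std::optional<TextureType> type;
        std::optional<bool> is_array;
        std::optional<bool> is_shadow;
        std::optional<bool> is_buffer;

        // Complete only when every property is already known.
        constexpr bool IsComplete() const noexcept {
            return type && is_array && is_shadow && is_buffer;
        }
    };

    struct RegistryEntry {
        TextureType texture_type;
        bool is_array, is_shadow, is_buffer;
    };

    // Each instruction fills in what it knows; anything still unknown is taken
    // from the registry entry, or from conservative defaults when the registry
    // has nothing for this sampler handle.
    SamplerInfo Merge(SamplerInfo info, const RegistryEntry* entry) {
        if (info.IsComplete()) {
            return info;
        }
        if (entry == nullptr) {
            info.type = info.type.value_or(TextureType::Texture2D);
            info.is_array = info.is_array.value_or(false);
            info.is_shadow = info.is_shadow.value_or(false);
            info.is_buffer = info.is_buffer.value_or(false);
            return info;
        }
        info.type = info.type.value_or(entry->texture_type);
        info.is_array = info.is_array.value_or(entry->is_array);
        info.is_shadow = info.is_shadow.value_or(entry->is_shadow);
        info.is_buffer = info.is_buffer.value_or(entry->is_buffer);
        return info;
    }
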
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 1167ff4ec..a75a5cc63 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -32,11 +32,11 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
     u32 count{};
     std::vector<u32> bound_offsets;
     for (const auto& sampler : used_samplers) {
-        if (sampler.IsBindless()) {
+        if (sampler.is_bindless) {
             continue;
         }
         ++count;
-        bound_offsets.emplace_back(sampler.GetOffset());
+        bound_offsets.emplace_back(sampler.offset);
     }
     if (count > 1) {
         gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));
@@ -46,14 +46,14 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
 std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
                                         VideoCore::GuestDriverProfile& gpu_driver,
                                         const std::list<Sampler>& used_samplers) {
-    const u32 base_offset = sampler_to_deduce.GetOffset();
+    const u32 base_offset = sampler_to_deduce.offset;
     u32 max_offset{std::numeric_limits<u32>::max()};
     for (const auto& sampler : used_samplers) {
-        if (sampler.IsBindless()) {
+        if (sampler.is_bindless) {
             continue;
         }
-        if (sampler.GetOffset() > base_offset) {
-            max_offset = std::min(sampler.GetOffset(), max_offset);
+        if (sampler.offset > base_offset) {
+            max_offset = std::min(sampler.offset, max_offset);
         }
     }
     if (max_offset == std::numeric_limits<u32>::max()) {
@@ -353,14 +353,14 @@ void ShaderIR::PostDecode() {
         return;
     }
     for (auto& sampler : used_samplers) {
-        if (!sampler.IsIndexed()) {
+        if (!sampler.is_indexed) {
             continue;
         }
         if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
-            sampler.SetSize(*size);
+            sampler.size = *size;
         } else {
             LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
-            sampler.SetSize(1);
+            sampler.size = 1;
         }
     }
 }
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index 85ee9aa5e..60b6ad72a 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -485,11 +485,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
 Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
     const auto offset = static_cast<u32>(image.index.Value());
 
-    const auto it =
-        std::find_if(std::begin(used_images), std::end(used_images),
-                     [offset](const Image& entry) { return entry.GetOffset() == offset; });
+    const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+                                 [offset](const Image& entry) { return entry.offset == offset; });
     if (it != std::end(used_images)) {
-        ASSERT(!it->IsBindless() && it->GetType() == it->GetType());
+        ASSERT(!it->is_bindless && it->type == type);
         return *it;
     }
 
@@ -505,13 +504,12 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
     const auto buffer = std::get<1>(result);
     const auto offset = std::get<2>(result);
 
-    const auto it =
-        std::find_if(std::begin(used_images), std::end(used_images),
-                     [buffer = buffer, offset = offset](const Image& entry) {
-                         return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
-                     });
+    const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+                                 [buffer, offset](const Image& entry) {
+                                     return entry.buffer == buffer && entry.offset == offset;
+                                 });
     if (it != std::end(used_images)) {
-        ASSERT(it->IsBindless() && it->GetType() == it->GetType());
+        ASSERT(it->is_bindless && it->type == type);
         return *it;
     }
 
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index e68f1d305..8f0bb996e 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -139,15 +139,15 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
         }
         const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
 
-        const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare, false};
-        const Sampler& sampler = *GetSampler(instr.sampler, info);
+        SamplerInfo info;
+        info.is_shadow = is_depth_compare;
+        const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
 
         Node4 values;
         for (u32 element = 0; element < values.size(); ++element) {
-            auto coords_copy = coords;
-            MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {},
-                             {}, {}, component, element, {}};
-            values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
+            MetaTexture meta{*sampler, {}, depth_compare, aoffi, {}, {},
+                             {}, {}, component, element, {}};
+            values[element] = Operation(OperationCode::TextureGather, meta, coords);
         }
 
         if (instr.tld4s.fp16_flag) {
@@ -165,18 +165,20 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
165 "AOFFI is not implemented"); 165 "AOFFI is not implemented");
166 166
167 const bool is_array = instr.txd.is_array != 0; 167 const bool is_array = instr.txd.is_array != 0;
168 u64 base_reg = instr.gpr8.Value();
169 const auto derivate_reg = instr.gpr20.Value(); 168 const auto derivate_reg = instr.gpr20.Value();
170 const auto texture_type = instr.txd.texture_type.Value(); 169 const auto texture_type = instr.txd.texture_type.Value();
171 const auto coord_count = GetCoordCount(texture_type); 170 const auto coord_count = GetCoordCount(texture_type);
172 Node index_var{}; 171 u64 base_reg = instr.gpr8.Value();
173 const Sampler* sampler = 172 Node index_var;
174 is_bindless 173 SamplerInfo info;
175 ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false, false}}) 174 info.type = texture_type;
176 : GetSampler(instr.sampler, {{texture_type, is_array, false, false}}); 175 info.is_array = is_array;
176 const std::optional<Sampler> sampler = is_bindless
177 ? GetBindlessSampler(base_reg, info, index_var)
178 : GetSampler(instr.sampler, info);
177 Node4 values; 179 Node4 values;
178 if (sampler == nullptr) { 180 if (!sampler) {
179 std::generate(values.begin(), values.end(), [] { return Immediate(0); }); 181 std::generate(values.begin(), values.end(), [this] { return Immediate(0); });
180 WriteTexInstructionFloat(bb, instr, values); 182 WriteTexInstructionFloat(bb, instr, values);
181 break; 183 break;
182 } 184 }
@@ -214,14 +216,12 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
         is_bindless = true;
         [[fallthrough]];
     case OpCode::Id::TXQ: {
-        // TODO: The new commits on the texture refactor, change the way samplers work.
-        // Sadly, not all texture instructions specify the type of texture their sampler
-        // uses. This must be fixed at a later instance.
-        Node index_var{};
-        const Sampler* sampler =
-            is_bindless ? GetBindlessSampler(instr.gpr8, index_var) : GetSampler(instr.sampler);
-
-        if (sampler == nullptr) {
+        Node index_var;
+        const std::optional<Sampler> sampler = is_bindless
+                                                   ? GetBindlessSampler(instr.gpr8, {}, index_var)
+                                                   : GetSampler(instr.sampler, {});
+
+        if (!sampler) {
             u32 indexer = 0;
             for (u32 element = 0; element < 4; ++element) {
                 if (!instr.txq.IsComponentEnabled(element)) {
@@ -267,12 +267,17 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
         UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
                              "NDV is not implemented");
 
-        auto texture_type = instr.tmml.texture_type.Value();
-        Node index_var{};
-        const Sampler* sampler =
-            is_bindless ? GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler);
-
-        if (sampler == nullptr) {
+        const auto texture_type = instr.tmml.texture_type.Value();
+        const bool is_array = instr.tmml.array != 0;
+        SamplerInfo info;
+        info.type = texture_type;
+        info.is_array = is_array;
+        Node index_var;
+        const std::optional<Sampler> sampler =
+            is_bindless ? GetBindlessSampler(instr.gpr20, info, index_var)
+                        : GetSampler(instr.sampler, info);
+
+        if (!sampler) {
             u32 indexer = 0;
             for (u32 element = 0; element < 2; ++element) {
                 if (!instr.tmml.IsComponentEnabled(element)) {
@@ -299,12 +304,11 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
             coords.push_back(GetRegister(instr.gpr8.Value() + 1));
             break;
         default:
-            UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
+            UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<int>(texture_type));
 
             // Fallback to interpreting as a 2D texture for now
             coords.push_back(GetRegister(instr.gpr8.Value() + 0));
             coords.push_back(GetRegister(instr.gpr8.Value() + 1));
-            texture_type = TextureType::Texture2D;
         }
         u32 indexer = 0;
         for (u32 element = 0; element < 2; ++element) {
@@ -353,98 +357,103 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
     return pc;
 }
 
-ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset,
-                                               std::optional<u32> buffer) {
-    if (sampler_info) {
-        return *sampler_info;
+ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset,
+                                               std::optional<u32> buffer) {
+    if (info.IsComplete()) {
+        return info;
     }
     const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset)
                                 : registry.ObtainBoundSampler(offset);
     if (!sampler) {
         LOG_WARNING(HW_GPU, "Unknown sampler info");
-        return SamplerInfo{TextureType::Texture2D, false, false, false};
-    }
-    return SamplerInfo{sampler->texture_type, sampler->is_array != 0, sampler->is_shadow != 0,
-                       sampler->is_buffer != 0};
+        info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D);
+        info.is_array = info.is_array.value_or(false);
+        info.is_shadow = info.is_shadow.value_or(false);
+        info.is_buffer = info.is_buffer.value_or(false);
+        return info;
+    }
+    info.type = info.type.value_or(sampler->texture_type);
+    info.is_array = info.is_array.value_or(sampler->is_array != 0);
+    info.is_shadow = info.is_shadow.value_or(sampler->is_shadow != 0);
+    info.is_buffer = info.is_buffer.value_or(sampler->is_buffer != 0);
+    return info;
 }
 
-const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
-                                    std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
+                                            SamplerInfo sampler_info) {
     const auto offset = static_cast<u32>(sampler.index.Value());
     const auto info = GetSamplerInfo(sampler_info, offset);
 
     // If this sampler has already been used, return the existing mapping.
-    const auto it =
-        std::find_if(used_samplers.begin(), used_samplers.end(),
-                     [offset](const Sampler& entry) { return entry.GetOffset() == offset; });
+    const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+                                 [offset](const Sampler& entry) { return entry.offset == offset; });
     if (it != used_samplers.end()) {
-        ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
-               it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer);
-        return &*it;
+        ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+               it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
+        return *it;
     }
 
     // Otherwise create a new mapping for this sampler
     const auto next_index = static_cast<u32>(used_samplers.size());
-    return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow,
-                                       info.is_buffer, false);
+    return used_samplers.emplace_back(next_index, offset, *info.type, *info.is_array,
+                                      *info.is_shadow, *info.is_buffer, false);
 }
 
-const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
-                                            std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
+                                                    Node& index_var) {
     const Node sampler_register = GetRegister(reg);
     const auto [base_node, tracked_sampler_info] =
         TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
     ASSERT(base_node != nullptr);
     if (base_node == nullptr) {
-        return nullptr;
+        return std::nullopt;
     }
 
     if (const auto bindless_sampler_info =
             std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) {
         const u32 buffer = bindless_sampler_info->GetIndex();
         const u32 offset = bindless_sampler_info->GetOffset();
-        const auto info = GetSamplerInfo(sampler_info, offset, buffer);
+        info = GetSamplerInfo(info, offset, buffer);
 
         // If this sampler has already been used, return the existing mapping.
-        const auto it =
-            std::find_if(used_samplers.begin(), used_samplers.end(),
-                         [buffer = buffer, offset = offset](const Sampler& entry) {
-                             return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
-                         });
+        const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+                                     [buffer = buffer, offset = offset](const Sampler& entry) {
+                                         return entry.buffer == buffer && entry.offset == offset;
+                                     });
         if (it != used_samplers.end()) {
-            ASSERT(it->IsBindless() && it->GetType() == info.type &&
-                   it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow);
-            return &*it;
+            ASSERT(it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+                   it->is_shadow == info.is_shadow);
+            return *it;
         }
 
         // Otherwise create a new mapping for this sampler
         const auto next_index = static_cast<u32>(used_samplers.size());
-        return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array,
-                                           info.is_shadow, info.is_buffer, false);
-    } else if (const auto array_sampler_info =
-                   std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
+        return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array,
+                                          *info.is_shadow, *info.is_buffer, false);
+    }
+    if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
         const u32 base_offset = array_sampler_info->GetBaseOffset() / 4;
         index_var = GetCustomVariable(array_sampler_info->GetIndexVar());
-        const auto info = GetSamplerInfo(sampler_info, base_offset);
+        info = GetSamplerInfo(info, base_offset);
 
         // If this sampler has already been used, return the existing mapping.
         const auto it = std::find_if(
             used_samplers.begin(), used_samplers.end(),
-            [base_offset](const Sampler& entry) { return entry.GetOffset() == base_offset; });
+            [base_offset](const Sampler& entry) { return entry.offset == base_offset; });
         if (it != used_samplers.end()) {
-            ASSERT(!it->IsBindless() && it->GetType() == info.type &&
-                   it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow &&
-                   it->IsBuffer() == info.is_buffer && it->IsIndexed());
-            return &*it;
+            ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+                   it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
+                   it->is_indexed);
+            return *it;
         }
 
         uses_indexed_samplers = true;
         // Otherwise create a new mapping for this sampler
         const auto next_index = static_cast<u32>(used_samplers.size());
-        return &used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array,
-                                           info.is_shadow, info.is_buffer, true);
+        return used_samplers.emplace_back(next_index, base_offset, *info.type, *info.is_array,
+                                          *info.is_shadow, *info.is_buffer, true);
     }
-    return nullptr;
+    return std::nullopt;
 }
 
 void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
@@ -529,10 +538,16 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
     ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
                "Illegal texture type");
 
-    const SamplerInfo info{texture_type, is_array, is_shadow, false};
+    SamplerInfo info;
+    info.type = texture_type;
+    info.is_array = is_array;
+    info.is_shadow = is_shadow;
+    info.is_buffer = false;
+
     Node index_var;
-    const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info)
-                                         : GetSampler(instr.sampler, info);
+    const std::optional<Sampler> sampler = is_bindless
+                                               ? GetBindlessSampler(*bindless_reg, info, index_var)
+                                               : GetSampler(instr.sampler, info);
     if (!sampler) {
         return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
     }
@@ -683,12 +698,17 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
 
     u64 parameter_register = instr.gpr20.Value();
 
-    const SamplerInfo info{texture_type, is_array, depth_compare, false};
-    Node index_var{};
-    const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, index_var, info)
-                                         : GetSampler(instr.sampler, info);
+    SamplerInfo info;
+    info.type = texture_type;
+    info.is_array = is_array;
+    info.is_shadow = depth_compare;
+
+    Node index_var;
+    const std::optional<Sampler> sampler =
+        is_bindless ? GetBindlessSampler(parameter_register++, info, index_var)
+                    : GetSampler(instr.sampler, info);
     Node4 values;
-    if (sampler == nullptr) {
+    if (!sampler) {
         for (u32 element = 0; element < values.size(); ++element) {
             values[element] = Immediate(0);
         }
@@ -743,12 +763,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
     // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
     // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
 
-    const auto& sampler = *GetSampler(instr.sampler);
+    const std::optional<Sampler> sampler = GetSampler(instr.sampler, {});
 
     Node4 values;
     for (u32 element = 0; element < values.size(); ++element) {
         auto coords_copy = coords;
-        MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
+        MetaTexture meta{*sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
         values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
     }
 
@@ -756,7 +776,11 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
 }
 
 Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
-    const Sampler& sampler = *GetSampler(instr.sampler);
+    SamplerInfo info;
+    info.type = texture_type;
+    info.is_array = is_array;
+    info.is_shadow = false;
+    const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
 
     const std::size_t type_coord_count = GetCoordCount(texture_type);
     const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
@@ -784,7 +808,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
     Node4 values;
     for (u32 element = 0; element < values.size(); ++element) {
         auto coords_copy = coords;
-        MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
+        MetaTexture meta{*sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
         values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
     }
     return values;
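
A second recurring change in texture.cpp is the accessor return type: GetSampler and GetBindlessSampler now return std::optional<Sampler> by value instead of const Sampler* (nullptr on failure), so every call site switches from pointer checks to testing the optional and dereferencing it with *sampler. A self-contained sketch of that shape, using stand-in types rather than the real yuzu declarations:

    // Sketch of the pointer-to-optional switch; Sampler/GetSampler here are
    // simplified stand-ins for the yuzu types of the same name.
    #include <algorithm>
    #include <list>
    #include <optional>

    struct Sampler {
        unsigned index = 0;
        unsigned offset = 0;
    };

    std::list<Sampler> used_samplers;

    // Old style returned `const Sampler*` (nullptr on failure); the new style
    // returns a copy wrapped in std::optional, so callers cannot hold a dangling
    // pointer and the failure path is an explicit `if (!sampler)`.
    std::optional<Sampler> GetSampler(unsigned offset, bool can_track) {
        if (!can_track) {
            return std::nullopt; // was `return nullptr;`
        }
        const auto it =
            std::find_if(used_samplers.begin(), used_samplers.end(),
                         [offset](const Sampler& entry) { return entry.offset == offset; });
        if (it != used_samplers.end()) {
            return *it; // was `return &*it;`
        }
        const auto next_index = static_cast<unsigned>(used_samplers.size());
        return used_samplers.emplace_back(Sampler{next_index, offset});
    }
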
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 3f5a7bc7a..601c822d2 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -267,76 +267,30 @@ class ArraySamplerNode;
 using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>;
 using TrackSampler = std::shared_ptr<TrackSamplerData>;
 
-class Sampler {
-public:
-    /// This constructor is for bound samplers
+struct Sampler {
+    /// Bound samplers constructor
     constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
                                bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
         : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
           is_buffer{is_buffer}, is_indexed{is_indexed} {}
 
-    /// This constructor is for bindless samplers
+    /// Bindless samplers constructor
     constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
                                bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
         : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
           is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
 
-    constexpr u32 GetIndex() const {
-        return index;
-    }
-
-    constexpr u32 GetOffset() const {
-        return offset;
-    }
-
-    constexpr u32 GetBuffer() const {
-        return buffer;
-    }
-
-    constexpr Tegra::Shader::TextureType GetType() const {
-        return type;
-    }
-
-    constexpr bool IsArray() const {
-        return is_array;
-    }
-
-    constexpr bool IsShadow() const {
-        return is_shadow;
-    }
-
-    constexpr bool IsBuffer() const {
-        return is_buffer;
-    }
-
-    constexpr bool IsBindless() const {
-        return is_bindless;
-    }
-
-    constexpr bool IsIndexed() const {
-        return is_indexed;
-    }
-
-    constexpr u32 Size() const {
-        return size;
-    }
-
-    constexpr void SetSize(u32 new_size) {
-        size = new_size;
-    }
-
-private:
-    u32 index{};  ///< Emulated index given for the this sampler.
-    u32 offset{}; ///< Offset in the const buffer from where the sampler is being read.
-    u32 buffer{}; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
-    u32 size{1};  ///< Size of the sampler.
+    u32 index = 0;  ///< Emulated index given for the this sampler.
+    u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
+    u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
+    u32 size = 1;   ///< Size of the sampler.
 
     Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
-    bool is_array{};    ///< Whether the texture is being sampled as an array texture or not.
-    bool is_shadow{};   ///< Whether the texture is being sampled as a depth texture or not.
-    bool is_buffer{};   ///< Whether the texture is a texture buffer without sampler.
-    bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not.
-    bool is_indexed{};  ///< Whether this sampler is an indexed array of textures.
+    bool is_array = false;    ///< Whether the texture is being sampled as an array texture or not.
+    bool is_shadow = false;   ///< Whether the texture is being sampled as a depth texture or not.
+    bool is_buffer = false;   ///< Whether the texture is a texture buffer without sampler.
+    bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
+    bool is_indexed = false;  ///< Whether this sampler is an indexed array of textures.
 };
 
 /// Represents a tracked bindless sampler into a direct const buffer
@@ -381,13 +335,13 @@ private:
     u32 offset;
 };
 
-class Image final {
+struct Image {
 public:
-    /// This constructor is for bound images
+    /// Bound images constructor
     constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
         : index{index}, offset{offset}, type{type} {}
 
-    /// This constructor is for bindless samplers
+    /// Bindless samplers constructor
     constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
         : index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}
 
@@ -405,53 +359,20 @@ public:
         is_atomic = true;
     }
 
-    constexpr u32 GetIndex() const {
-        return index;
-    }
-
-    constexpr u32 GetOffset() const {
-        return offset;
-    }
-
-    constexpr u32 GetBuffer() const {
-        return buffer;
-    }
-
-    constexpr Tegra::Shader::ImageType GetType() const {
-        return type;
-    }
-
-    constexpr bool IsBindless() const {
-        return is_bindless;
-    }
-
-    constexpr bool IsWritten() const {
-        return is_written;
-    }
-
-    constexpr bool IsRead() const {
-        return is_read;
-    }
-
-    constexpr bool IsAtomic() const {
-        return is_atomic;
-    }
-
-private:
-    u32 index{};
-    u32 offset{};
-    u32 buffer{};
+    u32 index = 0;
+    u32 offset = 0;
+    u32 buffer = 0;
 
     Tegra::Shader::ImageType type{};
-    bool is_bindless{};
-    bool is_written{};
-    bool is_read{};
-    bool is_atomic{};
+    bool is_bindless = false;
+    bool is_written = false;
+    bool is_read = false;
+    bool is_atomic = false;
 };
 
 struct GlobalMemoryBase {
-    u32 cbuf_index{};
-    u32 cbuf_offset{};
+    u32 cbuf_index = 0;
+    u32 cbuf_offset = 0;
 
     bool operator<(const GlobalMemoryBase& rhs) const {
         return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);
@@ -465,7 +386,7 @@ struct MetaArithmetic {
 
 /// Parameters describing a texture sampler
 struct MetaTexture {
-    const Sampler& sampler;
+    Sampler sampler;
     Node array;
     Node depth_compare;
     std::vector<Node> aoffi;
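
In node.h the getter/setter boilerplate goes away: Sampler and Image become plain structs with public members and default member initializers, and MetaTexture keeps its own Sampler copy instead of a const reference into used_samplers, which pairs with GetSampler now returning by value. What that means at a call site, sketched with stand-in members only:

    // Stand-in struct mirroring the new public-member style; the real members
    // live in video_core/shader/node.h.
    struct Sampler {
        unsigned offset = 0;
        unsigned size = 1;
        bool is_bindless = false;
    };

    void DeduceSize(Sampler& sampler) {
        // Before: if (!sampler.IsBindless()) { ... sampler.SetSize(4); }
        // After: direct member access; defaults come from the initializers above.
        if (!sampler.is_bindless && sampler.offset > 0) {
            sampler.size = 4;
        }
    }
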
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 69de5e68b..15ae152f2 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -28,12 +28,11 @@ struct ShaderBlock;
 
 constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
 
-class ConstBuffer {
-public:
-    explicit ConstBuffer(u32 max_offset, bool is_indirect)
+struct ConstBuffer {
+    constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
         : max_offset{max_offset}, is_indirect{is_indirect} {}
 
-    ConstBuffer() = default;
+    constexpr ConstBuffer() = default;
 
     void MarkAsUsed(u64 offset) {
         max_offset = std::max(max_offset, static_cast<u32>(offset));
@@ -56,8 +55,8 @@ public:
     }
 
 private:
-    u32 max_offset{};
-    bool is_indirect{};
+    u32 max_offset = 0;
+    bool is_indirect = false;
 };
 
 struct GlobalMemoryUsage {
@@ -191,10 +190,14 @@ private:
     friend class ASTDecoder;
 
     struct SamplerInfo {
-        Tegra::Shader::TextureType type;
-        bool is_array;
-        bool is_shadow;
-        bool is_buffer;
+        std::optional<Tegra::Shader::TextureType> type;
+        std::optional<bool> is_array;
+        std::optional<bool> is_shadow;
+        std::optional<bool> is_buffer;
+
+        constexpr bool IsComplete() const noexcept {
+            return type && is_array && is_shadow && is_buffer;
+        }
     };
 
     void Decode();
@@ -327,16 +330,15 @@ private:
     OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation);
 
     /// Queries the missing sampler info from the execution context.
-    SamplerInfo GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset,
+    SamplerInfo GetSamplerInfo(SamplerInfo info, u32 offset,
                                std::optional<u32> buffer = std::nullopt);
 
-    /// Accesses a texture sampler
-    const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler,
-                              std::optional<SamplerInfo> sampler_info = std::nullopt);
+    /// Accesses a texture sampler.
+    std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
 
     /// Accesses a texture sampler for a bindless texture.
-    const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
-                                      std::optional<SamplerInfo> sampler_info = std::nullopt);
+    std::optional<Sampler> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
+                                              Node& index_var);
 
     /// Accesses an image.
     Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);