| field | value |
|---|---|
| author | 2021-01-23 03:58:35 -0300 |
| committer | 2021-01-23 03:59:59 -0300 |
| commit | 37ef2ee595ac2a2b08053d5544d7efbc6627ab65 (patch) |
| tree | 2f7c3fcdecd346c821b8aca9e42cc613ed157ca7 /src |
| parent | Merge pull request #4713 from behunin/int-flags (diff) |
vk_pipeline_cache: Properly bypass VertexA shaders
The VertexA stage is not yet implemented, but the Vulkan backend still adds its
descriptors, causing a discrepancy between the pushed descriptors and the
descriptor update template. This generally ends in a driver-side crash.
Bypass the VertexA stage for now.
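For background, the backend builds a descriptor update template from the per-stage descriptor entries and later pushes the descriptor data through it, so both sides have to be derived from the same set of stages. Below is a hedged, self-contained sketch (not yuzu's code; the helper name and the uniform-buffer-only layout are simplifications) of that invariant:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

// Append one stage's uniform-buffer descriptors to both the set layout and the
// update template. Deriving both from the same list keeps the template's entry
// count and offsets consistent with the VkDescriptorBufferInfo array pushed
// later; a stage that adds bindings here but never supplies data (the
// unimplemented VertexA case) breaks that correspondence.
void AppendStageDescriptors(std::uint32_t num_uniform_buffers, std::uint32_t& binding,
                            std::size_t& data_offset,
                            std::vector<VkDescriptorSetLayoutBinding>& bindings,
                            std::vector<VkDescriptorUpdateTemplateEntry>& entries) {
    for (std::uint32_t i = 0; i < num_uniform_buffers; ++i) {
        bindings.push_back(VkDescriptorSetLayoutBinding{
            .binding = binding,
            .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            .descriptorCount = 1,
            .stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS,
            .pImmutableSamplers = nullptr,
        });
        entries.push_back(VkDescriptorUpdateTemplateEntry{
            .dstBinding = binding,
            .dstArrayElement = 0,
            .descriptorCount = 1,
            .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            .offset = data_offset,
            .stride = sizeof(VkDescriptorBufferInfo),
        });
        data_offset += sizeof(VkDescriptorBufferInfo);
        ++binding;
    }
}
```

When the descriptor data is eventually pushed, the array handed to the template must contain exactly `entries.size()` elements; a stage that contributed bindings but no data is what produces the mismatch described above.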
Diffstat (limited to 'src')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 12 |

1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 02282e36f..8991505ca 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -355,14 +355,12 @@ VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
     SPIRVProgram program;
     std::vector<VkDescriptorSetLayoutBinding> bindings;
 
-    for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
+    for (std::size_t index = 1; index < Maxwell::MaxShaderProgram; ++index) {
         const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
-
         // Skip stages that are not enabled
         if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
             continue;
         }
-
         const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum);
         const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
@@ -372,12 +370,8 @@ VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
         const auto& entries = shader->GetEntries();
         program[stage] = {
             Decompile(device, shader->GetIR(), program_type, shader->GetRegistry(), specialization),
-            entries};
-
-        if (program_enum == Maxwell::ShaderProgram::VertexA) {
-            // VertexB was combined with VertexA, so we skip the VertexB iteration
-            ++index;
-        }
+            entries,
+        };
 
         const u32 old_binding = specialization.base_binding;
         specialization.base_binding =
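Starting the loop at 1 works because VertexA and VertexB share the vertex pipeline stage. A small illustrative sketch of that mapping follows (the enum values and the index-to-stage formula mirror how Maxwell shader programs are commonly laid out in yuzu, but treat them as assumptions rather than a copy of the real headers):

```cpp
#include <cstddef>

// Illustrative Maxwell shader program ordering (assumed, not quoted from yuzu).
enum class ShaderProgram : std::size_t {
    VertexA = 0, // unimplemented; bypassed by starting the loop at index 1
    VertexB = 1,
    TesselationControl = 2,
    TesselationEval = 3,
    Geometry = 4,
    Fragment = 5,
};
constexpr std::size_t MaxShaderProgram = 6;

// Pipeline stage slots 0..4: vertex, tess control, tess eval, geometry, fragment.
// VertexA and VertexB both map to slot 0, so skipping index 0 loses no stage.
constexpr std::size_t ToPipelineStage(std::size_t program_index) {
    return program_index == 0 ? 0 : program_index - 1;
}

static_assert(ToPipelineStage(static_cast<std::size_t>(ShaderProgram::VertexB)) == 0);
static_assert(ToPipelineStage(static_cast<std::size_t>(ShaderProgram::Fragment)) == 4);
```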