author     ReinUsesLisp  2021-04-25 01:04:49 -0300
committer  ameerj        2021-07-22 21:51:29 -0400
commit     ac8835659ead30d289ff8b907a2295d87790670f (patch)
tree       acd654c71bfceede98197aa99f83a9e6784fa9b4
parent     vulkan: Rework descriptor allocation algorithm (diff)
vulkan: Defer descriptor set work to the Vulkan thread
Move descriptor lookup and update code to a separate thread. Deferring this work takes it off the main GPU thread and allows descriptor set layouts to be created on another thread, which slightly reduces the main thread's workload when new pipelines are encountered.
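Concretely, each call site that previously committed a descriptor set on the GPU thread and queued a Send() now snapshots the raw payload pointer with UpdateData() and performs both the allocation and the template update inside the recorded command. A minimal sketch of the pattern, assembled from the member names in the diff below (an illustrative composite, not a drop-in snippet):

    const void* const descriptor_data{update_descriptor_queue.UpdateData()};
    scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
        // Runs later on the Vulkan thread, off the main GPU thread.
        const VkDescriptorSet set = descriptor_allocator.Commit();
        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
    });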
Diffstat
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp      | 45
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h        |  8
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp  | 36
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h    |  1
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 40
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h   |  5
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.cpp |  9
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h   |  4
8 files changed, 69 insertions, 79 deletions
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index e2f3d16bf..7e5ba283b 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -172,11 +172,12 @@ struct AstcPushConstants {
 };
 } // Anonymous namespace
 
-ComputePass::ComputePass(const Device& device, DescriptorPool& descriptor_pool,
+ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
                          vk::Span<VkDescriptorSetLayoutBinding> bindings,
                          vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
                          const DescriptorBankInfo& bank_info,
-                         vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code) {
+                         vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code)
+    : device{device_} {
     descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .pNext = nullptr,
@@ -237,15 +238,6 @@ ComputePass::ComputePass(const Device& device, DescriptorPool& descriptor_pool,
 
 ComputePass::~ComputePass() = default;
 
-VkDescriptorSet ComputePass::CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue) {
-    if (!descriptor_template) {
-        return nullptr;
-    }
-    const VkDescriptorSet set = descriptor_allocator.Commit();
-    update_descriptor_queue.Send(descriptor_template.address(), set);
-    return set;
-}
-
 Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_, DescriptorPool& descriptor_pool,
                      StagingBufferPool& staging_buffer_pool_,
                      VKUpdateDescriptorQueue& update_descriptor_queue_)
@@ -265,10 +257,11 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
     update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
-    const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
+    const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+    const VkBuffer buffer{staging.buffer};
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([this, buffer = staging.buffer, set, num_vertices](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([this, buffer, descriptor_data, num_vertices](vk::CommandBuffer cmdbuf) {
         static constexpr u32 DISPATCH_SIZE = 1024;
         static constexpr VkMemoryBarrier WRITE_BARRIER{
             .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -276,6 +269,8 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
             .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
             .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
         };
+        const VkDescriptorSet set = descriptor_allocator.Commit();
+        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
         cmdbuf.Dispatch(Common::DivCeil(num_vertices, DISPATCH_SIZE), 1, 1);
@@ -321,10 +316,10 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
     update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
-    const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
+    const void* const descriptor_data{update_descriptor_queue.UpdateData()};
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([this, buffer = staging.buffer, set, num_tri_vertices, base_vertex,
+    scheduler.Record([this, buffer = staging.buffer, descriptor_data, num_tri_vertices, base_vertex,
                       index_shift](vk::CommandBuffer cmdbuf) {
         static constexpr u32 DISPATCH_SIZE = 1024;
         static constexpr VkMemoryBarrier WRITE_BARRIER{
@@ -333,7 +328,9 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
             .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
             .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
         };
-        const std::array push_constants = {base_vertex, index_shift};
+        const std::array push_constants{base_vertex, index_shift};
+        const VkDescriptorSet set = descriptor_allocator.Commit();
+        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
         cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
@@ -353,7 +350,7 @@ ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
     : ComputePass(device_, descriptor_pool_, ASTC_DESCRIPTOR_SET_BINDINGS,
                   ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY, ASTC_BANK_INFO,
                   COMPUTE_PUSH_CONSTANT_RANGE<sizeof(AstcPushConstants)>, ASTC_DECODER_COMP_SPV),
-      device{device_}, scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
+      scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
       update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
 
 ASTCDecoderPass::~ASTCDecoderPass() = default;
@@ -451,16 +448,14 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
         update_descriptor_queue.AddBuffer(*data_buffer, sizeof(ASTC_ENCODINGS_VALUES),
                                           sizeof(SWIZZLE_TABLE));
         update_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
-
-        const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
-        const VkPipelineLayout vk_layout = *layout;
+        const void* const descriptor_data{update_descriptor_queue.UpdateData()};
 
         // To unswizzle the ASTC data
         const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
         ASSERT(params.origin == (std::array<u32, 3>{0, 0, 0}));
         ASSERT(params.destination == (std::array<s32, 3>{0, 0, 0}));
-        scheduler.Record([vk_layout, num_dispatches_x, num_dispatches_y, num_dispatches_z,
-                          block_dims, params, set](vk::CommandBuffer cmdbuf) {
+        scheduler.Record([this, num_dispatches_x, num_dispatches_y, num_dispatches_z, block_dims,
+                          params, descriptor_data](vk::CommandBuffer cmdbuf) {
             const AstcPushConstants uniforms{
                 .blocks_dims = block_dims,
                 .bytes_per_block_log2 = params.bytes_per_block_log2,
@@ -470,8 +465,10 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
                 .block_height = params.block_height,
                 .block_height_mask = params.block_height_mask,
             };
-            cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, vk_layout, 0, set, {});
-            cmdbuf.PushConstants(vk_layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
+            const VkDescriptorSet set = descriptor_allocator.Commit();
+            device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
+            cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
+            cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
             cmdbuf.Dispatch(num_dispatches_x, num_dispatches_y, num_dispatches_z);
         });
     }
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 54c1ac4cb..114aef2bd 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -36,15 +36,14 @@ public:
     ~ComputePass();
 
 protected:
-    VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue);
-
+    const Device& device;
     vk::DescriptorUpdateTemplateKHR descriptor_template;
     vk::PipelineLayout layout;
     vk::Pipeline pipeline;
-
-private:
     vk::DescriptorSetLayout descriptor_set_layout;
     DescriptorAllocator descriptor_allocator;
+
+private:
     vk::ShaderModule module;
 };
 
@@ -99,7 +98,6 @@ public:
 private:
     void MakeDataBuffer();
 
-    const Device& device;
     VKScheduler& scheduler;
     StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 54a57c358..feaace0c5 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -18,21 +18,22 @@
 
 namespace Vulkan {
 
-ComputePipeline::ComputePipeline(const Device& device, DescriptorPool& descriptor_pool,
+ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
                                  VKUpdateDescriptorQueue& update_descriptor_queue_,
                                  Common::ThreadWorker* thread_worker, const Shader::Info& info_,
                                  vk::ShaderModule spv_module_)
-    : update_descriptor_queue{update_descriptor_queue_}, info{info_},
+    : device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
       spv_module(std::move(spv_module_)) {
-    DescriptorLayoutBuilder builder{device.GetLogical()};
-    builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
+    auto func{[this, &descriptor_pool] {
+        DescriptorLayoutBuilder builder{device.GetLogical()};
+        builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
 
-    descriptor_set_layout = builder.CreateDescriptorSetLayout();
-    pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
-    descriptor_update_template = builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout);
-    descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
+        descriptor_set_layout = builder.CreateDescriptorSetLayout();
+        pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
+        descriptor_update_template =
+            builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout);
+        descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
 
-    auto func{[this, &device] {
         const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
             .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
             .pNext = nullptr,
@@ -166,15 +167,16 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
             build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
         });
     }
-    scheduler.Record([this](vk::CommandBuffer cmdbuf) {
+    const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+    scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
-    });
-    if (!descriptor_set_layout) {
-        return;
-    }
-    const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
-    update_descriptor_queue.Send(descriptor_update_template.address(), descriptor_set);
-    scheduler.Record([this, descriptor_set](vk::CommandBuffer cmdbuf) {
+
+        if (!descriptor_set_layout) {
+            return;
+        }
+        const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
+        const vk::Device& dev{device.GetLogical()};
+        dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
                                   descriptor_set, nullptr);
     });
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 0d4cd37be..a560e382e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -40,6 +40,7 @@ public:
                    VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache);
 
 private:
+    const Device& device;
     VKUpdateDescriptorQueue& update_descriptor_queue;
     Shader::Info info;
 
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 0526c197a..76080bde1 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -205,31 +205,31 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
 GraphicsPipeline::GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d_,
                                    Tegra::MemoryManager& gpu_memory_, VKScheduler& scheduler_,
                                    BufferCache& buffer_cache_, TextureCache& texture_cache_,
-                                   const Device& device, DescriptorPool& descriptor_pool,
+                                   const Device& device_, DescriptorPool& descriptor_pool,
                                    VKUpdateDescriptorQueue& update_descriptor_queue_,
                                    Common::ThreadWorker* worker_thread,
                                    RenderPassCache& render_pass_cache,
                                    const GraphicsPipelineCacheKey& key_,
                                    std::array<vk::ShaderModule, NUM_STAGES> stages,
                                    const std::array<const Shader::Info*, NUM_STAGES>& infos)
-    : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, texture_cache{texture_cache_},
-      buffer_cache{buffer_cache_}, scheduler{scheduler_},
+    : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_},
+      texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_},
       update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
     std::ranges::transform(infos, stage_infos.begin(),
                            [](const Shader::Info* info) { return info ? *info : Shader::Info{}; });
 
-    DescriptorLayoutBuilder builder{MakeBuilder(device, stage_infos)};
-    descriptor_set_layout = builder.CreateDescriptorSetLayout();
-    descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, stage_infos);
+    auto func{[this, &render_pass_cache, &descriptor_pool] {
+        DescriptorLayoutBuilder builder{MakeBuilder(device, stage_infos)};
+        descriptor_set_layout = builder.CreateDescriptorSetLayout();
+        descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, stage_infos);
 
-    auto func{[this, &device, &render_pass_cache, builder] {
         const VkDescriptorSetLayout set_layout{*descriptor_set_layout};
         pipeline_layout = builder.CreatePipelineLayout(set_layout);
         descriptor_update_template = builder.CreateTemplate(set_layout, *pipeline_layout);
 
         const VkRenderPass render_pass{render_pass_cache.Get(MakeRenderPassKey(key.state))};
         Validate();
-        MakePipeline(device, render_pass);
+        MakePipeline(render_pass);
 
         std::lock_guard lock{build_mutex};
         is_built = true;
@@ -440,24 +440,22 @@ void GraphicsPipeline::ConfigureDraw() {
             build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
         });
     }
-    if (scheduler.UpdateGraphicsPipeline(this)) {
-        scheduler.Record([this](vk::CommandBuffer cmdbuf) {
-            cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
-        });
-    }
-    if (!descriptor_set_layout) {
-        return;
-    }
-    const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
-    update_descriptor_queue.Send(descriptor_update_template.address(), descriptor_set);
-
-    scheduler.Record([this, descriptor_set](vk::CommandBuffer cmdbuf) {
+    const bool bind_pipeline{scheduler.UpdateGraphicsPipeline(this)};
+    const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+    scheduler.Record([this, descriptor_data, bind_pipeline](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
+        if (!descriptor_set_layout) {
+            return;
+        }
+        const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
+        const vk::Device& dev{device.GetLogical()};
+        dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline_layout, 0,
                                   descriptor_set, nullptr);
     });
 }
 
-void GraphicsPipeline::MakePipeline(const Device& device, VkRenderPass render_pass) {
+void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
     FixedPipelineState::DynamicState dynamic{};
     if (!device.IsExtExtendedDynamicStateSupported()) {
         dynamic = key.state.dynamic_state;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 454fc049e..85e21f611 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -109,19 +109,20 @@ private:
 
     void ConfigureDraw();
 
-    void MakePipeline(const Device& device, VkRenderPass render_pass);
+    void MakePipeline(VkRenderPass render_pass);
 
     void Validate();
 
     const GraphicsPipelineCacheKey key;
     Tegra::Engines::Maxwell3D& maxwell3d;
     Tegra::MemoryManager& gpu_memory;
+    const Device& device;
     TextureCache& texture_cache;
     BufferCache& buffer_cache;
     VKScheduler& scheduler;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 
-    void (*configure_func)(GraphicsPipeline*, bool);
+    void (*configure_func)(GraphicsPipeline*, bool){};
 
     std::vector<GraphicsPipelineCacheKey> transition_keys;
     std::vector<GraphicsPipeline*> transitions;
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index bea9b8012..ce3427c9b 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -36,13 +36,4 @@ void VKUpdateDescriptorQueue::Acquire() {
     upload_start = payload_cursor;
 }
 
-void VKUpdateDescriptorQueue::Send(const VkDescriptorUpdateTemplateKHR* update_template,
-                                   VkDescriptorSet set) {
-    const void* const data = upload_start;
-    const vk::Device* const logical = &device.GetLogical();
-    scheduler.Record([data, logical, set, update_template](vk::CommandBuffer) {
-        logical->UpdateDescriptorSet(set, *update_template, data);
-    });
-}
-
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 82bc9920c..d7de4c490 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -39,7 +39,9 @@ public:
 
     void Acquire();
 
-    void Send(const VkDescriptorUpdateTemplateKHR* update_template, VkDescriptorSet set);
+    const DescriptorUpdateEntry* UpdateData() const noexcept {
+        return upload_start;
+    }
 
     void AddSampledImage(VkImageView image_view, VkSampler sampler) {
         *(payload_cursor++) = VkDescriptorImageInfo{