summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: ReinUsesLisp 2021-04-03 22:28:07 -0300
committer: ameerj 2021-07-22 21:51:26 -0400
commit5b3c6d59c2c92ac388530740f8008f1b9764c14d (patch)
treeff494b6891399fa66852659f8028a1caaa903c30
parentshader: Remove atomic flags and use mutex + cond variable for pipelines (diff)
downloadyuzu-5b3c6d59c2c92ac388530740f8008f1b9764c14d.tar.gz
yuzu-5b3c6d59c2c92ac388530740f8008f1b9764c14d.tar.xz
yuzu-5b3c6d59c2c92ac388530740f8008f1b9764c14d.zip
vk_compute_pass: Fix compute passes
Diffstat (limited to '')
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pass.cpp39
-rw-r--r--src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp2
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.cpp1
3 files changed, 19 insertions, 23 deletions
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 760857839..2cfe9d4bd 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -206,27 +206,23 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
206 .codeSize = static_cast<u32>(code.size_bytes()), 206 .codeSize = static_cast<u32>(code.size_bytes()),
207 .pCode = code.data(), 207 .pCode = code.data(),
208 }); 208 });
209 /*
210 FIXME
211 pipeline = device.GetLogical().CreateComputePipeline({ 209 pipeline = device.GetLogical().CreateComputePipeline({
212 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, 210 .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
213 .pNext = nullptr, 211 .pNext = nullptr,
214 .flags = 0, 212 .flags = 0,
215 .stage = 213 .stage{
216 { 214 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
217 .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, 215 .pNext = nullptr,
218 .pNext = nullptr, 216 .flags = 0,
219 .flags = 0, 217 .stage = VK_SHADER_STAGE_COMPUTE_BIT,
220 .stage = VK_SHADER_STAGE_COMPUTE_BIT, 218 .module = *module,
221 .module = *module, 219 .pName = "main",
222 .pName = "main", 220 .pSpecializationInfo = nullptr,
223 .pSpecializationInfo = nullptr, 221 },
224 },
225 .layout = *layout, 222 .layout = *layout,
226 .basePipelineHandle = nullptr, 223 .basePipelineHandle = nullptr,
227 .basePipelineIndex = 0, 224 .basePipelineIndex = 0,
228 }); 225 });
229 */
230} 226}
231 227
232VKComputePass::~VKComputePass() = default; 228VKComputePass::~VKComputePass() = default;
@@ -262,8 +258,7 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
262 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); 258 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
263 259
264 scheduler.RequestOutsideRenderPassOperationContext(); 260 scheduler.RequestOutsideRenderPassOperationContext();
265 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set, 261 scheduler.Record([this, buffer = staging.buffer, set, num_vertices](vk::CommandBuffer cmdbuf) {
266 num_vertices](vk::CommandBuffer cmdbuf) {
267 static constexpr u32 DISPATCH_SIZE = 1024; 262 static constexpr u32 DISPATCH_SIZE = 1024;
268 static constexpr VkMemoryBarrier WRITE_BARRIER{ 263 static constexpr VkMemoryBarrier WRITE_BARRIER{
269 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, 264 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -271,8 +266,8 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
271 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, 266 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
272 .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, 267 .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
273 }; 268 };
274 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); 269 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
275 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); 270 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
276 cmdbuf.Dispatch(Common::DivCeil(num_vertices, DISPATCH_SIZE), 1, 1); 271 cmdbuf.Dispatch(Common::DivCeil(num_vertices, DISPATCH_SIZE), 1, 1);
277 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 272 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
278 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, WRITE_BARRIER); 273 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, WRITE_BARRIER);
@@ -319,8 +314,8 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
319 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); 314 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
320 315
321 scheduler.RequestOutsideRenderPassOperationContext(); 316 scheduler.RequestOutsideRenderPassOperationContext();
322 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set, 317 scheduler.Record([this, buffer = staging.buffer, set, num_tri_vertices, base_vertex,
323 num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) { 318 index_shift](vk::CommandBuffer cmdbuf) {
324 static constexpr u32 DISPATCH_SIZE = 1024; 319 static constexpr u32 DISPATCH_SIZE = 1024;
325 static constexpr VkMemoryBarrier WRITE_BARRIER{ 320 static constexpr VkMemoryBarrier WRITE_BARRIER{
326 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, 321 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -329,9 +324,9 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
329 .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, 324 .dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
330 }; 325 };
331 const std::array push_constants = {base_vertex, index_shift}; 326 const std::array push_constants = {base_vertex, index_shift};
332 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); 327 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
333 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); 328 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
334 cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants), 329 cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
335 &push_constants); 330 &push_constants);
336 cmdbuf.Dispatch(Common::DivCeil(num_tri_vertices, DISPATCH_SIZE), 1, 1); 331 cmdbuf.Dispatch(Common::DivCeil(num_tri_vertices, DISPATCH_SIZE), 1, 1);
337 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 332 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 67de3cb79..a0ef0e98b 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -189,6 +189,8 @@ void GraphicsPipeline::Configure(bool is_indexed) {
189 189
190 buffer_cache.BindHostGeometryBuffers(is_indexed); 190 buffer_cache.BindHostGeometryBuffers(is_indexed);
191 191
192 update_descriptor_queue.Acquire();
193
192 size_t index{}; 194 size_t index{};
193 for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) { 195 for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
194 buffer_cache.BindHostStageBuffers(stage); 196 buffer_cache.BindHostStageBuffers(stage);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index f0bd4b8af..0292a1b94 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -172,7 +172,6 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
172 if (!pipeline) { 172 if (!pipeline) {
173 return; 173 return;
174 } 174 }
175 update_descriptor_queue.Acquire();
176 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 175 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
177 pipeline->Configure(is_indexed); 176 pipeline->Configure(is_indexed);
178 177