author    Fernando Sahmkow 2020-04-16 19:58:13 -0400
committer GitHub 2020-04-16 19:58:13 -0400
commit    c81f2561111541e1b5b7f18b70ea69f93a33904d (patch)
tree      9908d80fa8cb62643fa1f0a015a5b43b4f3bc229 /src/video_core/renderer_vulkan
parent    Merge pull request #3675 from degasus/linux_shared_libraries (diff)
parent    buffer_cache: Return handles instead of pointer to handles (diff)
Merge pull request #3600 from ReinUsesLisp/no-pointer-buf-cache
buffer_cache: Return handles instead of pointer to handles
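
Note: VkBuffer is a non-dispatchable Vulkan handle, an opaque value that is trivially copyable (pointer-sized on 64-bit targets), so nothing is gained by handing out const VkBuffer* pointers into cache-owned wrapper objects; the pointer only adds an indirection and a lifetime hazard for deferred command recording. A minimal sketch of the idea (the Handle wrapper below is hypothetical, standing in for yuzu's internal vk wrapper type):

    #include <vulkan/vulkan.h>

    template <typename T>
    class Handle {
    public:
        // Old style: expose a pointer to the stored handle.
        const T* address() const { return &handle; }
        // New style: dereference yields the handle itself, by value.
        T operator*() const { return handle; }

    private:
        T handle = VK_NULL_HANDLE;
    };

    // Returning by value copies an opaque 64-bit token; the result does not
    // depend on the wrapper (or the cache entry that owns it) staying alive.
    VkBuffer ReturnByValue(const Handle<VkBuffer>& h) {
        return *h;
    }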
Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp      | 12
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h        | 10
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp      | 16
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h        |  4
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp        | 30
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.cpp |  5
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h   | 16
7 files changed, 42 insertions(+), 51 deletions(-)
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 0d167afbd..81e1de2be 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -74,18 +74,18 @@ Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
     return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
 }
 
-const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) {
+VkBuffer VKBufferCache::ToHandle(const Buffer& buffer) {
     return buffer->GetHandle();
 }
 
-const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
+VkBuffer VKBufferCache::GetEmptyBuffer(std::size_t size) {
     size = std::max(size, std::size_t(4));
     const auto& empty = staging_pool.GetUnusedBuffer(size, false);
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
         cmdbuf.FillBuffer(buffer, 0, size, 0);
     });
-    return empty.handle.address();
+    return *empty.handle;
 }
 
 void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
@@ -94,7 +94,7 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st
     std::memcpy(staging.commit->Map(size), data, size);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
 
@@ -117,7 +117,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
                                        u8* data) {
     const auto& staging = staging_pool.GetUnusedBuffer(size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         VkBufferMemoryBarrier barrier;
         barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
@@ -144,7 +144,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
 void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                               std::size_t dst_offset, std::size_t size) {
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset,
+    scheduler.Record([src_buffer = src->GetHandle(), dst_buffer = dst->GetHandle(), src_offset,
                       dst_offset, size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
 
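
Note: the Record lambdas above run later, on deferred command-buffer playback, which is why this change is more than cosmetic: a captured handle value stays usable on its own, whereas the previously captured const VkBuffer* had to remain a valid pointer until playback. A sketch of the pattern under that assumption (the deferred queue below is a stand-in for VKScheduler::Record, not yuzu's actual scheduler):

    #include <functional>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Stand-in for the scheduler's deferred command queue.
    std::vector<std::function<void(VkCommandBuffer)>> deferred;

    void RecordCopy(VkBuffer src, VkBuffer dst, VkDeviceSize size) {
        // Capturing by value removes any dependency on the wrapper object's
        // address; the old code captured a pointer that had to stay valid
        // until the lambda executed.
        deferred.push_back([src, dst, size](VkCommandBuffer cmdbuf) {
            const VkBufferCopy region{0, 0, size};
            vkCmdCopyBuffer(cmdbuf, src, dst, 1, &region);
        });
    }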
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index d3c23da98..3cd2e2774 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -33,8 +33,8 @@ public:
                                VAddr cpu_addr, std::size_t size);
     ~CachedBufferBlock();
 
-    const VkBuffer* GetHandle() const {
-        return buffer.handle.address();
+    VkBuffer GetHandle() const {
+        return *buffer.handle;
     }
 
 private:
@@ -50,15 +50,15 @@ public:
                   VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
     ~VKBufferCache();
 
-    const VkBuffer* GetEmptyBuffer(std::size_t size) override;
+    VkBuffer GetEmptyBuffer(std::size_t size) override;
 
 protected:
+    VkBuffer ToHandle(const Buffer& buffer) override;
+
     void WriteBarrier() override {}
 
     Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
-    const VkBuffer* ToHandle(const Buffer& buffer) override;
-
     void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                          const u8* data) override;
 
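
Note: these overrides implement the backend hooks of the shared buffer cache in video_core, which is parameterized on the handle type; with this commit the Vulkan instantiation traffics in VkBuffer by value instead of const VkBuffer* throughout. A simplified outline of that relationship (the real template has more parameters and members; treat this as an illustration, not the actual interface):

    #include <cstddef>
    #include <vulkan/vulkan.h>

    // Simplified outline of the generic cache the Vulkan backend plugs into.
    template <typename BufferType>
    class GenericBufferCache {
    public:
        virtual ~GenericBufferCache() = default;

        // Every handle-returning hook now uses BufferType = VkBuffer by value.
        virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
    };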
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 9d92305f4..878a78755 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -343,13 +343,13 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler,
 
 QuadArrayPass::~QuadArrayPass() = default;
 
-std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
+std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
     const u32 num_triangle_vertices = num_vertices * 6 / 4;
     const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
+    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -377,7 +377,7 @@ std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertice
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
     });
-    return {buffer.handle.address(), 0};
+    return {*buffer.handle, 0};
 }
 
 Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
@@ -391,14 +391,14 @@ Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
 
 Uint8Pass::~Uint8Pass() = default;
 
-std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
-                                                    u64 src_offset) {
+std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
+                                             u64 src_offset) {
     const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices);
-    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
+    update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
+    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -422,7 +422,7 @@ std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer s
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {buffer.handle.address(), 0};
+    return {*buffer.handle, 0};
 }
 
 } // namespace Vulkan
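
Note: returning std::pair<VkBuffer, VkDeviceSize> by value is as cheap as returning two integers, since both members are trivially copyable 64-bit values. A hypothetical caller-side illustration (not part of the diff):

    #include <utility>
    #include <vulkan/vulkan.h>

    // Illustrative helper: the pair is two 64-bit values, so pass-by-value
    // is trivial and the caller can destructure it directly.
    std::pair<VkBuffer, VkDeviceSize> MakeBinding(VkBuffer buffer, VkDeviceSize offset) {
        return {buffer, offset};
    }

    void Use(VkBuffer some_buffer) {
        const auto [buffer, offset] = MakeBinding(some_buffer, 0);
        // buffer and offset are independent copies; nothing points back
        // into a cache or staging pool.
        (void)buffer;
        (void)offset;
    }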
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index c62516bff..ec80c8683 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -50,7 +50,7 @@ public:
                            VKUpdateDescriptorQueue& update_descriptor_queue);
     ~QuadArrayPass();
 
-    std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
+    std::pair<VkBuffer, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
 
 private:
     VKScheduler& scheduler;
@@ -65,7 +65,7 @@ public:
                      VKUpdateDescriptorQueue& update_descriptor_queue);
     ~Uint8Pass();
 
-    std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
+    std::pair<VkBuffer, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
 
 private:
     VKScheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 774ba1f26..4ca0febb8 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -137,13 +137,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
 
 class BufferBindings final {
 public:
-    void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) {
-        vertex.buffer_ptrs[vertex.num_buffers] = buffer;
+    void AddVertexBinding(VkBuffer buffer, VkDeviceSize offset) {
+        vertex.buffers[vertex.num_buffers] = buffer;
         vertex.offsets[vertex.num_buffers] = offset;
         ++vertex.num_buffers;
     }
 
-    void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) {
+    void SetIndexBinding(VkBuffer buffer, VkDeviceSize offset, VkIndexType type) {
         index.buffer = buffer;
         index.offset = offset;
         index.type = type;
@@ -227,19 +227,19 @@ private:
     // Some of these fields are intentionally left uninitialized to avoid initializing them twice.
     struct {
         std::size_t num_buffers = 0;
-        std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs;
+        std::array<VkBuffer, Maxwell::NumVertexArrays> buffers;
         std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets;
     } vertex;
 
     struct {
-        const VkBuffer* buffer = nullptr;
+        VkBuffer buffer = nullptr;
         VkDeviceSize offset;
         VkIndexType type;
     } index;
 
     template <std::size_t N>
     void BindStatic(VKScheduler& scheduler) const {
-        if (index.buffer != nullptr) {
+        if (index.buffer) {
             BindStatic<N, true>(scheduler);
         } else {
             BindStatic<N, false>(scheduler);
@@ -254,18 +254,14 @@ private:
         }
 
         std::array<VkBuffer, N> buffers;
-        std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(),
-                       [](const auto ptr) { return *ptr; });
-
         std::array<VkDeviceSize, N> offsets;
+        std::copy(vertex.buffers.begin(), vertex.buffers.begin() + N, buffers.begin());
         std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin());
 
         if constexpr (is_indexed) {
             // Indexed draw
-            scheduler.Record([buffers, offsets, index_buffer = *index.buffer,
-                              index_offset = index.offset,
-                              index_type = index.type](vk::CommandBuffer cmdbuf) {
-                cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type);
+            scheduler.Record([buffers, offsets, index = index](vk::CommandBuffer cmdbuf) {
+                cmdbuf.BindIndexBuffer(index.buffer, index.offset, index.type);
                 cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
             });
         } else {
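
Note: the rewritten indexed path uses the init-capture index = index to copy the whole aggregate into the closure in one go, which only became practical once index.buffer holds the handle by value. A reduced sketch of that pattern:

    #include <functional>
    #include <vulkan/vulkan.h>

    struct IndexState {
        VkBuffer buffer = VK_NULL_HANDLE;
        VkDeviceSize offset = 0;
        VkIndexType type = VK_INDEX_TYPE_UINT16;
    };

    // One init-capture copies all three fields; the old code needed three
    // separate captures plus a dereference of the buffer pointer.
    std::function<void(VkCommandBuffer)> MakeIndexBind(const IndexState& index) {
        return [index = index](VkCommandBuffer cmdbuf) {
            vkCmdBindIndexBuffer(cmdbuf, index.buffer, index.offset, index.type);
        };
    }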
@@ -790,7 +786,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
     const std::size_t size = binding.buffer_size;
     const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);
 
-    scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([buffer = buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
         cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size);
         cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
     });
@@ -870,7 +866,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
     auto format = regs.index_array.format;
     const bool is_uint8 = format == Maxwell::IndexFormat::UnsignedByte;
     if (is_uint8 && !device.IsExtIndexTypeUint8Supported()) {
-        std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, *buffer, offset);
+        std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, buffer, offset);
         format = Maxwell::IndexFormat::UnsignedShort;
     }
 
@@ -1007,8 +1003,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
     const auto size = memory_manager.Read<u32>(address + 8);
 
     if (size == 0) {
-        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry because
-        // Vulkan doesn't like empty buffers.
+        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
+        // because Vulkan doesn't like empty buffers.
         constexpr std::size_t dummy_size = 4;
         const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
         update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
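
Note: the dummy-entry comment is grounded in Vulkan valid-usage rules: VkBufferCreateInfo::size must be greater than zero, and vkCmdFillBuffer (used by GetEmptyBuffer in vk_buffer_cache.cpp above) writes in 4-byte units, hence the clamp to 4 bytes. As a self-contained reminder of that clamp:

    #include <algorithm>
    #include <cstddef>

    // Mirrors the clamp in VKBufferCache::GetEmptyBuffer: an empty request is
    // rounded up to the smallest size that is both non-zero and a valid
    // vkCmdFillBuffer extent (a multiple of 4).
    std::size_t ClampEmptyBufferSize(std::size_t size) {
        return std::max(size, std::size_t(4));
    }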
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index 4bfec0077..681ecde98 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -35,12 +35,13 @@ void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template
         payload.clear();
     }
 
+    // TODO(Rodrigo): Rework to write the payload directly
     const auto payload_start = payload.data() + payload.size();
     for (const auto& entry : entries) {
         if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
             payload.push_back(*image);
-        } else if (const auto buffer = std::get_if<Buffer>(&entry)) {
-            payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size);
+        } else if (const auto buffer = std::get_if<VkDescriptorBufferInfo>(&entry)) {
+            payload.push_back(*buffer);
         } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
             payload.push_back(*texel);
         } else {
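
Note: with VkDescriptorBufferInfo stored directly in the variant, buffer entries are now copied into the payload verbatim; the removed emplace_back was translating the old custom {const VkBuffer*, offset, size} struct on every Send. A reduced sketch of the flattening step (PayloadEntry below is a simplified stand-in for DescriptorUpdateEntry, not its actual definition):

    #include <variant>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Simplified stand-in: any of the raw structs a descriptor update
    // template payload can contain.
    union PayloadEntry {
        PayloadEntry(VkDescriptorImageInfo image_) : image{image_} {}
        PayloadEntry(VkDescriptorBufferInfo buffer_) : buffer{buffer_} {}
        PayloadEntry(VkBufferView texel_) : texel{texel_} {}

        VkDescriptorImageInfo image;
        VkDescriptorBufferInfo buffer;
        VkBufferView texel;
    };

    using Entry = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>;

    // Every alternative converts to PayloadEntry without field-by-field
    // translation, which is what the new push_back(*buffer) relies on.
    void Flatten(const std::vector<Entry>& entries, std::vector<PayloadEntry>& payload) {
        for (const auto& entry : entries) {
            std::visit([&payload](const auto& value) { payload.push_back(PayloadEntry{value}); },
                       entry);
        }
    }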
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index a9e3d5dba..6ba2c9997 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -18,12 +18,11 @@ class VKScheduler;
 
 class DescriptorUpdateEntry {
 public:
-    explicit DescriptorUpdateEntry() : image{} {}
+    explicit DescriptorUpdateEntry() {}
 
     DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}
 
-    DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size)
-        : buffer{buffer, offset, size} {}
+    DescriptorUpdateEntry(VkDescriptorBufferInfo buffer) : buffer{buffer} {}
 
     DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}
 
@@ -54,8 +53,8 @@ public:
         entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
     }
 
-    void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) {
-        entries.push_back(Buffer{buffer, offset, size});
+    void AddBuffer(VkBuffer buffer, u64 offset, std::size_t size) {
+        entries.emplace_back(VkDescriptorBufferInfo{buffer, offset, size});
     }
 
     void AddTexelBuffer(VkBufferView texel_buffer) {
@@ -67,12 +66,7 @@ public:
     }
 
 private:
-    struct Buffer {
-        const VkBuffer* buffer = nullptr;
-        u64 offset = 0;
-        std::size_t size = 0;
-    };
-    using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>;
+    using Variant = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>;
 
     const VKDevice& device;
     VKScheduler& scheduler;
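
Note: the end result at call sites is that AddBuffer takes the handle by value and builds the raw Vulkan struct in place, so the &src_buffer and .address() indirections in the compute passes disappear. The aggregate it stores, spelled out as an illustrative helper:

    #include <vulkan/vulkan.h>

    // What AddBuffer now stores verbatim: VkDescriptorBufferInfo is
    // {buffer, offset, range} by aggregate initialization.
    VkDescriptorBufferInfo MakeBufferInfo(VkBuffer buffer, VkDeviceSize offset,
                                          VkDeviceSize range) {
        return VkDescriptorBufferInfo{buffer, offset, range};
    }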