path: root/src/video_core/renderer_vulkan
author    ReinUsesLisp    2020-04-04 02:54:55 -0300
committer ReinUsesLisp    2020-04-16 02:33:34 -0300
commit    090fd3fefab9ef251e6e5bf4011280a657340f2a (patch)
tree      acfdc14d7e948ae296859d3d112f356f3279680c /src/video_core/renderer_vulkan
parent    Merge pull request #3636 from ReinUsesLisp/drop-vk-hpp (diff)
download  yuzu-090fd3fefab9ef251e6e5bf4011280a657340f2a.tar.gz
          yuzu-090fd3fefab9ef251e6e5bf4011280a657340f2a.tar.xz
          yuzu-090fd3fefab9ef251e6e5bf4011280a657340f2a.zip
buffer_cache: Return handles instead of pointers to handles
The original idea behind returning pointers was that handles can be moved. The problem is that the implementation didn't take that into account and made everything harder to work with. This commit drops pointers to handles and returns the handles themselves. While it is still true that handles can be invalidated, this way we get a stale handle instead of a dangling pointer. This problem can be solved in the future with sparse buffers.
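
To illustrate the trade-off, here is a minimal standalone sketch (not yuzu code; Pool and FakeHandle are made-up names standing in for the buffer cache and a VkBuffer-like handle): a pointer into cache-owned storage can dangle once that storage is freed or reallocated, while a handle returned by value can at worst go stale.

    #include <cstdint>
    #include <vector>

    using FakeHandle = std::uint64_t; // stand-in for an opaque VkBuffer-like handle

    struct Pool {
        std::vector<FakeHandle> handles;

        // Old style: a pointer into the vector dangles if 'handles' reallocates
        // or the element is erased, even though the caller did nothing wrong.
        const FakeHandle* ToHandlePtr(std::size_t i) const {
            return &handles[i];
        }

        // New style: the caller gets a copy of the handle. If the underlying
        // buffer is later destroyed, the copy is merely stale, not a wild pointer.
        FakeHandle ToHandle(std::size_t i) const {
            return handles[i];
        }
    };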
Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp        12
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h          10
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp        16
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h           4
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp          30
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.cpp    5
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h     16
7 files changed, 42 insertions, 51 deletions
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 0d167afbd..81e1de2be 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -74,18 +74,18 @@ Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
     return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
 }
 
-const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) {
+VkBuffer VKBufferCache::ToHandle(const Buffer& buffer) {
     return buffer->GetHandle();
 }
 
-const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
+VkBuffer VKBufferCache::GetEmptyBuffer(std::size_t size) {
     size = std::max(size, std::size_t(4));
     const auto& empty = staging_pool.GetUnusedBuffer(size, false);
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
         cmdbuf.FillBuffer(buffer, 0, size, 0);
     });
-    return empty.handle.address();
+    return *empty.handle;
 }
 
 void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
@@ -94,7 +94,7 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st
     std::memcpy(staging.commit->Map(size), data, size);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
 
@@ -117,7 +117,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
                                        u8* data) {
     const auto& staging = staging_pool.GetUnusedBuffer(size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         VkBufferMemoryBarrier barrier;
         barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
@@ -144,7 +144,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
 void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                               std::size_t dst_offset, std::size_t size) {
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset,
+    scheduler.Record([src_buffer = src->GetHandle(), dst_buffer = dst->GetHandle(), src_offset,
                       dst_offset, size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
 
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index d3c23da98..3cd2e2774 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -33,8 +33,8 @@ public:
                       VAddr cpu_addr, std::size_t size);
     ~CachedBufferBlock();
 
-    const VkBuffer* GetHandle() const {
-        return buffer.handle.address();
+    VkBuffer GetHandle() const {
+        return *buffer.handle;
     }
 
 private:
@@ -50,15 +50,15 @@ public:
                   VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
     ~VKBufferCache();
 
-    const VkBuffer* GetEmptyBuffer(std::size_t size) override;
+    VkBuffer GetEmptyBuffer(std::size_t size) override;
 
 protected:
+    VkBuffer ToHandle(const Buffer& buffer) override;
+
     void WriteBarrier() override {}
 
     Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
-    const VkBuffer* ToHandle(const Buffer& buffer) override;
-
     void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                          const u8* data) override;
 
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 9d92305f4..878a78755 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -343,13 +343,13 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler,
 
 QuadArrayPass::~QuadArrayPass() = default;
 
-std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
+std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
     const u32 num_triangle_vertices = num_vertices * 6 / 4;
     const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
+    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -377,7 +377,7 @@ std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertice
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
     });
-    return {buffer.handle.address(), 0};
+    return {*buffer.handle, 0};
 }
 
 Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
@@ -391,14 +391,14 @@ Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
 
 Uint8Pass::~Uint8Pass() = default;
 
-std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
-                                                     u64 src_offset) {
+std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
+                                             u64 src_offset) {
     const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
     auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices);
-    update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
+    update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
+    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
     const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -422,7 +422,7 @@ std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer s
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {buffer.handle.address(), 0};
+    return {*buffer.handle, 0};
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index c62516bff..ec80c8683 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -50,7 +50,7 @@ public:
                    VKUpdateDescriptorQueue& update_descriptor_queue);
     ~QuadArrayPass();
 
-    std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
+    std::pair<VkBuffer, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
 
 private:
     VKScheduler& scheduler;
@@ -65,7 +65,7 @@ public:
               VKUpdateDescriptorQueue& update_descriptor_queue);
     ~Uint8Pass();
 
-    std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
+    std::pair<VkBuffer, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
 
 private:
     VKScheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 33cbc0bb6..ab281c9e2 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -134,13 +134,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
 
 class BufferBindings final {
 public:
-    void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) {
-        vertex.buffer_ptrs[vertex.num_buffers] = buffer;
+    void AddVertexBinding(VkBuffer buffer, VkDeviceSize offset) {
+        vertex.buffers[vertex.num_buffers] = buffer;
         vertex.offsets[vertex.num_buffers] = offset;
         ++vertex.num_buffers;
     }
 
-    void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) {
+    void SetIndexBinding(VkBuffer buffer, VkDeviceSize offset, VkIndexType type) {
         index.buffer = buffer;
         index.offset = offset;
         index.type = type;
@@ -224,19 +224,19 @@ private:
     // Some of these fields are intentionally left uninitialized to avoid initializing them twice.
     struct {
         std::size_t num_buffers = 0;
-        std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs;
+        std::array<VkBuffer, Maxwell::NumVertexArrays> buffers;
         std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets;
     } vertex;
 
     struct {
-        const VkBuffer* buffer = nullptr;
+        VkBuffer buffer = nullptr;
         VkDeviceSize offset;
         VkIndexType type;
     } index;
 
     template <std::size_t N>
     void BindStatic(VKScheduler& scheduler) const {
-        if (index.buffer != nullptr) {
+        if (index.buffer) {
             BindStatic<N, true>(scheduler);
         } else {
             BindStatic<N, false>(scheduler);
@@ -251,18 +251,14 @@ private:
         }
 
         std::array<VkBuffer, N> buffers;
-        std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(),
-                       [](const auto ptr) { return *ptr; });
-
         std::array<VkDeviceSize, N> offsets;
+        std::copy(vertex.buffers.begin(), vertex.buffers.begin() + N, buffers.begin());
         std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin());
 
         if constexpr (is_indexed) {
             // Indexed draw
-            scheduler.Record([buffers, offsets, index_buffer = *index.buffer,
-                              index_offset = index.offset,
-                              index_type = index.type](vk::CommandBuffer cmdbuf) {
-                cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type);
+            scheduler.Record([buffers, offsets, index = index](vk::CommandBuffer cmdbuf) {
+                cmdbuf.BindIndexBuffer(index.buffer, index.offset, index.type);
                 cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
             });
         } else {
@@ -787,7 +783,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
         const std::size_t size = binding.buffer_size;
         const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);
 
-        scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
+        scheduler.Record([buffer = buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
             cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size);
             cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
         });
@@ -867,7 +863,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
     auto format = regs.index_array.format;
     const bool is_uint8 = format == Maxwell::IndexFormat::UnsignedByte;
     if (is_uint8 && !device.IsExtIndexTypeUint8Supported()) {
-        std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, *buffer, offset);
+        std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, buffer, offset);
         format = Maxwell::IndexFormat::UnsignedShort;
     }
 
@@ -1004,8 +1000,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
     const auto size = memory_manager.Read<u32>(address + 8);
 
     if (size == 0) {
-        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry because
-        // Vulkan doesn't like empty buffers.
+        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
+        // because Vulkan doesn't like empty buffers.
         constexpr std::size_t dummy_size = 4;
         const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
         update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index 4bfec0077..681ecde98 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -35,12 +35,13 @@ void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template
         payload.clear();
     }
 
+    // TODO(Rodrigo): Rework to write the payload directly
     const auto payload_start = payload.data() + payload.size();
     for (const auto& entry : entries) {
         if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
             payload.push_back(*image);
-        } else if (const auto buffer = std::get_if<Buffer>(&entry)) {
-            payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size);
+        } else if (const auto buffer = std::get_if<VkDescriptorBufferInfo>(&entry)) {
+            payload.push_back(*buffer);
         } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
             payload.push_back(*texel);
         } else {
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index a9e3d5dba..6ba2c9997 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -18,12 +18,11 @@ class VKScheduler;
 
 class DescriptorUpdateEntry {
 public:
-    explicit DescriptorUpdateEntry() : image{} {}
+    explicit DescriptorUpdateEntry() {}
 
     DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}
 
-    DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size)
-        : buffer{buffer, offset, size} {}
+    DescriptorUpdateEntry(VkDescriptorBufferInfo buffer) : buffer{buffer} {}
 
     DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}
 
@@ -54,8 +53,8 @@ public:
         entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
     }
 
-    void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) {
-        entries.push_back(Buffer{buffer, offset, size});
+    void AddBuffer(VkBuffer buffer, u64 offset, std::size_t size) {
+        entries.emplace_back(VkDescriptorBufferInfo{buffer, offset, size});
     }
 
     void AddTexelBuffer(VkBufferView texel_buffer) {
@@ -67,12 +66,7 @@ public:
     }
 
 private:
-    struct Buffer {
-        const VkBuffer* buffer = nullptr;
-        u64 offset = 0;
-        std::size_t size = 0;
-    };
-    using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>;
+    using Variant = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>;
 
     const VKDevice& device;
     VKScheduler& scheduler;