author     ReinUsesLisp  2020-12-30 22:58:05 -0300
committer  ReinUsesLisp  2021-01-15 16:19:36 -0300
commit     e996f1ad09dcfe1e9eaa0273b710560a44d9a1da (patch)
tree       66efbbace2b1bdcf287edb48b9b2549ef7cbc3bb /src/video_core
parent     Merge pull request #5355 from lioncash/timer (diff)
vk_memory_manager: Improve memory manager and its API
Fix a bug where the memory allocator could leave gaps between commits. To fix
this, the allocation algorithm was reworked, although it remains short in
lines of code.

Rework the allocation API to use self-contained movable objects instead of
naively relying on a unique_ptr to do the job for us.

Remove the VK prefix.
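For reference, a minimal sketch of how a caller drives the reworked API. It is illustrative only: device, memory_manager, create_info and payload are stand-in names, while the calls themselves mirror the usage visible in vk_blit_screen.cpp and vk_buffer_cache.cpp below.

    // Commit() binds memory to the buffer and returns a movable MemoryCommit
    // that frees its region of the backing allocation when destroyed or
    // overwritten, replacing the old unique_ptr-based VKMemoryCommit.
    vk::Buffer buffer = device.GetLogical().CreateBuffer(create_info);
    MemoryCommit commit = memory_manager.Commit(buffer, true); // host visible
    const std::span<u8> map = commit.Map(); // maps the allocation lazily
    std::memcpy(map.data(), payload.data(), payload.size());
    commit = MemoryCommit{}; // explicit release, as in ReleaseRawImages()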
Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp          |  12
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h            |   4
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp         |  30
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h           |  15
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp         |  32
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h           |  14
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp       | 237
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h         | 139
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h             |   4
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp  | 106
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h    |  49
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp        |   6
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h          |  13
13 files changed, 318 insertions, 343 deletions
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 5e184eb42..d8261526a 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -150,8 +150,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
     SetUniformData(data, framebuffer);
     SetVertexData(data, framebuffer);
 
-    auto map = buffer_commit->Map();
-    std::memcpy(map.Address(), &data, sizeof(data));
+    const std::span<u8> map = buffer_commit.Map();
+    std::memcpy(map.data(), &data, sizeof(data));
 
     if (!use_accelerated) {
         const u64 image_offset = GetRawImageOffset(framebuffer, image_index);
@@ -165,8 +165,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
         constexpr u32 block_height_log2 = 4;
         const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer);
         Tegra::Texture::UnswizzleTexture(
-            std::span(map.Address() + image_offset, size_bytes), std::span(host_ptr, size_bytes),
-            bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0);
+            map.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), bytes_per_pixel,
+            framebuffer.width, framebuffer.height, 1, block_height_log2, 0);
 
         const VkBufferImageCopy copy{
             .bufferOffset = image_offset,
@@ -224,8 +224,6 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
                                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
         });
     }
-    map.Release();
-
     scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
                       descriptor_set = descriptor_sets[image_index], buffer = *buffer,
                       size = swapchain.GetSize(), pipeline = *pipeline,
@@ -642,7 +640,7 @@ void VKBlitScreen::ReleaseRawImages() {
     raw_images.clear();
     raw_buffer_commits.clear();
     buffer.reset();
-    buffer_commit.reset();
+    buffer_commit = MemoryCommit{};
 }
 
 void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 69ed61770..1aa8e3182 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -104,14 +104,14 @@ private:
     vk::Sampler sampler;
 
     vk::Buffer buffer;
-    VKMemoryCommit buffer_commit;
+    MemoryCommit buffer_commit;
 
     std::vector<u64> resource_ticks;
 
     std::vector<vk::Semaphore> semaphores;
     std::vector<vk::Image> raw_images;
     std::vector<vk::ImageView> raw_image_views;
-    std::vector<VKMemoryCommit> raw_buffer_commits;
+    std::vector<MemoryCommit> raw_buffer_commits;
     u32 raw_width = 0;
     u32 raw_height = 0;
 };
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 58c710344..94c2e101b 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -37,10 +37,10 @@ constexpr VkAccessFlags TRANSFORM_FEEDBACK_WRITE_ACCESS =
 } // Anonymous namespace
 
 Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_,
-               VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_)
+               StagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_)
     : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{
                                                                                  staging_pool_} {
-    const VkBufferCreateInfo ci{
+    buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -49,22 +49,20 @@ Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKSchedul
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    };
-
-    buffer.handle = device.GetLogical().CreateBuffer(ci);
-    buffer.commit = memory_manager.Commit(buffer.handle, false);
+    });
+    commit = memory_manager.Commit(buffer, false);
 }
 
 Buffer::~Buffer() = default;
 
 void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) {
-    const auto& staging = staging_pool.GetUnusedBuffer(data_size, true);
-    std::memcpy(staging.commit->Map(data_size), data, data_size);
+    const auto& staging = staging_pool.Request(data_size, true);
+    std::memcpy(staging.mapped_span.data(), data, data_size);
 
     scheduler.RequestOutsideRenderPassOperationContext();
 
     const VkBuffer handle = Handle();
-    scheduler.Record([staging = *staging.handle, handle, offset, data_size,
+    scheduler.Record([staging = staging.buffer, handle, offset, data_size,
                       &device = device](vk::CommandBuffer cmdbuf) {
         const VkBufferMemoryBarrier read_barrier{
             .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
@@ -100,12 +98,12 @@ void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) {
 }
 
 void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) {
-    const auto& staging = staging_pool.GetUnusedBuffer(data_size, true);
+    auto staging = staging_pool.Request(data_size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
 
     const VkBuffer handle = Handle();
     scheduler.Record(
-        [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) {
+        [staging = staging.buffer, handle, offset, data_size](vk::CommandBuffer cmdbuf) {
             const VkBufferMemoryBarrier barrier{
                 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                 .pNext = nullptr,
@@ -126,7 +124,7 @@ void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) {
         });
     scheduler.Finish();
 
-    std::memcpy(data, staging.commit->Map(data_size), data_size);
+    std::memcpy(data, staging.mapped_span.data(), data_size);
 }
 
 void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset,
@@ -166,7 +164,7 @@ VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_,
                              Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
                              const Device& device_, VKMemoryManager& memory_manager_,
                              VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_,
-                             VKStagingBufferPool& staging_pool_)
+                             StagingBufferPool& staging_pool_)
     : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_,
                                                                  cpu_memory_, stream_buffer_},
       device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{
@@ -181,12 +179,12 @@ std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t s
 
 VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) {
     size = std::max(size, std::size_t(4));
-    const auto& empty = staging_pool.GetUnusedBuffer(size, false);
+    const auto& empty = staging_pool.Request(size, false);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([size, buffer = empty.buffer](vk::CommandBuffer cmdbuf) {
         cmdbuf.FillBuffer(buffer, 0, size, 0);
     });
-    return {*empty.handle, 0, 0};
+    return {empty.buffer, 0, 0};
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 1c39aed34..e54c107f2 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -10,19 +10,19 @@
 #include "video_core/buffer_cache/buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
 
 class Device;
-class VKMemoryManager;
 class VKScheduler;
 
 class Buffer final : public VideoCommon::BufferBlock {
 public:
     explicit Buffer(const Device& device, VKMemoryManager& memory_manager, VKScheduler& scheduler,
-                    VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_);
+                    StagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_);
     ~Buffer();
 
     void Upload(std::size_t offset, std::size_t data_size, const u8* data);
@@ -33,7 +33,7 @@ public:
                   std::size_t copy_size);
 
     VkBuffer Handle() const {
-        return *buffer.handle;
+        return *buffer;
     }
 
     u64 Address() const {
@@ -43,9 +43,10 @@ public:
 private:
     const Device& device;
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_pool;
+    StagingBufferPool& staging_pool;
 
-    VKBuffer buffer;
+    vk::Buffer buffer;
+    MemoryCommit commit;
 };
 
 class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
@@ -54,7 +55,7 @@ public:
                            Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
                            const Device& device, VKMemoryManager& memory_manager,
                            VKScheduler& scheduler, VKStreamBuffer& stream_buffer,
-                           VKStagingBufferPool& staging_pool);
+                           StagingBufferPool& staging_pool);
     ~VKBufferCache();
 
     BufferInfo GetEmptyBuffer(std::size_t size) override;
@@ -66,7 +67,7 @@ private:
     const Device& device;
     VKMemoryManager& memory_manager;
    VKScheduler& scheduler;
-    VKStagingBufferPool& staging_pool;
+    StagingBufferPool& staging_pool;
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 02a6d54b7..d38087f41 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -164,7 +164,7 @@ VkDescriptorSet VKComputePass::CommitDescriptorSet(
 
 QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
                              VKDescriptorPool& descriptor_pool_,
-                             VKStagingBufferPool& staging_buffer_pool_,
+                             StagingBufferPool& staging_buffer_pool_,
                              VKUpdateDescriptorQueue& update_descriptor_queue_)
     : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(),
                     BuildQuadArrayPassDescriptorUpdateTemplateEntry(),
@@ -177,18 +177,18 @@ QuadArrayPass::~QuadArrayPass() = default;
 std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
     const u32 num_triangle_vertices = (num_vertices / 4) * 6;
     const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
-    auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
+    const auto staging_ref = staging_buffer_pool.Request(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
     const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
 
     scheduler.RequestOutsideRenderPassOperationContext();
 
     ASSERT(num_vertices % 4 == 0);
     const u32 num_quads = num_vertices / 4;
-    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads,
-                      first, set](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer,
+                      num_quads, first, set](vk::CommandBuffer cmdbuf) {
         constexpr u32 dispatch_size = 1024;
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
@@ -208,11 +208,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
     });
-    return {*buffer.handle, 0};
+    return {staging_ref.buffer, 0};
 }
 
 Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
-                     VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool_,
+                     VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
                      VKUpdateDescriptorQueue& update_descriptor_queue_)
     : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),
                     BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV),
@@ -224,15 +224,15 @@ Uint8Pass::~Uint8Pass() = default;
 std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
                                              u64 src_offset) {
     const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16));
-    auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
+    const auto staging_ref = staging_buffer_pool.Request(staging_size, false);
 
     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
-    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
     const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
+    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set,
                       num_vertices](vk::CommandBuffer cmdbuf) {
         constexpr u32 dispatch_size = 1024;
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
@@ -252,12 +252,12 @@ std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {*buffer.handle, 0};
+    return {staging_ref.buffer, 0};
 }
 
 QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
                                  VKDescriptorPool& descriptor_pool_,
-                                 VKStagingBufferPool& staging_buffer_pool_,
+                                 StagingBufferPool& staging_buffer_pool_,
                                  VKUpdateDescriptorQueue& update_descriptor_queue_)
     : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(),
                     BuildInputOutputDescriptorUpdateTemplate(),
@@ -286,15 +286,15 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
     const u32 num_tri_vertices = (num_vertices / 4) * 6;
 
     const std::size_t staging_size = num_tri_vertices * sizeof(u32);
-    auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
+    const auto staging_ref = staging_buffer_pool.Request(staging_size, false);
 
     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
-    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
     const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
+    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set,
                       num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) {
         static constexpr u32 dispatch_size = 1024;
         const std::array push_constants = {base_vertex, index_shift};
@@ -317,7 +317,7 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {*buffer.handle, 0};
+    return {staging_ref.buffer, 0};
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 7ddb09afb..f4e4432a7 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -16,8 +16,8 @@
 namespace Vulkan {
 
 class Device;
+class StagingBufferPool;
 class VKScheduler;
-class VKStagingBufferPool;
 class VKUpdateDescriptorQueue;
 
 class VKComputePass {
@@ -45,7 +45,7 @@ class QuadArrayPass final : public VKComputePass {
 public:
     explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
                            VKDescriptorPool& descriptor_pool_,
-                           VKStagingBufferPool& staging_buffer_pool_,
+                           StagingBufferPool& staging_buffer_pool_,
                            VKUpdateDescriptorQueue& update_descriptor_queue_);
     ~QuadArrayPass();
 
@@ -53,7 +53,7 @@ public:
 
 private:
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
 
@@ -61,7 +61,7 @@ class Uint8Pass final : public VKComputePass {
 public:
     explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
                        VKDescriptorPool& descriptor_pool_,
-                       VKStagingBufferPool& staging_buffer_pool_,
+                       StagingBufferPool& staging_buffer_pool_,
                        VKUpdateDescriptorQueue& update_descriptor_queue_);
     ~Uint8Pass();
 
@@ -69,7 +69,7 @@ public:
 
 private:
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
 
@@ -77,7 +77,7 @@ class QuadIndexedPass final : public VKComputePass {
 public:
     explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
                              VKDescriptorPool& descriptor_pool_,
-                             VKStagingBufferPool& staging_buffer_pool_,
+                             StagingBufferPool& staging_buffer_pool_,
                             VKUpdateDescriptorQueue& update_descriptor_queue_);
     ~QuadIndexedPass();
 
@@ -87,7 +87,7 @@ public:
 
 private:
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
 
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index a6abd0eee..102987240 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
+#include <bit>
 #include <optional>
 #include <tuple>
 #include <vector>
@@ -16,92 +17,93 @@
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
-
 namespace {
+struct Range {
+    u64 begin;
+    u64 end;
 
-u64 GetAllocationChunkSize(u64 required_size) {
-    static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
-    auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
-    return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
-}
+    [[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept {
+        return iterator < end && begin < iterator + size;
+    }
+};
+
+[[nodiscard]] u64 GetAllocationChunkSize(u64 required_size) {
+    static constexpr std::array sizes{
+        0x1000ULL << 10,  0x1400ULL << 10,  0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10,
+        0x3200ULL << 10,  0x4000ULL << 10,  0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10,
+        0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10,
+    };
+    static_assert(std::is_sorted(sizes.begin(), sizes.end()));
 
+    const auto it = std::ranges::lower_bound(sizes, required_size);
+    return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20);
+}
 } // Anonymous namespace
 
-class VKMemoryAllocation final {
+class MemoryAllocation {
 public:
-    explicit VKMemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
-                                VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
+    explicit MemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
+                              VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
         : device{device_}, memory{std::move(memory_)}, properties{properties_},
           allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {}
 
-    VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
-        auto found = TryFindFreeSection(free_iterator, allocation_size,
-                                        static_cast<u64>(commit_size), static_cast<u64>(alignment));
-        if (!found) {
-            found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
-                                       static_cast<u64>(alignment));
-            if (!found) {
-                // Signal out of memory, it'll try to do more allocations.
-                return nullptr;
-            }
+    [[nodiscard]] std::optional<MemoryCommit> Commit(VkDeviceSize size, VkDeviceSize alignment) {
+        const std::optional<u64> alloc = FindFreeRegion(size, alignment);
+        if (!alloc) {
+            // Signal out of memory, it'll try to do more allocations.
+            return std::nullopt;
         }
-        auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
-                                                           *found + commit_size);
-        commits.push_back(commit.get());
-
-        // Last commit's address is highly probable to be free.
-        free_iterator = *found + commit_size;
-
-        return commit;
+        const Range range{
+            .begin = *alloc,
+            .end = *alloc + size,
+        };
+        commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range);
+        return std::make_optional<MemoryCommit>(device, this, *memory, *alloc, *alloc + size);
     }
 
-    void Free(const VKMemoryCommitImpl* commit) {
-        ASSERT(commit);
-
-        const auto it = std::find(std::begin(commits), std::end(commits), commit);
-        if (it == commits.end()) {
-            UNREACHABLE_MSG("Freeing unallocated commit!");
-            return;
-        }
-        commits.erase(it);
+    void Free(u64 begin) {
+        const auto it = std::ranges::find(commits, begin, &Range::begin);
+        ASSERT_MSG(it != commits.end(), "Invalid commit");
+        commits.erase(it);
+    }
+
+    [[nodiscard]] std::span<u8> Map() {
+        if (!memory_mapped_span.empty()) {
+            return memory_mapped_span;
+        }
+        u8* const raw_pointer = memory.Map(0, allocation_size);
+        memory_mapped_span = std::span<u8>(raw_pointer, allocation_size);
+        return memory_mapped_span;
     }
 
     /// Returns whether this allocation is compatible with the arguments.
-    bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
+    [[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
         return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
     }
 
 private:
-    static constexpr u32 ShiftType(u32 type) {
+    [[nodiscard]] static constexpr u32 ShiftType(u32 type) {
        return 1U << type;
     }
 
-    /// A memory allocator, it may return a free region between "start" and "end" with the solicited
-    /// requirements.
-    std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
-        u64 iterator = Common::AlignUp(start, alignment);
-        while (iterator + size <= end) {
-            const u64 try_left = iterator;
-            const u64 try_right = try_left + size;
-
-            bool overlap = false;
-            for (const auto& commit : commits) {
-                const auto [commit_left, commit_right] = commit->interval;
-                if (try_left < commit_right && commit_left < try_right) {
-                    // There's an overlap, continue the search where the overlapping commit ends.
-                    iterator = Common::AlignUp(commit_right, alignment);
-                    overlap = true;
-                    break;
-                }
-            }
-            if (!overlap) {
-                // A free address has been found.
-                return try_left;
-            }
+    [[nodiscard]] std::optional<u64> FindFreeRegion(u64 size, u64 alignment) noexcept {
+        ASSERT(std::has_single_bit(alignment));
+        const u64 alignment_log2 = std::countr_zero(alignment);
+        std::optional<u64> candidate;
+        u64 iterator = 0;
+        auto commit = commits.begin();
+        while (iterator + size <= allocation_size) {
+            candidate = candidate.value_or(iterator);
+            if (commit == commits.end()) {
+                break;
+            }
+            if (commit->Contains(*candidate, size)) {
+                candidate = std::nullopt;
+            }
+            iterator = Common::AlignUpLog2(commit->end, alignment_log2);
+            ++commit;
         }
-
-        // No free regions where found, return an empty optional.
-        return std::nullopt;
+        return candidate;
     }
 
     const Device& device; ///< Vulkan device.
@@ -109,21 +111,52 @@ private:
     const VkMemoryPropertyFlags properties; ///< Vulkan properties.
     const u64 allocation_size;              ///< Size of this allocation.
     const u32 shifted_type;                 ///< Stored Vulkan type of this allocation, shifted.
+    std::vector<Range> commits;             ///< All commit ranges done from this allocation.
+    std::span<u8> memory_mapped_span;       ///< Memory mapped span. Empty if not queried before.
+};
 
-    /// Hints where the next free region is likely going to be.
-    u64 free_iterator{};
+MemoryCommit::MemoryCommit(const Device& device_, MemoryAllocation* allocation_,
+                           VkDeviceMemory memory_, u64 begin, u64 end) noexcept
+    : device{&device_}, allocation{allocation_}, memory{memory_}, interval{begin, end} {}
 
-    /// Stores all commits done from this allocation.
-    std::vector<const VKMemoryCommitImpl*> commits;
-};
+MemoryCommit::~MemoryCommit() {
+    Release();
+}
+
+MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept {
+    Release();
+    device = rhs.device;
+    allocation = std::exchange(rhs.allocation, nullptr);
+    memory = rhs.memory;
+    interval = rhs.interval;
+    span = std::exchange(rhs.span, std::span<u8>{});
+    return *this;
+}
+
+MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept
+    : device{rhs.device}, allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory},
+      interval{rhs.interval}, span{std::exchange(rhs.span, std::span<u8>{})} {}
+
+std::span<u8> MemoryCommit::Map() {
+    if (!span.empty()) {
+        return span;
+    }
+    span = allocation->Map().subspan(interval.first, interval.second - interval.first);
+    return span;
+}
+
+void MemoryCommit::Release() {
+    if (allocation) {
+        allocation->Free(interval.first);
+    }
+}
 
 VKMemoryManager::VKMemoryManager(const Device& device_)
     : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}
 
 VKMemoryManager::~VKMemoryManager() = default;
 
-VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
-                                       bool host_visible) {
+MemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements, bool host_visible) {
     const u64 chunk_size = GetAllocationChunkSize(requirements.size);
 
     // When a host visible commit is asked, search for host visible and coherent, otherwise search
@@ -131,39 +164,31 @@ VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
     const VkMemoryPropertyFlags wanted_properties =
         host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                      : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-    if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
-        return commit;
+    if (std::optional<MemoryCommit> commit = TryAllocCommit(requirements, wanted_properties)) {
+        return std::move(*commit);
     }
-
     // Commit has failed, allocate more memory.
-    if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
-        // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
-        // Allocation has failed, panic.
-        UNREACHABLE_MSG("Ran out of VRAM!");
-        return {};
-    }
+    // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory.
+    AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size);
 
-    // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
-    // there's a bug.
-    auto commit = TryAllocCommit(requirements, wanted_properties);
-    ASSERT(commit);
-    return commit;
+    // Commit again, this time it won't fail since there's a fresh allocation above.
+    // If it does, there's a bug.
+    return TryAllocCommit(requirements, wanted_properties).value();
 }
 
-VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
+MemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
     auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
-    buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
+    buffer.BindMemory(commit.Memory(), commit.Offset());
     return commit;
 }
 
-VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
+MemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
     auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
-    image.BindMemory(commit->GetMemory(), commit->GetOffset());
+    image.BindMemory(commit.Memory(), commit.Offset());
     return commit;
 }
 
-bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
+void VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
                                   u64 size) {
     const u32 type = [&] {
         for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
@@ -176,26 +201,18 @@ bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 t
        UNREACHABLE_MSG("Couldn't find a compatible memory type!");
         return 0U;
     }();
-
-    // Try to allocate found type.
-    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
+    vk::DeviceMemory memory = device.GetLogical().AllocateMemory({
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .pNext = nullptr,
         .allocationSize = size,
         .memoryTypeIndex = type,
     });
-    if (!memory) {
-        LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
-        return false;
-    }
-
-    allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
-                                                               wanted_properties, size, type));
-    return true;
+    allocations.push_back(std::make_unique<MemoryAllocation>(device, std::move(memory),
+                                                             wanted_properties, size, type));
 }
 
-VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
-                                               VkMemoryPropertyFlags wanted_properties) {
+std::optional<MemoryCommit> VKMemoryManager::TryAllocCommit(
+    const VkMemoryRequirements& requirements, VkMemoryPropertyFlags wanted_properties) {
     for (auto& allocation : allocations) {
         if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
             continue;
@@ -204,27 +221,7 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequi
             return commit;
         }
     }
-    return {};
-}
-
-VKMemoryCommitImpl::VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
-                                       const vk::DeviceMemory& memory_, u64 begin_, u64 end_)
-    : device{device_}, memory{memory_}, interval{begin_, end_}, allocation{allocation_} {}
-
-VKMemoryCommitImpl::~VKMemoryCommitImpl() {
-    allocation->Free(this);
-}
-
-MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
-    return MemoryMap(this, std::span<u8>(memory.Map(interval.first + offset_, size), size));
-}
-
-void VKMemoryCommitImpl::Unmap() const {
-    memory.Unmap();
-}
-
-MemoryMap VKMemoryCommitImpl::Map() const {
-    return Map(interval.second - interval.first);
+    return std::nullopt;
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 2452bca4e..2f7b836e1 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -15,118 +15,81 @@ namespace Vulkan {
 
 class Device;
 class MemoryMap;
-class VKMemoryAllocation;
-class VKMemoryCommitImpl;
+class MemoryAllocation;
 
-using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
-
-class VKMemoryManager final {
+class MemoryCommit final {
 public:
-    explicit VKMemoryManager(const Device& device_);
-    VKMemoryManager(const VKMemoryManager&) = delete;
-    ~VKMemoryManager();
+    explicit MemoryCommit() noexcept = default;
+    explicit MemoryCommit(const Device& device_, MemoryAllocation* allocation_,
+                          VkDeviceMemory memory_, u64 begin, u64 end) noexcept;
+    ~MemoryCommit();
 
-    /**
-     * Commits a memory with the specified requeriments.
-     * @param requirements Requirements returned from a Vulkan call.
-     * @param host_visible Signals the allocator that it *must* use host visible and coherent
-     *                     memory. When passing false, it will try to allocate device local memory.
-     * @returns A memory commit.
-     */
-    VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
+    MemoryCommit& operator=(MemoryCommit&&) noexcept;
+    MemoryCommit(MemoryCommit&&) noexcept;
 
-    /// Commits memory required by the buffer and binds it.
-    VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
-
-    /// Commits memory required by the image and binds it.
-    VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
-
-private:
-    /// Allocates a chunk of memory.
-    bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
-
-    /// Tries to allocate a memory commit.
-    VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
-                                  VkMemoryPropertyFlags wanted_properties);
-
-    const Device& device;                                         ///< Device handler.
-    const VkPhysicalDeviceMemoryProperties properties;            ///< Physical device properties.
-    std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
-};
-
-class VKMemoryCommitImpl final {
-    friend VKMemoryAllocation;
-    friend MemoryMap;
-
-public:
-    explicit VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
-                                const vk::DeviceMemory& memory_, u64 begin_, u64 end_);
-    ~VKMemoryCommitImpl();
+    MemoryCommit& operator=(const MemoryCommit&) = delete;
+    MemoryCommit(const MemoryCommit&) = delete;
 
-    /// Maps a memory region and returns a pointer to it.
-    /// It's illegal to have more than one memory map at the same time.
-    MemoryMap Map(u64 size, u64 offset = 0) const;
-
-    /// Maps the whole commit and returns a pointer to it.
-    /// It's illegal to have more than one memory map at the same time.
-    MemoryMap Map() const;
+    /// Returns a host visible memory map.
+    /// It will map the backing allocation if it hasn't been mapped before.
+    std::span<u8> Map();
 
     /// Returns the Vulkan memory handler.
-    VkDeviceMemory GetMemory() const {
-        return *memory;
+    VkDeviceMemory Memory() const {
+        return memory;
     }
 
     /// Returns the start position of the commit relative to the allocation.
-    VkDeviceSize GetOffset() const {
+    VkDeviceSize Offset() const {
         return static_cast<VkDeviceSize>(interval.first);
     }
 
 private:
-    /// Unmaps memory.
-    void Unmap() const;
+    void Release();
 
-    const Device& device;             ///< Vulkan device.
-    const vk::DeviceMemory& memory;   ///< Vulkan device memory handler.
-    std::pair<u64, u64> interval{};   ///< Interval where the commit exists.
-    VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+    const Device* device{};         ///< Vulkan device.
+    MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+    VkDeviceMemory memory{};        ///< Vulkan device memory handler.
+    std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
+    std::span<u8> span;             ///< Host visible memory span. Empty if not queried before.
 };
 
-/// Holds ownership of a memory map.
-class MemoryMap final {
+class VKMemoryManager final {
 public:
-    explicit MemoryMap(const VKMemoryCommitImpl* commit_, std::span<u8> span_)
-        : commit{commit_}, span{span_} {}
-
-    ~MemoryMap() {
-        if (commit) {
-            commit->Unmap();
-        }
-    }
+    explicit VKMemoryManager(const Device& device_);
+    ~VKMemoryManager();
 
-    /// Prematurely releases the memory map.
-    void Release() {
-        commit->Unmap();
-        commit = nullptr;
-    }
+    VKMemoryManager& operator=(const VKMemoryManager&) = delete;
+    VKMemoryManager(const VKMemoryManager&) = delete;
 
-    /// Returns a span to the memory map.
-    [[nodiscard]] std::span<u8> Span() const noexcept {
-        return span;
-    }
+    /**
+     * Commits a memory with the specified requeriments.
+     *
+     * @param requirements Requirements returned from a Vulkan call.
+     * @param host_visible Signals the allocator that it *must* use host visible and coherent
+     *                     memory. When passing false, it will try to allocate device local memory.
+     *
+     * @returns A memory commit.
+     */
+    MemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
 
-    /// Returns the address of the memory map.
-    [[nodiscard]] u8* Address() const noexcept {
-        return span.data();
-    }
+    /// Commits memory required by the buffer and binds it.
+    MemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
 
-    /// Returns the address of the memory map;
-    [[nodiscard]] operator u8*() const noexcept {
-        return span.data();
-    }
+    /// Commits memory required by the image and binds it.
+    MemoryCommit Commit(const vk::Image& image, bool host_visible);
 
 private:
-    const VKMemoryCommitImpl* commit{}; ///< Mapped memory commit.
-    std::span<u8> span;                 ///< Address to the mapped memory.
+    /// Allocates a chunk of memory.
+    void AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+
+    /// Tries to allocate a memory commit.
+    std::optional<MemoryCommit> TryAllocCommit(const VkMemoryRequirements& requirements,
+                                               VkMemoryPropertyFlags wanted_properties);
+
+    const Device& device;                                       ///< Device handler.
+    const VkPhysicalDeviceMemoryProperties properties;          ///< Physical device properties.
+    std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations.
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 4695718e9..c3316742f 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -218,7 +218,7 @@ private:
     VKScheduler& scheduler;
 
     VKStreamBuffer stream_buffer;
-    VKStagingBufferPool staging_pool;
+    StagingBufferPool staging_pool;
     VKDescriptorPool descriptor_pool;
     VKUpdateDescriptorQueue update_descriptor_queue;
     BlitImageHelper blit_image;
@@ -234,7 +234,7 @@ private:
     VKFenceManager fence_manager;
 
     vk::Buffer default_buffer;
-    VKMemoryCommit default_buffer_commit;
+    MemoryCommit default_buffer_commit;
     vk::Event wfi_event;
     VideoCommon::Shader::AsyncShaders async_shaders;
 
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 1e0b8b922..b085dcc1c 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -3,58 +3,64 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
-#include <unordered_map>
 #include <utility>
 #include <vector>
 
+#include <fmt/format.h>
+
 #include "common/bit_util.h"
 #include "common/common_types.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
-#include "video_core/vulkan_common/vulkan_device.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
+#include "video_core/vulkan_common/vulkan_device.h"
 
 namespace Vulkan {
 
-VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_)
-    : buffer{std::move(buffer_)} {}
-
-VKStagingBufferPool::VKStagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_,
-                                         VKScheduler& scheduler_)
+StagingBufferPool::StagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_,
+                                     VKScheduler& scheduler_)
     : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {}
 
-VKStagingBufferPool::~VKStagingBufferPool() = default;
+StagingBufferPool::~StagingBufferPool() = default;
 
-VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) {
-    if (const auto buffer = TryGetReservedBuffer(size, host_visible)) {
-        return *buffer;
+StagingBufferRef StagingBufferPool::Request(size_t size, bool host_visible) {
+    if (const std::optional<StagingBufferRef> ref = TryGetReservedBuffer(size, host_visible)) {
+        return *ref;
     }
     return CreateStagingBuffer(size, host_visible);
 }
 
-void VKStagingBufferPool::TickFrame() {
-    current_delete_level = (current_delete_level + 1) % NumLevels;
+void StagingBufferPool::TickFrame() {
+    current_delete_level = (current_delete_level + 1) % NUM_LEVELS;
 
     ReleaseCache(true);
     ReleaseCache(false);
 }
 
-VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
-    for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
-        if (!scheduler.IsFree(entry.tick)) {
-            continue;
+std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t size,
+                                                                        bool host_visible) {
+    StagingBuffers& cache_level = GetCache(host_visible)[Common::Log2Ceil64(size)];
+
+    const auto is_free = [this](const StagingBuffer& entry) {
+        return scheduler.IsFree(entry.tick);
+    };
+    auto& entries = cache_level.entries;
+    const auto hint_it = entries.begin() + cache_level.iterate_index;
+    auto it = std::find_if(entries.begin() + cache_level.iterate_index, entries.end(), is_free);
+    if (it == entries.end()) {
+        it = std::find_if(entries.begin(), hint_it, is_free);
+        if (it == hint_it) {
+            return std::nullopt;
         }
-        entry.tick = scheduler.CurrentTick();
-        return &*entry.buffer;
     }
-    return nullptr;
+    cache_level.iterate_index = std::distance(entries.begin(), it) + 1;
+    it->tick = scheduler.CurrentTick();
+    return it->Ref();
 }
 
-VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
+StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, bool host_visible) {
     const u32 log2 = Common::Log2Ceil64(size);
-
-    auto buffer = std::make_unique<VKBuffer>();
-    buffer->handle = device.GetLogical().CreateBuffer({
+    vk::Buffer buffer = device.GetLogical().CreateBuffer({
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -66,49 +72,53 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
66 .queueFamilyIndexCount = 0, 72 .queueFamilyIndexCount = 0,
67 .pQueueFamilyIndices = nullptr, 73 .pQueueFamilyIndices = nullptr,
68 }); 74 });
69 buffer->commit = memory_manager.Commit(buffer->handle, host_visible); 75 if (device.HasDebuggingToolAttached()) {
70 76 ++buffer_index;
71 std::vector<StagingBuffer>& entries = GetCache(host_visible)[log2].entries; 77 buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", buffer_index).c_str());
72 StagingBuffer& entry = entries.emplace_back(std::move(buffer)); 78 }
73 entry.tick = scheduler.CurrentTick(); 79 MemoryCommit commit = memory_manager.Commit(buffer, host_visible);
74 return *entry.buffer; 80 const std::span<u8> mapped_span = host_visible ? commit.Map() : std::span<u8>{};
81
82 StagingBuffer& entry = GetCache(host_visible)[log2].entries.emplace_back(StagingBuffer{
83 .buffer = std::move(buffer),
84 .commit = std::move(commit),
85 .mapped_span = mapped_span,
86 .tick = scheduler.CurrentTick(),
87 });
88 return entry.Ref();
75} 89}
76 90
77VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { 91StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(bool host_visible) {
78 return host_visible ? host_staging_buffers : device_staging_buffers; 92 return host_visible ? host_staging_buffers : device_staging_buffers;
79} 93}
80 94
81void VKStagingBufferPool::ReleaseCache(bool host_visible) { 95void StagingBufferPool::ReleaseCache(bool host_visible) {
82 auto& cache = GetCache(host_visible); 96 ReleaseLevel(GetCache(host_visible), current_delete_level);
83 const u64 size = ReleaseLevel(cache, current_delete_level);
84 if (size == 0) {
85 return;
86 }
87} 97}
88 98
89u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) { 99void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) {
90 static constexpr std::size_t deletions_per_tick = 16; 100 constexpr size_t deletions_per_tick = 16;
91
92 auto& staging = cache[log2]; 101 auto& staging = cache[log2];
93 auto& entries = staging.entries; 102 auto& entries = staging.entries;
94 const std::size_t old_size = entries.size(); 103 const size_t old_size = entries.size();
95 104
96 const auto is_deleteable = [this](const StagingBuffer& entry) { 105 const auto is_deleteable = [this](const StagingBuffer& entry) {
97 return scheduler.IsFree(entry.tick); 106 return scheduler.IsFree(entry.tick);
98 }; 107 };
99 const std::size_t begin_offset = staging.delete_index; 108 const size_t begin_offset = staging.delete_index;
100 const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); 109 const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
101 const auto begin = std::begin(entries) + begin_offset; 110 const auto begin = entries.begin() + begin_offset;
102 const auto end = std::begin(entries) + end_offset; 111 const auto end = entries.begin() + end_offset;
103 entries.erase(std::remove_if(begin, end, is_deleteable), end); 112 entries.erase(std::remove_if(begin, end, is_deleteable), end);
104 113
105 const std::size_t new_size = entries.size(); 114 const size_t new_size = entries.size();
106 staging.delete_index += deletions_per_tick; 115 staging.delete_index += deletions_per_tick;
107 if (staging.delete_index >= new_size) { 116 if (staging.delete_index >= new_size) {
108 staging.delete_index = 0; 117 staging.delete_index = 0;
109 } 118 }
110 119 if (staging.iterate_index > new_size) {
111 return (1ULL << log2) * (old_size - new_size); 120 staging.iterate_index = 0;
121 }
112} 122}
113 123
114} // namespace Vulkan 124} // namespace Vulkan
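
The reworked TryGetReservedBuffer above avoids rescanning entries that were already found busy on the previous request: iterate_index remembers where the last search stopped, the scan continues from there, wraps around once, and only a full lap without a free entry gives up. A minimal sketch of the same hint-based circular search, reduced to plain values so it compiles standalone (Entry, Pool, and FindFree are stand-ins invented for illustration, not yuzu types):

#include <algorithm>
#include <cstddef>
#include <optional>
#include <vector>

struct Entry {
    bool free = false; // stands in for scheduler.IsFree(entry.tick)
};

struct Pool {
    std::vector<Entry> entries;
    std::size_t iterate_index = 0; // hint: one past the last entry handed out

    // Circular find_if: scan [hint, end), then wrap around to [begin, hint).
    std::optional<std::size_t> FindFree() {
        const auto is_free = [](const Entry& e) { return e.free; };
        const auto hint_it = entries.begin() + iterate_index;
        auto it = std::find_if(hint_it, entries.end(), is_free);
        if (it == entries.end()) {
            it = std::find_if(entries.begin(), hint_it, is_free);
            if (it == hint_it) {
                return std::nullopt; // full lap, nothing reusable
            }
        }
        const auto index = static_cast<std::size_t>(std::distance(entries.begin(), it));
        iterate_index = index + 1; // next search resumes after this entry
        it->free = false;
        return index;
    }
};

The wrap-around scan is why ReleaseLevel now resets iterate_index whenever erasure shrinks the vector below it; otherwise the hint could point past the end of entries.
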
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 90dadcbbe..5234a95fa 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -17,46 +17,54 @@ namespace Vulkan {
 class Device;
 class VKScheduler;
 
-struct VKBuffer final {
-    vk::Buffer handle;
-    VKMemoryCommit commit;
+struct StagingBufferRef {
+    VkBuffer buffer;
+    std::span<u8> mapped_span;
 };
 
-class VKStagingBufferPool final {
+class StagingBufferPool {
 public:
-    explicit VKStagingBufferPool(const Device& device, VKMemoryManager& memory_manager,
-                                 VKScheduler& scheduler);
-    ~VKStagingBufferPool();
+    explicit StagingBufferPool(const Device& device, VKMemoryManager& memory_manager,
+                               VKScheduler& scheduler);
+    ~StagingBufferPool();
 
-    VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible);
+    StagingBufferRef Request(size_t size, bool host_visible);
 
     void TickFrame();
 
 private:
-    struct StagingBuffer final {
-        explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer);
-
-        std::unique_ptr<VKBuffer> buffer;
+    struct StagingBuffer {
+        vk::Buffer buffer;
+        MemoryCommit commit;
+        std::span<u8> mapped_span;
         u64 tick = 0;
+
+        StagingBufferRef Ref() const noexcept {
+            return {
+                .buffer = *buffer,
+                .mapped_span = mapped_span,
+            };
+        }
     };
 
-    struct StagingBuffers final {
+    struct StagingBuffers {
         std::vector<StagingBuffer> entries;
-        std::size_t delete_index = 0;
+        size_t delete_index = 0;
+        size_t iterate_index = 0;
     };
 
-    static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT;
-    using StagingBuffersCache = std::array<StagingBuffers, NumLevels>;
+    static constexpr size_t NUM_LEVELS = sizeof(size_t) * CHAR_BIT;
+    using StagingBuffersCache = std::array<StagingBuffers, NUM_LEVELS>;
 
-    VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible);
+    std::optional<StagingBufferRef> TryGetReservedBuffer(size_t size, bool host_visible);
 
-    VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible);
+    StagingBufferRef CreateStagingBuffer(size_t size, bool host_visible);
 
     StagingBuffersCache& GetCache(bool host_visible);
 
     void ReleaseCache(bool host_visible);
 
-    u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2);
+    void ReleaseLevel(StagingBuffersCache& cache, size_t log2);
 
     const Device& device;
     VKMemoryManager& memory_manager;
@@ -65,7 +73,8 @@ private:
     StagingBuffersCache host_staging_buffers;
     StagingBuffersCache device_staging_buffers;
 
-    std::size_t current_delete_level = 0;
+    size_t current_delete_level = 0;
+    u64 buffer_index = 0;
 };
 
 } // namespace Vulkan
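
With the header above, callers no longer receive a VKBuffer& they must map themselves: Request hands back a copyable StagingBufferRef whose mapped_span is already valid for host-visible buffers, while the pool retains ownership of the vk::Buffer and its MemoryCommit and recycles them by scheduler tick. A hypothetical upload helper, sketched against the declarations in this patch (UploadData and its parameters are invented for illustration; u8 mirrors yuzu's common_types alias):

#include <cstdint>
#include <cstring>
#include <span>

using u8 = std::uint8_t; // yuzu defines this in common_types.h

namespace Vulkan {

// Hypothetical helper: copy CPU data into a pooled, host-visible staging
// buffer and return the ref so the caller can record a copy from ref.buffer.
StagingBufferRef UploadData(StagingBufferPool& pool, std::span<const u8> src) {
    const StagingBufferRef ref = pool.Request(src.size(), true); // host_visible
    std::memcpy(ref.mapped_span.data(), src.data(), src.size()); // pre-mapped
    return ref; // plain handles only; the pool keeps ownership
}

} // namespace Vulkan

Returning a small value type instead of a reference into the pool's vectors also means callers are unaffected when ReleaseLevel erases entries and the vectors reallocate.
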
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index bd11de012..5acbcad76 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -554,10 +554,10 @@ void TextureCacheRuntime::Finish() {
 }
 
 ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) {
-    const auto& buffer = staging_buffer_pool.GetUnusedBuffer(size, true);
+    const auto staging_ref = staging_buffer_pool.Request(size, true);
     return ImageBufferMap{
-        .handle = *buffer.handle,
-        .map = buffer.commit->Map(size),
+        .handle = staging_ref.buffer,
+        .span = staging_ref.mapped_span,
     };
 }
 
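
Call sites like MapUploadBuffer rely on the pool's size classing: Common::Log2Ceil64 maps a request to one of NUM_LEVELS power-of-two buckets, so a 3000-byte upload and a 4096-byte upload draw from the same free list. A freestanding C++20 sketch of that bucketing (CeilLog2 is a stand-in written for illustration, assuming Log2Ceil64 returns the smallest l with (1 << l) >= size):

#include <bit>
#include <cstdint>

// Stand-in for Common::Log2Ceil64: smallest l such that (1 << l) >= size.
constexpr std::uint32_t CeilLog2(std::uint64_t size) {
    return size <= 1 ? 0 : 64 - static_cast<std::uint32_t>(std::countl_zero(size - 1));
}

static_assert(CeilLog2(1) == 0);
static_assert(CeilLog2(3000) == 12); // 3000-byte request -> 4 KiB bucket
static_assert(CeilLog2(4096) == 12); // exact power of two shares the bucket
static_assert(CeilLog2(4097) == 13); // one byte more rounds up a level
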
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 92a7aad8b..134465fd4 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -19,14 +19,13 @@ using VideoCommon::Offset2D;
 using VideoCommon::RenderTargets;
 using VideoCore::Surface::PixelFormat;
 
-class VKScheduler;
-class VKStagingBufferPool;
-
 class BlitImageHelper;
 class Device;
 class Image;
 class ImageView;
 class Framebuffer;
+class StagingBufferPool;
+class VKScheduler;
 
 struct RenderPassKey {
     constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;
@@ -60,18 +59,18 @@ struct ImageBufferMap {
     }
 
     [[nodiscard]] std::span<u8> Span() const noexcept {
-        return map.Span();
+        return span;
     }
 
     VkBuffer handle;
-    MemoryMap map;
+    std::span<u8> span;
 };
 
 struct TextureCacheRuntime {
     const Device& device;
     VKScheduler& scheduler;
     VKMemoryManager& memory_manager;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     BlitImageHelper& blit_image_helper;
     std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache;
 
@@ -141,7 +140,7 @@ private:
     VKScheduler* scheduler;
     vk::Image image;
     vk::Buffer buffer;
-    VKMemoryCommit commit;
+    MemoryCommit commit;
     VkImageAspectFlags aspect_mask = 0;
     bool initialized = false;
 };
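
Throughout the patch, by-value MemoryCommit members replace the pointer-like VKMemoryCommit, which only works if the type is a self-contained, move-only RAII object that releases its memory region exactly once. A minimal sketch of that shape, purely an assumption about how such a type behaves (the real MemoryCommit lives in vk_memory_manager.h and differs in detail):

#include <utility>

// Illustration only: a move-only commit that frees its region on destruction.
class CommitSketch {
public:
    CommitSketch() = default;
    explicit CommitSketch(int allocation_id) : id{allocation_id} {}
    ~CommitSketch() { Release(); }

    CommitSketch(const CommitSketch&) = delete; // copying would double-free
    CommitSketch& operator=(const CommitSketch&) = delete;

    CommitSketch(CommitSketch&& rhs) noexcept : id{std::exchange(rhs.id, -1)} {}
    CommitSketch& operator=(CommitSketch&& rhs) noexcept {
        Release(); // drop our own region before stealing rhs's
        id = std::exchange(rhs.id, -1);
        return *this;
    }

private:
    void Release() {
        if (id >= 0) {
            // a real implementation would return the region to its allocator
            id = -1;
        }
    }
    int id = -1; // -1 marks an empty or moved-from commit
};

Holding such an object directly inside StagingBuffer and Image keeps ownership with the containing struct and removes a heap allocation per commit compared to the unique_ptr-based design it replaces.
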