Diffstat (limited to 'src/video_core/buffer_cache')
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 53
1 file changed, 22 insertions(+), 31 deletions(-)
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 308d8b55f..c6479af9f 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -41,16 +41,20 @@ class BufferCache {
     static constexpr u64 BLOCK_PAGE_SIZE = 1ULL << BLOCK_PAGE_BITS;
 
 public:
-    using BufferInfo = std::pair<BufferType, u64>;
+    struct BufferInfo {
+        BufferType handle;
+        u64 offset;
+        u64 address;
+    };
 
     BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4,
                             bool is_written = false, bool use_fast_cbuf = false) {
         std::lock_guard lock{mutex};
 
-        const auto& memory_manager = system.GPU().MemoryManager();
+        auto& memory_manager = system.GPU().MemoryManager();
         const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr_opt) {
-            return {GetEmptyBuffer(size), 0};
+            return GetEmptyBuffer(size);
         }
         const VAddr cpu_addr = *cpu_addr_opt;
 
@@ -59,7 +63,6 @@ public:
         constexpr std::size_t max_stream_size = 0x800;
         if (use_fast_cbuf || size < max_stream_size) {
             if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
-                auto& memory_manager = system.GPU().MemoryManager();
                 const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size);
                 if (use_fast_cbuf) {
                     u8* dest;
@@ -89,7 +92,7 @@ public:
         Buffer* const block = GetBlock(cpu_addr, size);
         MapInterval* const map = MapAddress(block, gpu_addr, cpu_addr, size);
         if (!map) {
-            return {GetEmptyBuffer(size), 0};
+            return GetEmptyBuffer(size);
         }
         if (is_written) {
             map->MarkAsModified(true, GetModifiedTicks());
@@ -102,7 +105,7 @@ public:
             }
         }
 
-        return {block->Handle(), static_cast<u64>(block->Offset(cpu_addr))};
+        return BufferInfo{block->Handle(), block->Offset(cpu_addr), block->Address()};
     }
 
     /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset.
@@ -255,27 +258,17 @@ public:
         committed_flushes.pop_front();
     }
 
-    virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
+    virtual BufferInfo GetEmptyBuffer(std::size_t size) = 0;
 
 protected:
     explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
-                         std::unique_ptr<StreamBuffer> stream_buffer_)
-        : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer_)},
-          stream_buffer_handle{stream_buffer->Handle()} {}
+                         std::unique_ptr<StreamBuffer> stream_buffer)
+        : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)} {}
 
     ~BufferCache() = default;
 
     virtual std::shared_ptr<Buffer> CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
 
-    virtual void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
-                                 const u8* data) = 0;
-
-    virtual void DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
-                                   u8* data) = 0;
-
-    virtual void CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
-                           std::size_t dst_offset, std::size_t size) = 0;
-
     virtual BufferInfo ConstBufferUpload(const void* raw_pointer, std::size_t size) {
         return {};
     }
@@ -329,19 +322,18 @@ protected:
     }
 
 private:
-    MapInterval* MapAddress(const Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr,
-                            std::size_t size) {
+    MapInterval* MapAddress(Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size) {
         const VectorMapInterval overlaps = GetMapsInRange(cpu_addr, size);
         if (overlaps.empty()) {
             auto& memory_manager = system.GPU().MemoryManager();
             const VAddr cpu_addr_end = cpu_addr + size;
             if (memory_manager.IsGranularRange(gpu_addr, size)) {
                 u8* host_ptr = memory_manager.GetPointer(gpu_addr);
-                UploadBlockData(*block, block->Offset(cpu_addr), size, host_ptr);
+                block->Upload(block->Offset(cpu_addr), size, host_ptr);
             } else {
                 staging_buffer.resize(size);
                 memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
-                UploadBlockData(*block, block->Offset(cpu_addr), size, staging_buffer.data());
+                block->Upload(block->Offset(cpu_addr), size, staging_buffer.data());
             }
             return Register(MapInterval(cpu_addr, cpu_addr_end, gpu_addr));
         }
@@ -384,8 +376,7 @@ private:
         return map;
     }
 
-    void UpdateBlock(const Buffer* block, VAddr start, VAddr end,
-                     const VectorMapInterval& overlaps) {
+    void UpdateBlock(Buffer* block, VAddr start, VAddr end, const VectorMapInterval& overlaps) {
         const IntervalType base_interval{start, end};
         IntervalSet interval_set{};
         interval_set.add(base_interval);
@@ -400,7 +391,7 @@ private:
             }
             staging_buffer.resize(size);
             system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
-            UploadBlockData(*block, block->Offset(interval.lower()), size, staging_buffer.data());
+            block->Upload(block->Offset(interval.lower()), size, staging_buffer.data());
         }
     }
 
@@ -437,7 +428,7 @@ private:
 
         const std::size_t size = map->end - map->start;
         staging_buffer.resize(size);
-        DownloadBlockData(*block, block->Offset(map->start), size, staging_buffer.data());
+        block->Download(block->Offset(map->start), size, staging_buffer.data());
         system.Memory().WriteBlockUnsafe(map->start, staging_buffer.data(), size);
         map->MarkAsModified(false, 0);
     }
@@ -450,7 +441,7 @@ private:
 
         buffer_ptr += size;
         buffer_offset += size;
-        return {stream_buffer_handle, uploaded_offset};
+        return BufferInfo{stream_buffer->Handle(), uploaded_offset, stream_buffer->Address()};
     }
 
     void AlignBuffer(std::size_t alignment) {
@@ -465,7 +456,7 @@ private:
         const std::size_t new_size = old_size + BLOCK_PAGE_SIZE;
         const VAddr cpu_addr = buffer->CpuAddr();
         std::shared_ptr<Buffer> new_buffer = CreateBlock(cpu_addr, new_size);
-        CopyBlock(*buffer, *new_buffer, 0, 0, old_size);
+        new_buffer->CopyFrom(*buffer, 0, 0, old_size);
         QueueDestruction(std::move(buffer));
 
         const VAddr cpu_addr_end = cpu_addr + new_size - 1;
@@ -487,8 +478,8 @@ private:
         const std::size_t new_size = size_1 + size_2;
 
         std::shared_ptr<Buffer> new_buffer = CreateBlock(new_addr, new_size);
-        CopyBlock(*first, *new_buffer, 0, new_buffer->Offset(first_addr), size_1);
-        CopyBlock(*second, *new_buffer, 0, new_buffer->Offset(second_addr), size_2);
+        new_buffer->CopyFrom(*first, 0, new_buffer->Offset(first_addr), size_1);
+        new_buffer->CopyFrom(*second, 0, new_buffer->Offset(second_addr), size_2);
         QueueDestruction(std::move(first));
         QueueDestruction(std::move(second));
 
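For context, a compilable sketch of the contract this diff implies: the former BufferCache virtuals UploadBlockData, DownloadBlockData and CopyBlock now live on the backend Buffer type as Upload, Download and CopyFrom, and Handle(), Address() and Offset() feed the new three-field BufferInfo. The method names mirror the call sites above; the in-memory storage and placeholder handle/address values are illustrative assumptions, not the actual OpenGL or Vulkan backend code.

// Standalone sketch only: BufferType, the handle value and the byte-vector
// storage are stand-ins; the method names come from the call sites in the diff.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

using u8 = std::uint8_t;
using u64 = std::uint64_t;
using VAddr = u64;
using BufferType = u64; // stand-in for a GLuint / VkBuffer handle

struct BufferInfo {
    BufferType handle; // API object to bind
    u64 offset;        // byte offset of the requested range inside the block
    u64 address;       // device address of the block, for backends that need it
};

class Buffer {
public:
    Buffer(VAddr cpu_addr_, std::size_t size_) : cpu_addr{cpu_addr_}, storage(size_) {}

    BufferType Handle() const { return 42; } // placeholder; a real backend returns its API object
    u64 Address() const { return 0x10000; }  // placeholder device address
    u64 Offset(VAddr addr) const { return addr - cpu_addr; }
    VAddr CpuAddr() const { return cpu_addr; }

    // Replacements for the removed UploadBlockData / DownloadBlockData / CopyBlock virtuals.
    void Upload(std::size_t offset, std::size_t size, const u8* data) {
        std::memcpy(storage.data() + offset, data, size);
    }
    void Download(std::size_t offset, std::size_t size, u8* data) const {
        std::memcpy(data, storage.data() + offset, size);
    }
    void CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset,
                  std::size_t size) {
        std::memcpy(storage.data() + dst_offset, src.storage.data() + src_offset, size);
    }

private:
    VAddr cpu_addr;
    std::vector<u8> storage;
};

int main() {
    Buffer block{0x1000, 64};
    const u8 payload[4] = {1, 2, 3, 4};
    block.Upload(block.Offset(0x1010), sizeof(payload), payload);

    u8 readback[4]{};
    block.Download(block.Offset(0x1010), sizeof(readback), readback);
    assert(std::memcmp(payload, readback, sizeof(payload)) == 0);

    // The shape now returned by UploadMemory / StreamBufferUpload:
    const BufferInfo info{block.Handle(), block.Offset(0x1010), block.Address()};
    return info.offset == 0x10 ? 0 : 1;
}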