diff options
| author | 2020-06-11 21:24:45 -0300 | |
|---|---|---|
| committer | 2020-09-06 05:28:48 -0300 | |
| commit | 9e871937250cb92a13336c6c06186c41f19e1738 (patch) | |
| tree | 5151b85f8c4c26e7a5971b32584723f9910ea67b /src/video_core/buffer_cache | |
| parent | Merge pull request #4596 from FearlessTobi/port-5495 (diff) | |
| download | yuzu-9e871937250cb92a13336c6c06186c41f19e1738.tar.gz yuzu-9e871937250cb92a13336c6c06186c41f19e1738.tar.xz yuzu-9e871937250cb92a13336c6c06186c41f19e1738.zip | |
video_core: Remove all Core::System references in renderer
Now that the GPU is initialized when video backends are initialized,
there is no longer a need to query components once the game is running:
this can be done while yuzu is booting.
This allows us to pass components between constructors and in the
process remove all Core::System references in the video backend.
Diffstat (limited to 'src/video_core/buffer_cache')
| -rw-r--r-- | src/video_core/buffer_cache/buffer_cache.h | 51 |
1 file changed, 25 insertions, 26 deletions
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index b5dc68902..e7edd733f 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h | |||
| @@ -51,46 +51,43 @@ public: | |||
| 51 | bool is_written = false, bool use_fast_cbuf = false) { | 51 | bool is_written = false, bool use_fast_cbuf = false) { |
| 52 | std::lock_guard lock{mutex}; | 52 | std::lock_guard lock{mutex}; |
| 53 | 53 | ||
| 54 | auto& memory_manager = system.GPU().MemoryManager(); | 54 | const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); |
| 55 | const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr); | 55 | if (!cpu_addr) { |
| 56 | if (!cpu_addr_opt) { | ||
| 57 | return GetEmptyBuffer(size); | 56 | return GetEmptyBuffer(size); |
| 58 | } | 57 | } |
| 59 | const VAddr cpu_addr = *cpu_addr_opt; | ||
| 60 | 58 | ||
| 61 | // Cache management is a big overhead, so only cache entries with a given size. | 59 | // Cache management is a big overhead, so only cache entries with a given size. |
| 62 | // TODO: Figure out which size is the best for given games. | 60 | // TODO: Figure out which size is the best for given games. |
| 63 | constexpr std::size_t max_stream_size = 0x800; | 61 | constexpr std::size_t max_stream_size = 0x800; |
| 64 | if (use_fast_cbuf || size < max_stream_size) { | 62 | if (use_fast_cbuf || size < max_stream_size) { |
| 65 | if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) { | 63 | if (!is_written && !IsRegionWritten(*cpu_addr, *cpu_addr + size - 1)) { |
| 66 | const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size); | 64 | const bool is_granular = gpu_memory.IsGranularRange(gpu_addr, size); |
| 67 | if (use_fast_cbuf) { | 65 | if (use_fast_cbuf) { |
| 68 | u8* dest; | 66 | u8* dest; |
| 69 | if (is_granular) { | 67 | if (is_granular) { |
| 70 | dest = memory_manager.GetPointer(gpu_addr); | 68 | dest = gpu_memory.GetPointer(gpu_addr); |
| 71 | } else { | 69 | } else { |
| 72 | staging_buffer.resize(size); | 70 | staging_buffer.resize(size); |
| 73 | dest = staging_buffer.data(); | 71 | dest = staging_buffer.data(); |
| 74 | memory_manager.ReadBlockUnsafe(gpu_addr, dest, size); | 72 | gpu_memory.ReadBlockUnsafe(gpu_addr, dest, size); |
| 75 | } | 73 | } |
| 76 | return ConstBufferUpload(dest, size); | 74 | return ConstBufferUpload(dest, size); |
| 77 | } | 75 | } |
| 78 | if (is_granular) { | 76 | if (is_granular) { |
| 79 | u8* const host_ptr = memory_manager.GetPointer(gpu_addr); | 77 | u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); |
| 80 | return StreamBufferUpload(size, alignment, [host_ptr, size](u8* dest) { | 78 | return StreamBufferUpload(size, alignment, [host_ptr, size](u8* dest) { |
| 81 | std::memcpy(dest, host_ptr, size); | 79 | std::memcpy(dest, host_ptr, size); |
| 82 | }); | 80 | }); |
| 83 | } else { | 81 | } else { |
| 84 | return StreamBufferUpload( | 82 | return StreamBufferUpload(size, alignment, [this, gpu_addr, size](u8* dest) { |
| 85 | size, alignment, [&memory_manager, gpu_addr, size](u8* dest) { | 83 | gpu_memory.ReadBlockUnsafe(gpu_addr, dest, size); |
| 86 | memory_manager.ReadBlockUnsafe(gpu_addr, dest, size); | 84 | }); |
| 87 | }); | ||
| 88 | } | 85 | } |
| 89 | } | 86 | } |
| 90 | } | 87 | } |
| 91 | 88 | ||
| 92 | Buffer* const block = GetBlock(cpu_addr, size); | 89 | Buffer* const block = GetBlock(*cpu_addr, size); |
| 93 | MapInterval* const map = MapAddress(block, gpu_addr, cpu_addr, size); | 90 | MapInterval* const map = MapAddress(block, gpu_addr, *cpu_addr, size); |
| 94 | if (!map) { | 91 | if (!map) { |
| 95 | return GetEmptyBuffer(size); | 92 | return GetEmptyBuffer(size); |
| 96 | } | 93 | } |
| @@ -106,7 +103,7 @@ public: | |||
| 106 | } | 103 | } |
| 107 | } | 104 | } |
| 108 | 105 | ||
| 109 | return BufferInfo{block->Handle(), block->Offset(cpu_addr), block->Address()}; | 106 | return BufferInfo{block->Handle(), block->Offset(*cpu_addr), block->Address()}; |
| 110 | } | 107 | } |
| 111 | 108 | ||
| 112 | /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset. | 109 | /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset. |
| @@ -262,9 +259,11 @@ public: | |||
| 262 | virtual BufferInfo GetEmptyBuffer(std::size_t size) = 0; | 259 | virtual BufferInfo GetEmptyBuffer(std::size_t size) = 0; |
| 263 | 260 | ||
| 264 | protected: | 261 | protected: |
| 265 | explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, | 262 | explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, |
| 266 | std::unique_ptr<StreamBuffer> stream_buffer) | 263 | Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, |
| 267 | : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)} {} | 264 | std::unique_ptr<StreamBuffer> stream_buffer_) |
| 265 | : rasterizer{rasterizer_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_}, | ||
| 266 | stream_buffer{std::move(stream_buffer_)}, stream_buffer_handle{stream_buffer->Handle()} {} | ||
| 268 | 267 | ||
| 269 | ~BufferCache() = default; | 268 | ~BufferCache() = default; |
| 270 | 269 | ||
| @@ -326,14 +325,13 @@ private: | |||
| 326 | MapInterval* MapAddress(Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size) { | 325 | MapInterval* MapAddress(Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size) { |
| 327 | const VectorMapInterval overlaps = GetMapsInRange(cpu_addr, size); | 326 | const VectorMapInterval overlaps = GetMapsInRange(cpu_addr, size); |
| 328 | if (overlaps.empty()) { | 327 | if (overlaps.empty()) { |
| 329 | auto& memory_manager = system.GPU().MemoryManager(); | ||
| 330 | const VAddr cpu_addr_end = cpu_addr + size; | 328 | const VAddr cpu_addr_end = cpu_addr + size; |
| 331 | if (memory_manager.IsGranularRange(gpu_addr, size)) { | 329 | if (gpu_memory.IsGranularRange(gpu_addr, size)) { |
| 332 | u8* host_ptr = memory_manager.GetPointer(gpu_addr); | 330 | u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); |
| 333 | block->Upload(block->Offset(cpu_addr), size, host_ptr); | 331 | block->Upload(block->Offset(cpu_addr), size, host_ptr); |
| 334 | } else { | 332 | } else { |
| 335 | staging_buffer.resize(size); | 333 | staging_buffer.resize(size); |
| 336 | memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size); | 334 | gpu_memory.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size); |
| 337 | block->Upload(block->Offset(cpu_addr), size, staging_buffer.data()); | 335 | block->Upload(block->Offset(cpu_addr), size, staging_buffer.data()); |
| 338 | } | 336 | } |
| 339 | return Register(MapInterval(cpu_addr, cpu_addr_end, gpu_addr)); | 337 | return Register(MapInterval(cpu_addr, cpu_addr_end, gpu_addr)); |
| @@ -392,7 +390,7 @@ private: | |||
| 392 | continue; | 390 | continue; |
| 393 | } | 391 | } |
| 394 | staging_buffer.resize(size); | 392 | staging_buffer.resize(size); |
| 395 | system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size); | 393 | cpu_memory.ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size); |
| 396 | block->Upload(block->Offset(interval.lower()), size, staging_buffer.data()); | 394 | block->Upload(block->Offset(interval.lower()), size, staging_buffer.data()); |
| 397 | } | 395 | } |
| 398 | } | 396 | } |
| @@ -431,7 +429,7 @@ private: | |||
| 431 | const std::size_t size = map->end - map->start; | 429 | const std::size_t size = map->end - map->start; |
| 432 | staging_buffer.resize(size); | 430 | staging_buffer.resize(size); |
| 433 | block->Download(block->Offset(map->start), size, staging_buffer.data()); | 431 | block->Download(block->Offset(map->start), size, staging_buffer.data()); |
| 434 | system.Memory().WriteBlockUnsafe(map->start, staging_buffer.data(), size); | 432 | cpu_memory.WriteBlockUnsafe(map->start, staging_buffer.data(), size); |
| 435 | map->MarkAsModified(false, 0); | 433 | map->MarkAsModified(false, 0); |
| 436 | } | 434 | } |
| 437 | 435 | ||
| @@ -567,7 +565,8 @@ private: | |||
| 567 | } | 565 | } |
| 568 | 566 | ||
| 569 | VideoCore::RasterizerInterface& rasterizer; | 567 | VideoCore::RasterizerInterface& rasterizer; |
| 570 | Core::System& system; | 568 | Tegra::MemoryManager& gpu_memory; |
| 569 | Core::Memory::Memory& cpu_memory; | ||
| 571 | 570 | ||
| 572 | std::unique_ptr<StreamBuffer> stream_buffer; | 571 | std::unique_ptr<StreamBuffer> stream_buffer; |
| 573 | BufferType stream_buffer_handle; | 572 | BufferType stream_buffer_handle; |