Diffstat (limited to 'src')

 src/core/memory.cpp                                | 115
 src/core/memory.h                                  |  10
 src/video_core/buffer_cache/buffer_block.h         |  42
 src/video_core/buffer_cache/buffer_cache.h         | 144
 src/video_core/buffer_cache/map_interval.h         |  12
 src/video_core/memory_manager.h                    |   5
 src/video_core/renderer_opengl/gl_buffer_cache.cpp |   8
 src/video_core/renderer_opengl/gl_buffer_cache.h   |   4
 src/video_core/renderer_opengl/gl_rasterizer.cpp   |   4
 src/video_core/renderer_vulkan/vk_buffer_cache.cpp |   8
 src/video_core/renderer_vulkan/vk_buffer_cache.h   |   4
 src/video_core/renderer_vulkan/vk_rasterizer.cpp   |   4
 12 files changed, 254 insertions(+), 106 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9ceb7fabc..6061d37ae 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -257,10 +257,59 @@ struct Memory::Impl {
         }
     }
 
+    void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
+                         const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+
+        std::size_t remaining_size = size;
+        std::size_t page_index = src_addr >> PAGE_BITS;
+        std::size_t page_offset = src_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, src_addr, size);
+                std::memset(dest_buffer, 0, copy_amount);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+
+                const u8* const src_ptr =
+                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                std::memcpy(dest_buffer, src_ptr, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                std::memcpy(dest_buffer, host_ptr, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+            remaining_size -= copy_amount;
+        }
+    }
+
     void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
         ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size);
     }
 
+    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+        ReadBlockUnsafe(*system.CurrentProcess(), src_addr, dest_buffer, size);
+    }
+
     void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
                     const std::size_t size) {
         const auto& page_table = process.VMManager().page_table;
@@ -305,10 +354,57 @@ struct Memory::Impl {
         }
     }
 
+    void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr,
+                          const void* src_buffer, const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+        std::size_t remaining_size = size;
+        std::size_t page_index = dest_addr >> PAGE_BITS;
+        std::size_t page_offset = dest_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, dest_addr, size);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+
+                u8* const dest_ptr =
+                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                std::memcpy(dest_ptr, src_buffer, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                std::memcpy(host_ptr, src_buffer, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+            remaining_size -= copy_amount;
+        }
+    }
+
     void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
         WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size);
     }
 
+    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+        WriteBlockUnsafe(*system.CurrentProcess(), dest_addr, src_buffer, size);
+    }
+
     void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) {
         const auto& page_table = process.VMManager().page_table;
         std::size_t remaining_size = size;
@@ -696,6 +792,15 @@ void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_
     impl->ReadBlock(src_addr, dest_buffer, size);
 }
 
+void Memory::ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr,
+                             void* dest_buffer, const std::size_t size) {
+    impl->ReadBlockUnsafe(process, src_addr, dest_buffer, size);
+}
+
+void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
+}
+
 void Memory::WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
                         std::size_t size) {
     impl->WriteBlock(process, dest_addr, src_buffer, size);
@@ -705,6 +810,16 @@ void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std
     impl->WriteBlock(dest_addr, src_buffer, size);
 }
 
+void Memory::WriteBlockUnsafe(const Kernel::Process& process, VAddr dest_addr,
+                              const void* src_buffer, std::size_t size) {
+    impl->WriteBlockUnsafe(process, dest_addr, src_buffer, size);
+}
+
+void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
+                              const std::size_t size) {
+    impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
+}
+
 void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) {
     impl->ZeroBlock(process, dest_addr, size);
 }
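Compared with ReadBlock/WriteBlock, the Unsafe variants perform the same page-table walk but copy straight through the host pointer for RasterizerCachedMemory pages, rather than flushing (for reads) or invalidating (for writes) the rasterizer caches first. They are only appropriate when the caller manages GPU coherency itself, as the buffer cache below does. A minimal usage sketch, assuming a Core::System reference named system is in scope:

    // Gather a possibly page-crossing guest range without forcing a GPU flush.
    std::vector<u8> staging(size);
    system.Memory().ReadBlockUnsafe(src_addr, staging.data(), size);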
diff --git a/src/core/memory.h b/src/core/memory.h
index 8913a9da4..97750f851 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -294,6 +294,9 @@ public:
     void ReadBlock(const Kernel::Process& process, VAddr src_addr, void* dest_buffer,
                    std::size_t size);
 
+    void ReadBlockUnsafe(const Kernel::Process& process, VAddr src_addr, void* dest_buffer,
+                         std::size_t size);
+
     /**
      * Reads a contiguous block of bytes from the current process' address space.
      *
@@ -312,6 +315,8 @@ public:
      */
     void ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size);
 
+    void ReadBlockUnsafe(VAddr src_addr, void* dest_buffer, std::size_t size);
+
     /**
      * Writes a range of bytes into a given process' address space at the specified
      * virtual address.
@@ -335,6 +340,9 @@ public:
     void WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
                     std::size_t size);
 
+    void WriteBlockUnsafe(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
+                          std::size_t size);
+
     /**
      * Writes a range of bytes into the current process' address space at the specified
      * virtual address.
@@ -356,6 +364,8 @@ public:
      */
     void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size);
 
+    void WriteBlockUnsafe(VAddr dest_addr, const void* src_buffer, std::size_t size);
+
     /**
      * Fills the specified address range within a process' address space with zeroes.
      *
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
index 4b9193182..e35ee0b67 100644
--- a/src/video_core/buffer_cache/buffer_block.h
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -15,37 +15,29 @@ namespace VideoCommon {
 
 class BufferBlock {
 public:
-    bool Overlaps(const CacheAddr start, const CacheAddr end) const {
-        return (cache_addr < end) && (cache_addr_end > start);
+    bool Overlaps(const VAddr start, const VAddr end) const {
+        return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
-        return cache_addr <= other_start && other_end <= cache_addr_end;
+    bool IsInside(const VAddr other_start, const VAddr other_end) const {
+        return cpu_addr <= other_start && other_end <= cpu_addr_end;
     }
 
-    u8* GetWritableHostPtr() const {
-        return FromCacheAddr(cache_addr);
+    std::size_t GetOffset(const VAddr in_addr) {
+        return static_cast<std::size_t>(in_addr - cpu_addr);
     }
 
-    u8* GetWritableHostPtr(std::size_t offset) const {
-        return FromCacheAddr(cache_addr + offset);
+    VAddr GetCpuAddr() const {
+        return cpu_addr;
     }
 
-    std::size_t GetOffset(const CacheAddr in_addr) {
-        return static_cast<std::size_t>(in_addr - cache_addr);
+    VAddr GetCpuAddrEnd() const {
+        return cpu_addr_end;
     }
 
-    CacheAddr GetCacheAddr() const {
-        return cache_addr;
-    }
-
-    CacheAddr GetCacheAddrEnd() const {
-        return cache_addr_end;
-    }
-
-    void SetCacheAddr(const CacheAddr new_addr) {
-        cache_addr = new_addr;
-        cache_addr_end = new_addr + size;
+    void SetCpuAddr(const VAddr new_addr) {
+        cpu_addr = new_addr;
+        cpu_addr_end = new_addr + size;
     }
 
     std::size_t GetSize() const {
@@ -61,14 +53,14 @@ public:
     }
 
 protected:
-    explicit BufferBlock(CacheAddr cache_addr, const std::size_t size) : size{size} {
-        SetCacheAddr(cache_addr);
+    explicit BufferBlock(VAddr cpu_addr, const std::size_t size) : size{size} {
+        SetCpuAddr(cpu_addr);
     }
     ~BufferBlock() = default;
 
 private:
-    CacheAddr cache_addr{};
-    CacheAddr cache_addr_end{};
+    VAddr cpu_addr{};
+    VAddr cpu_addr_end{};
     std::size_t size{};
    u64 epoch{};
 };
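With BufferBlock keyed on guest VAddr, Overlaps checks the half-open interval [cpu_addr, cpu_addr_end), so a block that merely touches a queried range at its end point does not count as overlapping. An illustration with made-up values, for a block spanning [0x1000, 0x3000):

    block.Overlaps(0x0800, 0x1000); // false: the end bound is exclusive
    block.Overlaps(0x2FFF, 0x4000); // true: the block's last byte is in range
    block.GetOffset(0x1234);        // 0x234, byte offset from the block base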
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 186aca61d..262d0fc6e 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -37,28 +37,45 @@
                                            bool is_written = false, bool use_fast_cbuf = false) {
         std::lock_guard lock{mutex};
 
-        auto& memory_manager = system.GPU().MemoryManager();
-        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
-        if (!host_ptr) {
+        const std::optional<VAddr> cpu_addr_opt =
+            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+
+        if (!cpu_addr_opt) {
             return {GetEmptyBuffer(size), 0};
         }
-        const auto cache_addr = ToCacheAddr(host_ptr);
+
+        VAddr cpu_addr = *cpu_addr_opt;
 
         // Cache management is a big overhead, so only cache entries with a given size.
         // TODO: Figure out which size is the best for given games.
         constexpr std::size_t max_stream_size = 0x800;
         if (use_fast_cbuf || size < max_stream_size) {
-            if (!is_written && !IsRegionWritten(cache_addr, cache_addr + size - 1)) {
+            if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
+                auto& memory_manager = system.GPU().MemoryManager();
                 if (use_fast_cbuf) {
-                    return ConstBufferUpload(host_ptr, size);
+                    if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+                        return ConstBufferUpload(host_ptr, size);
+                    } else {
+                        staging_buffer.resize(size);
+                        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+                        return ConstBufferUpload(staging_buffer.data(), size);
+                    }
                 } else {
-                    return StreamBufferUpload(host_ptr, size, alignment);
+                    if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+                        return StreamBufferUpload(host_ptr, size, alignment);
+                    } else {
+                        staging_buffer.resize(size);
+                        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+                        return StreamBufferUpload(staging_buffer.data(), size, alignment);
+                    }
                 }
             }
         }
 
-        auto block = GetBlock(cache_addr, size);
-        auto map = MapAddress(block, gpu_addr, cache_addr, size);
+        auto block = GetBlock(cpu_addr, size);
+        auto map = MapAddress(block, gpu_addr, cpu_addr, size);
         if (is_written) {
             map->MarkAsModified(true, GetModifiedTicks());
             if (!map->IsWritten()) {
@@ -71,7 +88,7 @@ public:
             }
         }
 
-        const u64 offset = static_cast<u64>(block->GetOffset(cache_addr));
+        const u64 offset = static_cast<u64>(block->GetOffset(cpu_addr));
 
         return {ToHandle(block), offset};
     }
@@ -112,7 +129,7 @@ public:
     }
 
     /// Write any cached resources overlapping the specified region back to memory
-    void FlushRegion(CacheAddr addr, std::size_t size) {
+    void FlushRegion(VAddr addr, std::size_t size) {
         std::lock_guard lock{mutex};
 
         std::vector<MapInterval> objects = GetMapsInRange(addr, size);
@@ -127,7 +144,7 @@
     }
 
     /// Mark the specified region as being invalidated
-    void InvalidateRegion(CacheAddr addr, u64 size) {
+    void InvalidateRegion(VAddr addr, u64 size) {
         std::lock_guard lock{mutex};
 
         std::vector<MapInterval> objects = GetMapsInRange(addr, size);
@@ -152,7 +169,7 @@ protected:
 
     virtual void WriteBarrier() = 0;
 
-    virtual TBuffer CreateBlock(CacheAddr cache_addr, std::size_t size) = 0;
+    virtual TBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
 
     virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
                                  const u8* data) = 0;
@@ -169,20 +186,17 @@ protected:
 
     /// Register an object into the cache
    void Register(const MapInterval& new_map, bool inherit_written = false) {
-        const CacheAddr cache_ptr = new_map->GetStart();
-        const std::optional<VAddr> cpu_addr =
-            system.GPU().MemoryManager().GpuToCpuAddress(new_map->GetGpuAddress());
-        if (!cache_ptr || !cpu_addr) {
+        const VAddr cpu_addr = new_map->GetStart();
+        if (!cpu_addr) {
             LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}",
                          new_map->GetGpuAddress());
             return;
         }
         const std::size_t size = new_map->GetEnd() - new_map->GetStart();
-        new_map->SetCpuAddress(*cpu_addr);
         new_map->MarkAsRegistered(true);
         const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
         mapped_addresses.insert({interval, new_map});
-        rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+        rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
         if (inherit_written) {
             MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
             new_map->MarkAsWritten(true);
@@ -192,7 +206,7 @@ protected:
     /// Unregisters an object from the cache
     void Unregister(MapInterval& map) {
         const std::size_t size = map->GetEnd() - map->GetStart();
-        rasterizer.UpdatePagesCachedCount(map->GetCpuAddress(), size, -1);
+        rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
         map->MarkAsRegistered(false);
         if (map->IsWritten()) {
             UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
@@ -202,32 +216,39 @@ protected:
     }
 
 private:
-    MapInterval CreateMap(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) {
+    MapInterval CreateMap(const VAddr start, const VAddr end, const GPUVAddr gpu_addr) {
         return std::make_shared<MapIntervalBase>(start, end, gpu_addr);
     }
 
-    MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr,
-                           const CacheAddr cache_addr, const std::size_t size) {
+    MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr,
+                           const std::size_t size) {
 
-        std::vector<MapInterval> overlaps = GetMapsInRange(cache_addr, size);
+        std::vector<MapInterval> overlaps = GetMapsInRange(cpu_addr, size);
         if (overlaps.empty()) {
-            const CacheAddr cache_addr_end = cache_addr + size;
-            MapInterval new_map = CreateMap(cache_addr, cache_addr_end, gpu_addr);
-            u8* host_ptr = FromCacheAddr(cache_addr);
-            UploadBlockData(block, block->GetOffset(cache_addr), size, host_ptr);
+            auto& memory_manager = system.GPU().MemoryManager();
+            const VAddr cpu_addr_end = cpu_addr + size;
+            MapInterval new_map = CreateMap(cpu_addr, cpu_addr_end, gpu_addr);
+            if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                u8* host_ptr = memory_manager.GetPointer(gpu_addr);
+                UploadBlockData(block, block->GetOffset(cpu_addr), size, host_ptr);
+            } else {
+                staging_buffer.resize(size);
+                memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+                UploadBlockData(block, block->GetOffset(cpu_addr), size, staging_buffer.data());
+            }
             Register(new_map);
             return new_map;
         }
 
-        const CacheAddr cache_addr_end = cache_addr + size;
+        const VAddr cpu_addr_end = cpu_addr + size;
         if (overlaps.size() == 1) {
             MapInterval& current_map = overlaps[0];
-            if (current_map->IsInside(cache_addr, cache_addr_end)) {
+            if (current_map->IsInside(cpu_addr, cpu_addr_end)) {
                 return current_map;
             }
         }
-        CacheAddr new_start = cache_addr;
-        CacheAddr new_end = cache_addr_end;
+        VAddr new_start = cpu_addr;
+        VAddr new_end = cpu_addr_end;
         bool write_inheritance = false;
         bool modified_inheritance = false;
         // Calculate new buffer parameters
@@ -237,7 +258,7 @@ private:
             write_inheritance |= overlap->IsWritten();
             modified_inheritance |= overlap->IsModified();
         }
-        GPUVAddr new_gpu_addr = gpu_addr + new_start - cache_addr;
+        GPUVAddr new_gpu_addr = gpu_addr + new_start - cpu_addr;
         for (auto& overlap : overlaps) {
             Unregister(overlap);
         }
@@ -250,7 +271,7 @@ private:
         return new_map;
     }
 
-    void UpdateBlock(const TBuffer& block, CacheAddr start, CacheAddr end,
+    void UpdateBlock(const TBuffer& block, VAddr start, VAddr end,
                      std::vector<MapInterval>& overlaps) {
         const IntervalType base_interval{start, end};
         IntervalSet interval_set{};
@@ -262,13 +283,15 @@ private:
         for (auto& interval : interval_set) {
             std::size_t size = interval.upper() - interval.lower();
             if (size > 0) {
-                u8* host_ptr = FromCacheAddr(interval.lower());
-                UploadBlockData(block, block->GetOffset(interval.lower()), size, host_ptr);
+                staging_buffer.resize(size);
+                system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
+                UploadBlockData(block, block->GetOffset(interval.lower()), size,
+                                staging_buffer.data());
             }
         }
     }
 
-    std::vector<MapInterval> GetMapsInRange(CacheAddr addr, std::size_t size) {
+    std::vector<MapInterval> GetMapsInRange(VAddr addr, std::size_t size) {
         if (size == 0) {
             return {};
         }
@@ -290,8 +313,9 @@ private:
     void FlushMap(MapInterval map) {
         std::size_t size = map->GetEnd() - map->GetStart();
         TBuffer block = blocks[map->GetStart() >> block_page_bits];
-        u8* host_ptr = FromCacheAddr(map->GetStart());
-        DownloadBlockData(block, block->GetOffset(map->GetStart()), size, host_ptr);
+        staging_buffer.resize(size);
+        DownloadBlockData(block, block->GetOffset(map->GetStart()), size, staging_buffer.data());
+        system.Memory().WriteBlockUnsafe(map->GetStart(), staging_buffer.data(), size);
         map->MarkAsModified(false, 0);
     }
 
@@ -316,14 +340,14 @@ private:
     TBuffer EnlargeBlock(TBuffer buffer) {
         const std::size_t old_size = buffer->GetSize();
         const std::size_t new_size = old_size + block_page_size;
-        const CacheAddr cache_addr = buffer->GetCacheAddr();
-        TBuffer new_buffer = CreateBlock(cache_addr, new_size);
+        const VAddr cpu_addr = buffer->GetCpuAddr();
+        TBuffer new_buffer = CreateBlock(cpu_addr, new_size);
         CopyBlock(buffer, new_buffer, 0, 0, old_size);
         buffer->SetEpoch(epoch);
         pending_destruction.push_back(buffer);
-        const CacheAddr cache_addr_end = cache_addr + new_size - 1;
-        u64 page_start = cache_addr >> block_page_bits;
-        const u64 page_end = cache_addr_end >> block_page_bits;
+        const VAddr cpu_addr_end = cpu_addr + new_size - 1;
+        u64 page_start = cpu_addr >> block_page_bits;
+        const u64 page_end = cpu_addr_end >> block_page_bits;
         while (page_start <= page_end) {
             blocks[page_start] = new_buffer;
             ++page_start;
@@ -334,9 +358,9 @@ private:
     TBuffer MergeBlocks(TBuffer first, TBuffer second) {
         const std::size_t size_1 = first->GetSize();
         const std::size_t size_2 = second->GetSize();
-        const CacheAddr first_addr = first->GetCacheAddr();
-        const CacheAddr second_addr = second->GetCacheAddr();
-        const CacheAddr new_addr = std::min(first_addr, second_addr);
+        const VAddr first_addr = first->GetCpuAddr();
+        const VAddr second_addr = second->GetCpuAddr();
+        const VAddr new_addr = std::min(first_addr, second_addr);
         const std::size_t new_size = size_1 + size_2;
         TBuffer new_buffer = CreateBlock(new_addr, new_size);
         CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
@@ -345,9 +369,9 @@ private:
         second->SetEpoch(epoch);
         pending_destruction.push_back(first);
         pending_destruction.push_back(second);
-        const CacheAddr cache_addr_end = new_addr + new_size - 1;
+        const VAddr cpu_addr_end = new_addr + new_size - 1;
         u64 page_start = new_addr >> block_page_bits;
-        const u64 page_end = cache_addr_end >> block_page_bits;
+        const u64 page_end = cpu_addr_end >> block_page_bits;
         while (page_start <= page_end) {
             blocks[page_start] = new_buffer;
             ++page_start;
@@ -355,18 +379,18 @@ private:
         return new_buffer;
     }
 
-    TBuffer GetBlock(const CacheAddr cache_addr, const std::size_t size) {
+    TBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
         TBuffer found{};
-        const CacheAddr cache_addr_end = cache_addr + size - 1;
-        u64 page_start = cache_addr >> block_page_bits;
-        const u64 page_end = cache_addr_end >> block_page_bits;
+        const VAddr cpu_addr_end = cpu_addr + size - 1;
+        u64 page_start = cpu_addr >> block_page_bits;
+        const u64 page_end = cpu_addr_end >> block_page_bits;
         while (page_start <= page_end) {
             auto it = blocks.find(page_start);
             if (it == blocks.end()) {
                 if (found) {
                     found = EnlargeBlock(found);
                 } else {
-                    const CacheAddr start_addr = (page_start << block_page_bits);
+                    const VAddr start_addr = (page_start << block_page_bits);
                     found = CreateBlock(start_addr, block_page_size);
                     blocks[page_start] = found;
                 }
@@ -386,7 +410,7 @@ private:
         return found;
     }
 
-    void MarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+    void MarkRegionAsWritten(const VAddr start, const VAddr end) {
         u64 page_start = start >> write_page_bit;
         const u64 page_end = end >> write_page_bit;
         while (page_start <= page_end) {
@@ -400,7 +424,7 @@ private:
         }
     }
 
-    void UnmarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+    void UnmarkRegionAsWritten(const VAddr start, const VAddr end) {
         u64 page_start = start >> write_page_bit;
         const u64 page_end = end >> write_page_bit;
         while (page_start <= page_end) {
@@ -416,7 +440,7 @@ private:
         }
     }
 
-    bool IsRegionWritten(const CacheAddr start, const CacheAddr end) const {
+    bool IsRegionWritten(const VAddr start, const VAddr end) const {
         u64 page_start = start >> write_page_bit;
         const u64 page_end = end >> write_page_bit;
         while (page_start <= page_end) {
@@ -440,8 +464,8 @@ private:
     u64 buffer_offset = 0;
     u64 buffer_offset_base = 0;
 
-    using IntervalSet = boost::icl::interval_set<CacheAddr>;
-    using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>;
+    using IntervalSet = boost::icl::interval_set<VAddr>;
+    using IntervalCache = boost::icl::interval_map<VAddr, MapInterval>;
     using IntervalType = typename IntervalCache::interval_type;
     IntervalCache mapped_addresses;
 
@@ -456,6 +480,8 @@ private:
     u64 epoch = 0;
     u64 modified_ticks = 0;
 
+    std::vector<u8> staging_buffer;
+
     std::recursive_mutex mutex;
 };
 
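All of the new upload paths in this file hinge on the same decision: if the GPU range is granular (fits in a single page), the host pointer from GetPointer covers the whole range and can be read from directly; otherwise the bytes are gathered into the member staging_buffer with ReadBlockUnsafe. A condensed sketch of that decision as a standalone helper (GetUploadSource is a hypothetical name, not part of this patch):

    // Returns a pointer to 'size' contiguous readable bytes for gpu_addr,
    // gathering through the staging vector when the range crosses a page.
    const u8* GetUploadSource(Tegra::MemoryManager& mm, GPUVAddr gpu_addr,
                              std::size_t size, std::vector<u8>& staging) {
        if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
            return mm.GetPointer(gpu_addr); // single page: direct host pointer
        }
        staging.resize(size);
        mm.ReadBlockUnsafe(gpu_addr, staging.data(), size); // page-by-page gather
        return staging.data();
    }

Reusing one member staging_buffer rather than a local vector avoids a heap allocation on every upload; the cache's mutex, which UploadMemory already holds, keeps that shared buffer safe.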
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index 3a104d5cd..b0956029d 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -11,7 +11,7 @@ namespace VideoCommon {
 
 class MapIntervalBase {
 public:
-    MapIntervalBase(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr)
+    MapIntervalBase(const VAddr start, const VAddr end, const GPUVAddr gpu_addr)
         : start{start}, end{end}, gpu_addr{gpu_addr} {}
 
     void SetCpuAddress(VAddr new_cpu_addr) {
@@ -26,7 +26,7 @@ public:
         return gpu_addr;
     }
 
-    bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+    bool IsInside(const VAddr other_start, const VAddr other_end) const {
         return (start <= other_start && other_end <= end);
     }
 
@@ -46,11 +46,11 @@ public:
         return is_registered;
     }
 
-    CacheAddr GetStart() const {
+    VAddr GetStart() const {
         return start;
     }
 
-    CacheAddr GetEnd() const {
+    VAddr GetEnd() const {
         return end;
     }
 
@@ -76,8 +76,8 @@ public:
     }
 
 private:
-    CacheAddr start;
-    CacheAddr end;
+    VAddr start;
+    VAddr end;
     GPUVAddr gpu_addr;
     VAddr cpu_addr{};
     bool is_written{};
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 073bdb491..f4ec77a3d 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -97,6 +97,11 @@ public:
     void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
     void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
 
+    static bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
+        const std::size_t page = (gpu_addr & page_mask) + size;
+        return page <= page_size;
+    }
+
 private:
     using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
     using VMAHandle = VMAMap::const_iterator;
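IsGranularRange reports whether a range stays inside one GPU page: the offset of gpu_addr within its page, plus the size, must not exceed the page size. Only then is the pointer returned by GetPointer valid for the full range; a larger range may span non-contiguous host allocations and has to go through ReadBlockUnsafe instead. Worked arithmetic, assuming for illustration a page_size of 0x1000 (the actual constants are defined elsewhere in this class):

    // gpu_addr = 0x20F00, size = 0x100:
    //   (0x20F00 & 0xFFF) + 0x100 = 0xF00 + 0x100 = 0x1000 <= 0x1000 -> granular
    // gpu_addr = 0x20F00, size = 0x101:
    //   0xF00 + 0x101 = 0x1001 > 0x1000                              -> crosses a page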
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 0375fca17..4eb37a96c 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -21,8 +21,8 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128));
 
-CachedBufferBlock::CachedBufferBlock(CacheAddr cache_addr, const std::size_t size)
-    : VideoCommon::BufferBlock{cache_addr, size} {
+CachedBufferBlock::CachedBufferBlock(VAddr cpu_addr, const std::size_t size)
+    : VideoCommon::BufferBlock{cpu_addr, size} {
     gl_buffer.Create();
     glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
 }
@@ -47,8 +47,8 @@ OGLBufferCache::~OGLBufferCache() {
     glDeleteBuffers(static_cast<GLsizei>(std::size(cbufs)), std::data(cbufs));
 }
 
-Buffer OGLBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
-    return std::make_shared<CachedBufferBlock>(cache_addr, size);
+Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+    return std::make_shared<CachedBufferBlock>(cpu_addr, size);
 }
 
 void OGLBufferCache::WriteBarrier() {
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 8c7145443..d94a11252 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -31,7 +31,7 @@ using GenericBufferCache = VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuf
 
 class CachedBufferBlock : public VideoCommon::BufferBlock {
 public:
-    explicit CachedBufferBlock(CacheAddr cache_addr, const std::size_t size);
+    explicit CachedBufferBlock(VAddr cpu_addr, const std::size_t size);
     ~CachedBufferBlock();
 
     const GLuint* GetHandle() const {
@@ -55,7 +55,7 @@ public:
     }
 
 protected:
-    Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
+    Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
     void WriteBarrier() override;
 
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index a25e7486d..cb4928bbe 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -663,7 +663,7 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
     }
     CacheAddr cache_addr = ToCacheAddr(system.Memory().GetPointer(addr));
     texture_cache.FlushRegion(addr, size);
-    buffer_cache.FlushRegion(cache_addr, size);
+    buffer_cache.FlushRegion(addr, size);
     query_cache.FlushRegion(cache_addr, size);
 }
 
@@ -675,7 +675,7 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
     CacheAddr cache_addr = ToCacheAddr(system.Memory().GetPointer(addr));
     texture_cache.InvalidateRegion(addr, size);
     shader_cache.InvalidateRegion(cache_addr, size);
-    buffer_cache.InvalidateRegion(cache_addr, size);
+    buffer_cache.InvalidateRegion(addr, size);
     query_cache.InvalidateRegion(cache_addr, size);
 }
 
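Note the split this leaves in both rasterizers: texture_cache and buffer_cache now take the guest VAddr directly, while the shader cache (pipeline cache on Vulkan) and query_cache still key on the host-pointer-derived CacheAddr computed on the line above them. A compact view of the two keys (illustration only, not code from this patch):

    VAddr addr;                                        // guest virtual address
    CacheAddr cache_addr =
        ToCacheAddr(system.Memory().GetPointer(addr)); // host-pointer-derived key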
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 1ba544943..326d74f29 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -42,8 +42,8 @@ auto CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
 } // Anonymous namespace
 
 CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
-                                     CacheAddr cache_addr, std::size_t size)
-    : VideoCommon::BufferBlock{cache_addr, size} {
+                                     VAddr cpu_addr, std::size_t size)
+    : VideoCommon::BufferBlock{cpu_addr, size} {
     const vk::BufferCreateInfo buffer_ci({}, static_cast<vk::DeviceSize>(size),
                                          BufferUsage | vk::BufferUsageFlagBits::eTransferSrc |
                                              vk::BufferUsageFlagBits::eTransferDst,
@@ -68,8 +68,8 @@ VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::S
 
 VKBufferCache::~VKBufferCache() = default;
 
-Buffer VKBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
-    return std::make_shared<CachedBufferBlock>(device, memory_manager, cache_addr, size);
+Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+    return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
 }
 
 const vk::Buffer* VKBufferCache::ToHandle(const Buffer& buffer) {
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3f38eed0c..508214618 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -30,7 +30,7 @@ class VKScheduler;
 class CachedBufferBlock final : public VideoCommon::BufferBlock {
 public:
     explicit CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
-                               CacheAddr cache_addr, std::size_t size);
+                               VAddr cpu_addr, std::size_t size);
     ~CachedBufferBlock();
 
     const vk::Buffer* GetHandle() const {
@@ -55,7 +55,7 @@ public:
 protected:
     void WriteBarrier() override {}
 
-    Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
+    Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
     const vk::Buffer* ToHandle(const Buffer& buffer) override;
 
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index cc76d96ea..b6ba5de12 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -501,7 +501,7 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
     }
     CacheAddr cache_addr = ToCacheAddr(system.Memory().GetPointer(addr));
     texture_cache.FlushRegion(addr, size);
-    buffer_cache.FlushRegion(cache_addr, size);
+    buffer_cache.FlushRegion(addr, size);
     query_cache.FlushRegion(cache_addr, size);
 }
 
@@ -512,7 +512,7 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
     CacheAddr cache_addr = ToCacheAddr(system.Memory().GetPointer(addr));
     texture_cache.InvalidateRegion(addr, size);
     pipeline_cache.InvalidateRegion(cache_addr, size);
-    buffer_cache.InvalidateRegion(cache_addr, size);
+    buffer_cache.InvalidateRegion(addr, size);
     query_cache.InvalidateRegion(cache_addr, size);
 }
 