| Field | Value |
|---|---|
| author | 2019-05-14 06:40:33 -0400 |
| committer | 2019-05-14 06:40:33 -0400 |
| commit | c4d549919fbcae062d7bfc6ecb4162fdbcf858f4 (patch) |
| tree | 1077cf9d4fff4ee0dc63c4d2006c712d42afa006 |
| parent | Merge pull request #2461 from lioncash/unused-var (diff) |
| parent | video_core/memory_manager: Mark IsBlockContinuous() as a const member function (diff) |
Merge pull request #2462 from lioncash/video-mm
video_core/memory_manager: Minor tidying
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/video_core/memory_manager.cpp | 6 |
| -rw-r--r-- | src/video_core/memory_manager.h | 31 |
2 files changed, 20 insertions, 17 deletions
```diff
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 6c98c6701..5d8d126c1 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -25,6 +25,8 @@ MemoryManager::MemoryManager(VideoCore::RasterizerInterface& rasterizer) : raste
     UpdatePageTableForVMA(initial_vma);
 }
 
+MemoryManager::~MemoryManager() = default;
+
 GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
     const u64 aligned_size{Common::AlignUp(size, page_size)};
     const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
@@ -199,11 +201,11 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
     return {};
 }
 
-bool MemoryManager::IsBlockContinous(const GPUVAddr start, const std::size_t size) {
+bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const {
     const GPUVAddr end = start + size;
     const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
     const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
-    const std::size_t range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
+    const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
     return range == size;
 }
 
```
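For readers unfamiliar with the check above, here is a small self-contained sketch of what "continuous in host memory" means; it is not yuzu code (the page size, the two-page table, and every name are invented for the demo), but it uses the same arithmetic: the host pointers backing the two ends of the guest range must be exactly `size` bytes apart.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

namespace {

constexpr std::uint64_t kPageSize = 0x1000; // demo-only page size

// Two guest pages backed by separate heap allocations, so they are
// generally not adjacent in host memory.
std::vector<std::uint8_t> page0(kPageSize);
std::vector<std::uint8_t> page1(kPageSize);

// Demo stand-in for MemoryManager::GetPointer(): map a guest GPU address to
// the host byte backing it.
std::uint8_t* GetPointer(std::uint64_t gpu_addr) {
    const std::uint64_t page = gpu_addr / kPageSize;
    const std::uint64_t offset = gpu_addr % kPageSize;
    if (page == 0) {
        return page0.data() + offset;
    }
    if (page == 1) {
        return page1.data() + offset;
    }
    return nullptr;
}

// Same idea as IsBlockContinuous() above: the range [start, start + size) is
// continuous when its end pointers are exactly `size` bytes apart on the host.
bool IsBlockContinuous(std::uint64_t start, std::size_t size) {
    const auto host_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
    const auto host_end = reinterpret_cast<std::uintptr_t>(GetPointer(start + size));
    return static_cast<std::size_t>(host_end - host_start) == size;
}

} // namespace

int main() {
    std::cout << std::boolalpha;
    // Entirely inside one guest page: always continuous.
    std::cout << IsBlockContinuous(0x000, 0x100) << '\n';  // true
    // Straddles the page boundary: continuous only if the two host
    // allocations happen to be adjacent, which they usually are not.
    std::cout << IsBlockContinuous(0xF00, 0x200) << '\n';  // usually false
}
```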
```diff
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index e4f0c4bd6..113f9d8f3 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -47,7 +47,8 @@ struct VirtualMemoryArea {
 
 class MemoryManager final {
 public:
-    MemoryManager(VideoCore::RasterizerInterface& rasterizer);
+    explicit MemoryManager(VideoCore::RasterizerInterface& rasterizer);
+    ~MemoryManager();
 
     GPUVAddr AllocateSpace(u64 size, u64 align);
     GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align);
@@ -65,18 +66,18 @@ public:
     u8* GetPointer(GPUVAddr addr);
     const u8* GetPointer(GPUVAddr addr) const;
 
-    // Returns true if the block is continous in host memory, false otherwise
-    bool IsBlockContinous(const GPUVAddr start, const std::size_t size);
+    /// Returns true if the block is continuous in host memory, false otherwise
+    bool IsBlockContinuous(GPUVAddr start, std::size_t size) const;
 
     /**
      * ReadBlock and WriteBlock are full read and write operations over virtual
-     * GPU Memory. It's important to use these when GPU memory may not be continous
+     * GPU Memory. It's important to use these when GPU memory may not be continuous
      * in the Host Memory counterpart. Note: This functions cause Host GPU Memory
      * Flushes and Invalidations, respectively to each operation.
      */
-    void ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const;
-    void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size);
-    void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size);
+    void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
+    void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
+    void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
 
     /**
      * ReadBlockUnsafe and WriteBlockUnsafe are special versions of ReadBlock and
@@ -88,9 +89,9 @@ public:
      * WriteBlockUnsafe instead of WriteBlock since it shouldn't invalidate the texture
      * being flushed.
      */
-    void ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const;
-    void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size);
-    void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size);
+    void ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
+    void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
+    void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
 
 private:
     using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
@@ -111,10 +112,10 @@ private:
     /**
      * Maps an unmanaged host memory pointer at a given address.
      *
      * @param target The guest address to start the mapping at.
      * @param memory The memory to be mapped.
-     * @param size Size of the mapping.
-     * @param state MemoryState tag to attach to the VMA.
+     * @param size Size of the mapping in bytes.
+     * @param backing_addr The base address of the range to back this mapping.
      */
     VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr);
 
@@ -124,7 +125,7 @@ private:
     /// Converts a VMAHandle to a mutable VMAIter.
     VMAIter StripIterConstness(const VMAHandle& iter);
 
-    /// Marks as the specfied VMA as allocated.
+    /// Marks as the specified VMA as allocated.
     VMAIter Allocate(VMAIter vma);
 
     /**
```
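As a usage illustration of the interface documented above, the following hedged sketch shows how the now-const IsBlockContinuous() can steer a read between the zero-copy GetPointer() path and the page-walking ReadBlock() path. The caller and its helper are hypothetical, not taken from the yuzu tree; the sketch assumes the class lives in namespace Tegra as elsewhere in video_core and that u8 and GPUVAddr are visible through the included header.

```cpp
#include <cstddef>
#include <cstring>
#include <vector>

#include "video_core/memory_manager.h"

// Hypothetical helper, not part of the yuzu tree: copy `size` bytes of guest
// GPU memory starting at `gpu_addr` into a host-side buffer.
std::vector<u8> ReadGuestData(const Tegra::MemoryManager& memory_manager,
                              GPUVAddr gpu_addr, std::size_t size) {
    std::vector<u8> data(size);
    if (memory_manager.IsBlockContinuous(gpu_addr, size)) {
        // The whole range is backed by one contiguous host span, so a single
        // memcpy through GetPointer() is enough.
        std::memcpy(data.data(), memory_manager.GetPointer(gpu_addr), size);
    } else {
        // The range is split across host mappings; ReadBlock() walks the
        // pages (and, per the doc comment above, flushes host GPU memory).
        memory_manager.ReadBlock(gpu_addr, data.data(), size);
    }
    return data;
}
```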