author    Lioncash 2018-08-28 10:57:56 -0400
committer Lioncash 2018-08-28 11:11:50 -0400
commit    45fb74d2623182b38af422bc6c8a51040860143f (patch)
tree      ad65e21b3984d876241fc478d7624abfceb55e86 /src/core
parent    Merge pull request #1165 from bunnei/shader-cache (diff)
gpu: Make memory_manager private
Makes the class interface consistent and provides accessors for obtaining a reference to the memory manager instance. Since the accessors return references, our flimsier uses of const become apparent: const does not propagate through pointers the way one would typically expect, so this change makes our mutable state more obvious in some places.
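
A minimal sketch of the accessor pattern and the const point above, using a hypothetical MemoryManagerStub stand-in (only the GPU class name and the MemoryManager() accessor name come from the diff below):

#include <memory>

namespace Tegra {

struct MemoryManagerStub { // hypothetical stand-in for the real memory manager
    int state = 0;
};

class GPU {
public:
    GPU() : memory_manager{std::make_unique<MemoryManagerStub>()} {}

    // Non-const overload: mutable access only through a mutable GPU.
    MemoryManagerStub& MemoryManager() {
        return *memory_manager;
    }

    // Const overload: a const GPU hands out only a const reference. With a
    // public pointer member, a const GPU could still mutate the pointee,
    // because const applies to the pointer itself, not to what it points at.
    const MemoryManagerStub& MemoryManager() const {
        return *memory_manager;
    }

private:
    std::unique_ptr<MemoryManagerStub> memory_manager;
};

} // namespace Tegra

Call sites then trade pointer syntax for the accessor, e.g. gpu.memory_manager->AllocateSpace(...) becomes gpu.MemoryManager().AllocateSpace(...), as the hunks below show.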
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp  12
-rw-r--r--  src/core/memory.cpp  4
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index be2b79256..75487c4e8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -56,9 +56,9 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>&
     auto& gpu = Core::System::GetInstance().GPU();
     const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
     if (params.flags & 1) {
-        params.offset = gpu.memory_manager->AllocateSpace(params.offset, size, 1);
+        params.offset = gpu.MemoryManager().AllocateSpace(params.offset, size, 1);
     } else {
-        params.offset = gpu.memory_manager->AllocateSpace(size, params.align);
+        params.offset = gpu.MemoryManager().AllocateSpace(size, params.align);
     }

     std::memcpy(output.data(), &params, output.size());
@@ -88,7 +88,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
         u64 size = static_cast<u64>(entry.pages) << 0x10;
         ASSERT(size <= object->size);

-        Tegra::GPUVAddr returned = gpu.memory_manager->MapBufferEx(object->addr, offset, size);
+        Tegra::GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size);
         ASSERT(returned == offset);
     }
     std::memcpy(output.data(), entries.data(), output.size());
@@ -125,9 +125,9 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
     auto& gpu = Core::System::GetInstance().GPU();

     if (params.flags & 1) {
-        params.offset = gpu.memory_manager->MapBufferEx(object->addr, params.offset, object->size);
+        params.offset = gpu.MemoryManager().MapBufferEx(object->addr, params.offset, object->size);
     } else {
-        params.offset = gpu.memory_manager->MapBufferEx(object->addr, object->size);
+        params.offset = gpu.MemoryManager().MapBufferEx(object->addr, object->size);
     }

     // Create a new mapping entry for this operation.
@@ -161,7 +161,7 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
               itr->second.size);

     auto& gpu = system_instance.GPU();
-    params.offset = gpu.memory_manager->UnmapBuffer(params.offset, itr->second.size);
+    params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);

     buffer_mappings.erase(itr->second.offset);

diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 1133bcbaf..bc34bfd6d 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -264,7 +264,7 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
     u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
     for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
         boost::optional<VAddr> maybe_vaddr =
-            Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(gpu_addr);
+            Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
         // The GPU <-> CPU virtual memory mapping is not 1:1
         if (!maybe_vaddr) {
             LOG_ERROR(HW_Memory,
@@ -346,7 +346,7 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
         const VAddr overlap_end = std::min(end, region_end);

         const std::vector<Tegra::GPUVAddr> gpu_addresses =
-            system_instance.GPU().memory_manager->CpuToGpuAddress(overlap_start);
+            system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);

         if (gpu_addresses.empty()) {
             return;
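
As the RasterizerMarkRegionCached hunk shows, GpuToCpuAddress returns a boost::optional<VAddr> because the GPU <-> CPU mapping is not 1:1. A minimal sketch of consuming such a result, with a hypothetical stand-in for the lookup (only the optional-check pattern mirrors the diff):

#include <cstdint>
#include <cstdio>
#include <boost/optional.hpp>

using VAddr = std::uint64_t;

// Hypothetical stand-in for MemoryManager::GpuToCpuAddress: yields
// boost::none when the GPU address has no CPU-side mapping.
boost::optional<VAddr> GpuToCpuAddress(std::uint64_t gpu_addr) {
    if (gpu_addr == 0) {
        return boost::none;
    }
    return VAddr{gpu_addr};
}

void MarkPage(std::uint64_t gpu_addr) {
    const boost::optional<VAddr> maybe_vaddr = GpuToCpuAddress(gpu_addr);
    if (!maybe_vaddr) {
        // Mirrors the LOG_ERROR path in the hunk: skip pages with no mapping.
        std::fprintf(stderr, "unmapped GPU address %llx\n",
                     static_cast<unsigned long long>(gpu_addr));
        return;
    }
    const VAddr vaddr = *maybe_vaddr; // dereference only after the check
    (void)vaddr; // ... mark/flush the CPU page at vaddr ...
}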