summaryrefslogtreecommitdiff
path: root/src/core
diff options
context:
space:
mode:
authorGravatar Fernando Sahmkow2022-02-05 18:15:26 +0100
committerGravatar Fernando Sahmkow2022-10-06 21:00:52 +0200
commit4d60410dd979fb688de7735d2b4b25a557bdeac7 (patch)
treeb472c870acc010ef938f8d0d478d5e511f94aa11 /src/core
parentVulkan: Fix Scissor on Clears (diff)
downloadyuzu-4d60410dd979fb688de7735d2b4b25a557bdeac7.tar.gz
yuzu-4d60410dd979fb688de7735d2b4b25a557bdeac7.tar.xz
yuzu-4d60410dd979fb688de7735d2b4b25a557bdeac7.zip
MemoryManager: initial multi-paging system implementation.
Diffstat (limited to 'src/core')
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 45
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 1
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvmap.cpp | 10
3 files changed, 36 insertions, 20 deletions
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index db2a6c3b2..d95a88393 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -133,7 +133,8 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
133 const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits}; 133 const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
134 vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages); 134 vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
135 135
136 gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, VM::PAGE_SIZE_BITS); 136 gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
137 VM::PAGE_SIZE_BITS);
137 system.GPU().InitAddressSpace(*gmmu); 138 system.GPU().InitAddressSpace(*gmmu);
138 vm.initialised = true; 139 vm.initialised = true;
139 140
@@ -189,6 +190,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
189 .size = size, 190 .size = size,
190 .page_size = params.page_size, 191 .page_size = params.page_size,
191 .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None, 192 .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
193 .big_pages = params.page_size != VM::YUZU_PAGESIZE,
192 }; 194 };
193 195
194 std::memcpy(output.data(), &params, output.size()); 196 std::memcpy(output.data(), &params, output.size());
@@ -209,7 +211,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
209 // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state 211 // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
210 // Only FreeSpace can unmap them fully 212 // Only FreeSpace can unmap them fully
211 if (mapping->sparse_alloc) 213 if (mapping->sparse_alloc)
212 gmmu->MapSparse(offset, mapping->size); 214 gmmu->MapSparse(offset, mapping->size, mapping->big_page);
213 else 215 else
214 gmmu->Unmap(offset, mapping->size); 216 gmmu->Unmap(offset, mapping->size);
215 217
@@ -294,8 +296,9 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
294 return NvResult::BadValue; 296 return NvResult::BadValue;
295 } 297 }
296 298
299 const bool use_big_pages = alloc->second.big_pages;
297 if (!entry.handle) { 300 if (!entry.handle) {
298 gmmu->MapSparse(virtual_address, size); 301 gmmu->MapSparse(virtual_address, size, use_big_pages);
299 } else { 302 } else {
300 auto handle{nvmap.GetHandle(entry.handle)}; 303 auto handle{nvmap.GetHandle(entry.handle)};
301 if (!handle) { 304 if (!handle) {
@@ -306,7 +309,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
306 handle->address + 309 handle->address +
307 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; 310 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
308 311
309 gmmu->Map(virtual_address, cpu_address, size); 312 gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
310 } 313 }
311 } 314 }
312 315
@@ -345,7 +348,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
345 u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; 348 u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
346 VAddr cpu_address{mapping->ptr + params.buffer_offset}; 349 VAddr cpu_address{mapping->ptr + params.buffer_offset};
347 350
348 gmmu->Map(gpu_address, cpu_address, params.mapping_size); 351 gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
349 352
350 return NvResult::Success; 353 return NvResult::Success;
351 } catch ([[maybe_unused]] const std::out_of_range& e) { 354 } catch ([[maybe_unused]] const std::out_of_range& e) {
@@ -363,6 +366,17 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
363 VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)}; 366 VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
364 u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; 367 u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
365 368
369 bool big_page{[&]() {
370 if (Common::IsAligned(handle->align, vm.big_page_size))
371 return true;
372 else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
373 return false;
374 else {
375 UNREACHABLE();
376 return false;
377 }
378 }()};
379
366 if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { 380 if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
367 auto alloc{allocation_map.upper_bound(params.offset)}; 381 auto alloc{allocation_map.upper_bound(params.offset)};
368 382
@@ -372,23 +386,14 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
372 return NvResult::BadValue; 386 return NvResult::BadValue;
373 } 387 }
374 388
375 gmmu->Map(params.offset, cpu_address, size); 389 const bool use_big_pages = alloc->second.big_pages && big_page;
390 gmmu->Map(params.offset, cpu_address, size, use_big_pages);
376 391
377 auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, false, 392 auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
378 alloc->second.sparse)}; 393 use_big_pages, alloc->second.sparse)};
379 alloc->second.mappings.push_back(mapping); 394 alloc->second.mappings.push_back(mapping);
380 mapping_map[params.offset] = mapping; 395 mapping_map[params.offset] = mapping;
381 } else { 396 } else {
382 bool big_page{[&]() {
383 if (Common::IsAligned(handle->align, vm.big_page_size))
384 return true;
385 else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
386 return false;
387 else {
388 UNREACHABLE();
389 return false;
390 }
391 }()};
392 397
393 auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; 398 auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
394 u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE}; 399 u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
@@ -402,7 +407,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
402 return NvResult::InsufficientMemory; 407 return NvResult::InsufficientMemory;
403 } 408 }
404 409
405 gmmu->Map(params.offset, cpu_address, size); 410 gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
406 411
407 auto mapping{ 412 auto mapping{
408 std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)}; 413 std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
@@ -439,7 +444,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
439 // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state 444 // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
440 // Only FreeSpace can unmap them fully 445 // Only FreeSpace can unmap them fully
441 if (mapping->sparse_alloc) { 446 if (mapping->sparse_alloc) {
442 gmmu->MapSparse(params.offset, mapping->size); 447 gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
443 } else { 448 } else {
444 gmmu->Unmap(params.offset, mapping->size); 449 gmmu->Unmap(params.offset, mapping->size);
445 } 450 }
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 1d27739e2..12e881f0d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -177,6 +177,7 @@ private:
177 std::list<std::shared_ptr<Mapping>> mappings; 177 std::list<std::shared_ptr<Mapping>> mappings;
178 u32 page_size; 178 u32 page_size;
179 bool sparse; 179 bool sparse;
180 bool big_pages;
180 }; 181 };
181 182
182 std::map<u64, std::shared_ptr<Mapping>> 183 std::map<u64, std::shared_ptr<Mapping>>
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 279997e81..992c117f1 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -9,6 +9,8 @@
9#include "common/assert.h" 9#include "common/assert.h"
10#include "common/logging/log.h" 10#include "common/logging/log.h"
11#include "core/core.h" 11#include "core/core.h"
12#include "core/hle/kernel/k_page_table.h"
13#include "core/hle/kernel/k_process.h"
12#include "core/hle/service/nvdrv/core/container.h" 14#include "core/hle/service/nvdrv/core/container.h"
13#include "core/hle/service/nvdrv/core/nvmap.h" 15#include "core/hle/service/nvdrv/core/nvmap.h"
14#include "core/hle/service/nvdrv/devices/nvmap.h" 16#include "core/hle/service/nvdrv/devices/nvmap.h"
@@ -136,6 +138,10 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
136 LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); 138 LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
137 return result; 139 return result;
138 } 140 }
141 ASSERT(system.CurrentProcess()
142 ->PageTable()
143 .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
144 .IsSuccess());
139 std::memcpy(output.data(), &params, sizeof(params)); 145 std::memcpy(output.data(), &params, sizeof(params));
140 return result; 146 return result;
141} 147}
@@ -256,6 +262,10 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
256 } 262 }
257 263
258 if (auto freeInfo{file.FreeHandle(params.handle, false)}) { 264 if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
265 ASSERT(system.CurrentProcess()
266 ->PageTable()
267 .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
268 .IsSuccess());
259 params.address = freeInfo->address; 269 params.address = freeInfo->address;
260 params.size = static_cast<u32>(freeInfo->size); 270 params.size = static_cast<u32>(freeInfo->size);
261 params.flags.raw = 0; 271 params.flags.raw = 0;