summary refs log tree commit diff
path: root/src/core/memory.cpp
diff options
context:
space:
mode:
author: Fernando S 2023-12-24 16:23:14 +0100
committer: GitHub 2023-12-24 16:23:14 +0100
commit: 05e3db3ac9edbff0e4885ef8b42d3a2427c9f027 (patch)
tree: 2f959b67638ab1134cfca19ac1f041552a68c335 /src/core/memory.cpp
parent: Merge pull request #12412 from ameerj/gl-query-prims (diff)
parent: kernel: fix resource limit imbalance (diff)
download: yuzu-05e3db3ac9edbff0e4885ef8b42d3a2427c9f027.tar.gz
          yuzu-05e3db3ac9edbff0e4885ef8b42d3a2427c9f027.tar.xz
          yuzu-05e3db3ac9edbff0e4885ef8b42d3a2427c9f027.zip
Merge pull request #12394 from liamwhite/per-process-memory
general: properly support multiple memory instances
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r-- src/core/memory.cpp | 28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 169bf4c8c..c7eb32c19 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -45,7 +45,13 @@ struct Memory::Impl {
45 45
46 void SetCurrentPageTable(Kernel::KProcess& process) { 46 void SetCurrentPageTable(Kernel::KProcess& process) {
47 current_page_table = &process.GetPageTable().GetImpl(); 47 current_page_table = &process.GetPageTable().GetImpl();
48 current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); 48
49 if (std::addressof(process) == system.ApplicationProcess() &&
50 Settings::IsFastmemEnabled()) {
51 current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
52 } else {
53 current_page_table->fastmem_arena = nullptr;
54 }
49 } 55 }
50 56
51 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 57 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
@@ -57,7 +63,7 @@ struct Memory::Impl {
57 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target, 63 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
58 Common::PageType::Memory); 64 Common::PageType::Memory);
59 65
60 if (Settings::IsFastmemEnabled()) { 66 if (current_page_table->fastmem_arena) {
61 system.DeviceMemory().buffer.Map(GetInteger(base), 67 system.DeviceMemory().buffer.Map(GetInteger(base),
62 GetInteger(target) - DramMemoryMap::Base, size, perms); 68 GetInteger(target) - DramMemoryMap::Base, size, perms);
63 } 69 }
@@ -69,7 +75,7 @@ struct Memory::Impl {
69 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, 75 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
70 Common::PageType::Unmapped); 76 Common::PageType::Unmapped);
71 77
72 if (Settings::IsFastmemEnabled()) { 78 if (current_page_table->fastmem_arena) {
73 system.DeviceMemory().buffer.Unmap(GetInteger(base), size); 79 system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
74 } 80 }
75 } 81 }
@@ -79,7 +85,7 @@ struct Memory::Impl {
79 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); 85 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
80 ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr); 86 ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
81 87
82 if (!Settings::IsFastmemEnabled()) { 88 if (!current_page_table->fastmem_arena) {
83 return; 89 return;
84 } 90 }
85 91
@@ -88,11 +94,6 @@ struct Memory::Impl {
88 const bool is_x = 94 const bool is_x =
89 True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled(); 95 True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
90 96
91 if (!current_page_table) {
92 system.DeviceMemory().buffer.Protect(vaddr, size, is_r, is_w, is_x);
93 return;
94 }
95
96 u64 protect_bytes{}; 97 u64 protect_bytes{};
97 u64 protect_begin{}; 98 u64 protect_begin{};
98 for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) { 99 for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -239,7 +240,7 @@ struct Memory::Impl {
239 240
240 bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, 241 bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
241 auto on_memory, auto on_rasterizer, auto increment) { 242 auto on_memory, auto on_rasterizer, auto increment) {
242 const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl(); 243 const auto& page_table = *current_page_table;
243 std::size_t remaining_size = size; 244 std::size_t remaining_size = size;
244 std::size_t page_index = addr >> YUZU_PAGEBITS; 245 std::size_t page_index = addr >> YUZU_PAGEBITS;
245 std::size_t page_offset = addr & YUZU_PAGEMASK; 246 std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -484,7 +485,7 @@ struct Memory::Impl {
484 return; 485 return;
485 } 486 }
486 487
487 if (Settings::IsFastmemEnabled()) { 488 if (current_page_table->fastmem_arena) {
488 system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug); 489 system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
489 } 490 }
490 491
@@ -541,7 +542,7 @@ struct Memory::Impl {
541 return; 542 return;
542 } 543 }
543 544
544 if (Settings::IsFastmemEnabled()) { 545 if (current_page_table->fastmem_arena) {
545 const bool is_read_enable = 546 const bool is_read_enable =
546 !Settings::values.use_reactive_flushing.GetValue() || !cached; 547 !Settings::values.use_reactive_flushing.GetValue() || !cached;
547 system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached); 548 system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
@@ -886,8 +887,7 @@ void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress
886} 887}
887 888
888bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { 889bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
889 const Kernel::KProcess& process = *system.ApplicationProcess(); 890 const auto& page_table = *impl->current_page_table;
890 const auto& page_table = process.GetPageTable().GetImpl();
891 const size_t page = vaddr >> YUZU_PAGEBITS; 891 const size_t page = vaddr >> YUZU_PAGEBITS;
892 if (page >= page_table.pointers.size()) { 892 if (page >= page_table.pointers.size()) {
893 return false; 893 return false;