-rw-r--r--  src/common/page_table.cpp                                   |   30
-rw-r--r--  src/common/page_table.h                                     |   17
-rw-r--r--  src/core/CMakeLists.txt                                     |    6
-rw-r--r--  src/core/debugger/gdbstub.cpp                               |  102
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp |   13
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h   |    7
-rw-r--r--  src/core/hle/kernel/k_capabilities.cpp                      |   36
-rw-r--r--  src/core/hle/kernel/k_capabilities.h                        |   17
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp              |    4
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h                |   10
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h                       |    8
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp                    |   12
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp                        | 3519
-rw-r--r--  src/core/hle/kernel/k_page_table.h                          |  542
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp                   | 5718
-rw-r--r--  src/core/hle/kernel/k_page_table_base.h                     |  759
-rw-r--r--  src/core/hle/kernel/k_process.cpp                           |   18
-rw-r--r--  src/core/hle/kernel/k_process.h                             |   14
-rw-r--r--  src/core/hle/kernel/k_process_page_table.h                  |  480
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp                    |    2
-rw-r--r--  src/core/hle/kernel/k_system_resource.cpp                   |    2
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp                 |    4
-rw-r--r--  src/core/hle/kernel/process_capability.cpp                  |  389
-rw-r--r--  src/core/hle/kernel/process_capability.h                    |  266
-rw-r--r--  src/core/hle/kernel/svc/svc_memory.cpp                      |    6
-rw-r--r--  src/core/hle/kernel/svc/svc_physical_memory.cpp             |    9
-rw-r--r--  src/core/hle/kernel/svc/svc_process_memory.cpp              |    3
-rw-r--r--  src/core/hle/kernel/svc/svc_query_memory.cpp                |    8
-rw-r--r--  src/core/hle/result.h                                       |   31
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp                            |   45
-rw-r--r--  src/core/memory.cpp                                         |    6
31 files changed, 7204 insertions, 4879 deletions
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 4b1690269..166dc3dce 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -9,12 +9,12 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-                               u64 address) const {
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                               Common::ProcessAddress address) const {
     // Setup invalid defaults.
-    out_entry.phys_addr = 0;
-    out_entry.block_size = page_size;
-    out_context.next_page = 0;
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+    out_context->next_page = 0;
 
     // Validate that we can read the actual entry.
     const auto page = address / page_size;
@@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_
     }
 
     // Populate the results.
-    out_entry.phys_addr = phys_addr + address;
-    out_context.next_page = page + 1;
-    out_context.next_offset = address + page_size;
+    out_entry->phys_addr = phys_addr + GetInteger(address);
+    out_context->next_page = page + 1;
+    out_context->next_offset = GetInteger(address) + page_size;
 
     return true;
 }
 
-bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
     // Setup invalid defaults.
-    out_entry.phys_addr = 0;
-    out_entry.block_size = page_size;
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
 
     // Validate that we can read the actual entry.
-    const auto page = context.next_page;
+    const auto page = context->next_page;
     if (page >= backing_addr.size()) {
         return false;
     }
@@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c
     }
 
     // Populate the results.
-    out_entry.phys_addr = phys_addr + context.next_offset;
-    context.next_page = page + 1;
-    context.next_offset += page_size;
+    out_entry->phys_addr = phys_addr + context->next_offset;
+    context->next_page = page + 1;
+    context->next_offset += page_size;
 
     return true;
 }
diff --git a/src/common/page_table.h b/src/common/page_table.h
index e653d52ad..5340f7d86 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -6,6 +6,7 @@
 #include <atomic>
 
 #include "common/common_types.h"
+#include "common/typed_address.h"
 #include "common/virtual_buffer.h"
 
 namespace Common {
@@ -100,9 +101,9 @@ struct PageTable {
     PageTable(PageTable&&) noexcept = default;
     PageTable& operator=(PageTable&&) noexcept = default;
 
-    bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-                        u64 address) const;
-    bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+    bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                        Common::ProcessAddress address) const;
+    bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
 
     /**
      * Resizes the page table to be able to accommodate enough pages within
@@ -117,6 +118,16 @@ struct PageTable {
         return current_address_space_width_in_bits;
     }
 
+    bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr,
+                            Common::ProcessAddress virt_addr) const {
+        if (virt_addr > (1ULL << this->GetAddressSpaceBits())) {
+            return false;
+        }
+
+        *out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr);
+        return true;
+    }
+
     /**
     * Vector of memory pointers backing each page. An entry can only be non-null if the
     * corresponding attribute element is of type `Memory`.
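
A minimal usage sketch of the pointer-based traversal API and the new GetPhysicalAddress helper from the hunks above; the function name InspectMapping and the variable table are illustrative only, not part of the change:

#include <memory>

#include "common/page_table.h"
#include "common/typed_address.h"

// Sketch: translate one guest virtual address, then walk a few of the
// contiguous physical runs that follow it. Assumes `table` has already been
// Resize()d and populated by the owning page table.
inline void InspectMapping(const Common::PageTable& table, Common::ProcessAddress virt_addr) {
    // Single-address translation via the new helper.
    Common::PhysicalAddress phys_addr{};
    if (!table.GetPhysicalAddress(std::addressof(phys_addr), virt_addr)) {
        return; // outside the configured address space width
    }

    // Block-by-block traversal via the pointer-based API (bounded to a few runs here).
    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};
    if (table.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr)) {
        for (int i = 0; i < 4; ++i) {
            // entry.phys_addr / entry.block_size describe one contiguous run.
            if (!table.ContinueTraversal(std::addressof(entry), std::addressof(context))) {
                break; // next page is unmapped or past the end of the table
            }
        }
    }
}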
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index e4f499135..8be3bdd08 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -271,8 +271,9 @@ add_library(core STATIC
     hle/kernel/k_page_heap.h
     hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
-    hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_page_table_base.cpp
+    hle/kernel/k_page_table_base.h
     hle/kernel/k_page_table_manager.h
     hle/kernel/k_page_table_slab_heap.h
     hle/kernel/k_port.cpp
@@ -280,6 +281,7 @@ add_library(core STATIC
     hle/kernel/k_priority_queue.h
     hle/kernel/k_process.cpp
     hle/kernel/k_process.h
+    hle/kernel/k_process_page_table.h
     hle/kernel/k_readable_event.cpp
     hle/kernel/k_readable_event.h
     hle/kernel/k_resource_limit.cpp
@@ -330,8 +332,6 @@ add_library(core STATIC
     hle/kernel/physical_core.cpp
     hle/kernel/physical_core.h
    hle/kernel/physical_memory.h
-    hle/kernel/process_capability.cpp
-    hle/kernel/process_capability.h
     hle/kernel/slab_helpers.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 6f5f5156b..e9bf57895 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -727,29 +727,34 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory
     }
 }
 
-static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) {
-    Kernel::Svc::MemoryInfo mem_info;
+static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) {
+    Kernel::KMemoryInfo mem_info;
+    Kernel::Svc::MemoryInfo svc_mem_info;
+    Kernel::Svc::PageInfo page_info;
     VAddr cur_addr{base};
 
     // Expect: r-x Code (.text)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
-    if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-        mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+    if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+        svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
         return cur_addr - 1;
     }
 
     // Expect: r-- Code (.rodata)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
-    if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-        mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+    if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+        svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
         return cur_addr - 1;
     }
 
     // Expect: rw- CodeData (.data)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
     return cur_addr - 1;
 }
 
@@ -767,7 +772,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 
     if (command_str == "get fastmem") {
         if (Settings::IsFastmemEnabled()) {
-            const auto& impl = page_table.PageTableImpl();
+            const auto& impl = page_table.GetImpl();
             const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
             const auto region_bits = impl.current_address_space_width_in_bits;
             const auto region_size = 1ULL << region_bits;
@@ -785,20 +790,22 @@
         reply = fmt::format("Process: {:#x} ({})\n"
                             "Program Id: {:#018x}\n",
                             process->GetProcessId(), process->GetName(), process->GetProgramId());
-        reply += fmt::format("Layout:\n"
-                             " Alias: {:#012x} - {:#012x}\n"
-                             " Heap: {:#012x} - {:#012x}\n"
-                             " Aslr: {:#012x} - {:#012x}\n"
-                             " Stack: {:#012x} - {:#012x}\n"
-                             "Modules:\n",
-                             GetInteger(page_table.GetAliasRegionStart()),
-                             GetInteger(page_table.GetAliasRegionEnd()),
-                             GetInteger(page_table.GetHeapRegionStart()),
-                             GetInteger(page_table.GetHeapRegionEnd()),
-                             GetInteger(page_table.GetAliasCodeRegionStart()),
-                             GetInteger(page_table.GetAliasCodeRegionEnd()),
-                             GetInteger(page_table.GetStackRegionStart()),
-                             GetInteger(page_table.GetStackRegionEnd()));
+        reply += fmt::format(
+            "Layout:\n"
+            " Alias: {:#012x} - {:#012x}\n"
+            " Heap: {:#012x} - {:#012x}\n"
+            " Aslr: {:#012x} - {:#012x}\n"
+            " Stack: {:#012x} - {:#012x}\n"
+            "Modules:\n",
+            GetInteger(page_table.GetAliasRegionStart()),
+            GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1,
+            GetInteger(page_table.GetHeapRegionStart()),
+            GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1,
+            GetInteger(page_table.GetAliasCodeRegionStart()),
+            GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() -
+                1,
+            GetInteger(page_table.GetStackRegionStart()),
+            GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1);
 
         for (const auto& [vaddr, name] : modules) {
             reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,
@@ -811,27 +818,34 @@
         while (true) {
             using MemoryAttribute = Kernel::Svc::MemoryAttribute;
 
-            auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-
-            if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
-                mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) {
-                const char* state = GetMemoryStateName(mem_info.state);
-                const char* perm = GetMemoryPermissionString(mem_info);
-
-                const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
-                const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
-                const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
-                const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+            Kernel::KMemoryInfo mem_info{};
+            Kernel::Svc::PageInfo page_info{};
+            R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+                                          cur_addr));
+            auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+            if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
+                svc_mem_info.base_address + svc_mem_info.size - 1 !=
+                    std::numeric_limits<u64>::max()) {
+                const char* state = GetMemoryStateName(svc_mem_info.state);
+                const char* perm = GetMemoryPermissionString(svc_mem_info);
+
+                const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
+                const char i =
+                    True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
+                const char d =
+                    True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
+                const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
                 const char p =
-                    True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
+                    True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
 
-                reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
-                                     mem_info.base_address,
-                                     mem_info.base_address + mem_info.size - 1, perm, state, l, i,
-                                     d, u, p, mem_info.ipc_count, mem_info.device_count);
+                reply += fmt::format(
+                    " {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address,
+                    svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p,
+                    svc_mem_info.ipc_count, svc_mem_info.device_count);
             }
 
-            const uintptr_t next_address = mem_info.base_address + mem_info.size;
+            const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
             if (next_address <= cur_addr) {
                 break;
             }
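
Reduced to its core, the region walk that the gdbstub now performs looks roughly like the sketch below; it assumes page_table is the debugged process's Kernel::KProcessPageTable and is illustrative, not additional code from the commit:

// Sketch: enumerate every memory region in the process address space using the
// out-parameter QueryInfo API introduced by this change.
VAddr cur_addr = 0;
while (true) {
    Kernel::KMemoryInfo mem_info{};
    Kernel::Svc::PageInfo page_info{};
    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
    const auto svc_mem_info = mem_info.GetSvcMemoryInfo();

    // ... inspect svc_mem_info.state, permission, and attribute here ...

    // The final Inaccessible region ends at the top of the address space, so
    // base_address + size wraps to a value <= cur_addr; that terminates the walk.
    const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
    if (next_address <= cur_addr) {
        break;
    }
    cur_addr = next_address;
}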
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 59364efa1..37fa39a73 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress*
    };
 
    // We succeeded.
-    *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
+    *out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
     R_SUCCEED();
 }
 
@@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres
     ASSERT(Common::IsAligned(size, alignment));
 
     // Close the secure region's pages.
-    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
+    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
                                  size / PageSize);
 }
 
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+    return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+    return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
 } // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index ff1feec70..60c5e58b7 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -8,7 +8,8 @@
 
 namespace Kernel {
 class KernelCore;
-}
+class KResourceLimit;
+} // namespace Kernel
 
 namespace Kernel::Board::Nintendo::Nx {
 
@@ -40,6 +41,10 @@ public:
                                       u32 pool);
     static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
                                  u32 pool);
+
+    // Insecure Memory.
+    static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+    static u32 GetInsecureMemoryPool();
 };
 
 } // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index e7da7a21d..fb890f978 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -4,14 +4,15 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/k_capabilities.h"
 #include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_version.h"
 
 namespace Kernel {
 
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+                                       KProcessPageTable* page_table) {
     // We're initializing an initial process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -41,7 +42,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
     R_RETURN(this->SetCapabilities(kern_caps, page_table));
 }
 
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+                                        KProcessPageTable* page_table) {
     // We're initializing a user process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -121,7 +123,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
     const auto range_pack = MapRange{cap};
     const auto size_pack = MapRangeSize{size_cap};
 
@@ -142,16 +144,13 @@
                           ? KMemoryPermission::UserRead
                           : KMemoryPermission::UserReadWrite;
     if (MapRangeSize{size_cap}.normal) {
-        // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+        R_RETURN(page_table->MapStatic(phys_addr, size, perm));
     } else {
-        // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+        R_RETURN(page_table->MapIo(phys_addr, size, perm));
     }
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
 }
 
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
     // Get/validate address/size
     const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
     const size_t num_pages = 1;
@@ -160,10 +159,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
     R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
 
     // Do the mapping.
-    // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
+    R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
 }
 
 template <typename F>
@@ -200,13 +196,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
     // Map each region into the process's page table.
     return ProcessMapRegionCapability(
-        cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
-            // R_RETURN(page_table->MapRegion(region_type, perm));
-            UNIMPLEMENTED();
-            R_SUCCEED();
+        cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+            R_RETURN(page_table->MapRegion(region_type, perm));
         });
 }
 
@@ -280,7 +274,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
 }
 
 Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
-                                    KPageTable* page_table) {
+                                    KProcessPageTable* page_table) {
     // Validate this is a capability we can act on.
     const auto type = GetCapabilityType(cap);
     R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +312,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
     }
 }
 
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
     u32 set_flags = 0, set_svc = 0;
 
     for (size_t i = 0; i < caps.size(); i++) {
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index ebd4eedb1..013d952ad 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -15,15 +15,15 @@
 
 namespace Kernel {
 
-class KPageTable;
+class KProcessPageTable;
 class KernelCore;
 
 class KCapabilities {
 public:
     constexpr explicit KCapabilities() = default;
 
-    Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
-    Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+    Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+    Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
 
     static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
 
@@ -264,9 +264,9 @@ private:
 
     Result SetCorePriorityCapability(const u32 cap);
     Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
-    Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
-    Result MapIoPage_(const u32 cap, KPageTable* page_table);
-    Result MapRegion_(const u32 cap, KPageTable* page_table);
+    Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+    Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+    Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
     Result SetInterruptPairCapability(const u32 cap);
     Result SetProgramTypeCapability(const u32 cap);
     Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
     static Result ProcessMapRegionCapability(const u32 cap, F f);
     static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
 
-    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
-    Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+                         KProcessPageTable* page_table);
+    Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
 
 private:
     Svc::SvcAccessFlagSet m_svc_access_flags{};
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index f48896715..f0703f795 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
                                 size_t size, u64 device_address, u32 option, bool is_aligned) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
                                   size_t size, u64 device_address) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 18556e3cc..ff0ec8152 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,7 +5,7 @@
 
 #include <string>
 
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
     Result Attach(Svc::DeviceName device_name);
     Result Detach(Svc::DeviceName device_name);
 
-    Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
     }
 
-    Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
     }
 
-    Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                  u64 device_address);
 
     static void Initialize();
 
 private:
-    Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                u64 device_address, u32 option, bool is_aligned);
 
 private:
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index c8122644f..d7adb3169 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -394,6 +394,14 @@ private:
         return region.GetEndAddress();
     }
 
+public:
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+        return Find(address, layout.GetVirtualMemoryRegionTree());
+    }
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+        return Find(address, layout.GetPhysicalMemoryRegionTree());
+    }
+
 private:
     u64 m_linear_phys_to_virt_diff{};
     u64 m_linear_virt_to_phys_diff{};
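
A hedged sketch of how the new static Find overloads can be called; kernel is assumed to expose the layout via MemoryLayout() as elsewhere in this commit, and virt_addr / phys_addr are assumed to be a KVirtualAddress and a KPhysicalAddress so overload resolution picks the matching region tree:

// Sketch: resolve the regions containing a virtual and a physical address
// without touching the region trees directly.
const Kernel::KMemoryLayout& layout = kernel.MemoryLayout();
const Kernel::KMemoryRegion* virt_region = Kernel::KMemoryLayout::Find(layout, virt_addr);
const Kernel::KMemoryRegion* phys_region = Kernel::KMemoryLayout::Find(layout, phys_addr);
if (virt_region != nullptr && phys_region != nullptr) {
    // Both addresses fall inside known regions of the layout.
}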
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cdc5572d8..0a973ec8c 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
 }
 
 void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
@@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
 
 void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi
 
 void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                     size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica
 bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages, u8 fill_pattern) {
     auto& device_memory = kernel.System().DeviceMemory();
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
 
     // We want to return whether any pages were newly allocated.
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
deleted file mode 100644
index 1d47bdf6b..000000000
--- a/src/core/hle/kernel/k_page_table.cpp
+++ /dev/null
@@ -1,3519 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/assert.h"
6#include "common/literals.h"
7#include "common/scope_exit.h"
8#include "common/settings.h"
9#include "core/core.h"
10#include "core/hle/kernel/k_address_space_info.h"
11#include "core/hle/kernel/k_memory_block.h"
12#include "core/hle/kernel/k_memory_block_manager.h"
13#include "core/hle/kernel/k_page_group.h"
14#include "core/hle/kernel/k_page_table.h"
15#include "core/hle/kernel/k_process.h"
16#include "core/hle/kernel/k_resource_limit.h"
17#include "core/hle/kernel/k_scoped_resource_reservation.h"
18#include "core/hle/kernel/k_system_control.h"
19#include "core/hle/kernel/k_system_resource.h"
20#include "core/hle/kernel/kernel.h"
21#include "core/hle/kernel/svc_results.h"
22#include "core/memory.h"
23
24namespace Kernel {
25
26namespace {
27
28class KScopedLightLockPair {
29 YUZU_NON_COPYABLE(KScopedLightLockPair);
30 YUZU_NON_MOVEABLE(KScopedLightLockPair);
31
32private:
33 KLightLock* m_lower;
34 KLightLock* m_upper;
35
36public:
37 KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
38 // Ensure our locks are in a consistent order.
39 if (std::addressof(lhs) <= std::addressof(rhs)) {
40 m_lower = std::addressof(lhs);
41 m_upper = std::addressof(rhs);
42 } else {
43 m_lower = std::addressof(rhs);
44 m_upper = std::addressof(lhs);
45 }
46
47 // Acquire both locks.
48 m_lower->Lock();
49 if (m_lower != m_upper) {
50 m_upper->Lock();
51 }
52 }
53
54 ~KScopedLightLockPair() {
55 // Unlock the upper lock.
56 if (m_upper != nullptr && m_upper != m_lower) {
57 m_upper->Unlock();
58 }
59
60 // Unlock the lower lock.
61 if (m_lower != nullptr) {
62 m_lower->Unlock();
63 }
64 }
65
66public:
67 // Utility.
68 void TryUnlockHalf(KLightLock& lock) {
69 // Only allow unlocking if the lock is half the pair.
70 if (m_lower != m_upper) {
71 // We want to be sure the lock is one we own.
72 if (m_lower == std::addressof(lock)) {
73 lock.Unlock();
74 m_lower = nullptr;
75 } else if (m_upper == std::addressof(lock)) {
76 lock.Unlock();
77 m_upper = nullptr;
78 }
79 }
80 }
81};
82
83using namespace Common::Literals;
84
85constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
86 switch (as_type) {
87 case Svc::CreateProcessFlag::AddressSpace32Bit:
88 case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
89 return 32;
90 case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
91 return 36;
92 case Svc::CreateProcessFlag::AddressSpace64Bit:
93 return 39;
94 default:
95 ASSERT(false);
96 return {};
97 }
98}
99
100} // namespace
101
102KPageTable::KPageTable(Core::System& system_)
103 : m_general_lock{system_.Kernel()},
104 m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
105
106KPageTable::~KPageTable() = default;
107
108Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
109 bool enable_das_merge, bool from_back,
110 KMemoryManager::Pool pool, KProcessAddress code_addr,
111 size_t code_size, KSystemResource* system_resource,
112 KResourceLimit* resource_limit,
113 Core::Memory::Memory& memory) {
114
115 const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
116 return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
117 };
118 const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
119 return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
120 };
121
122 // Set the tracking memory
123 m_memory = std::addressof(memory);
124
125 // Set our width and heap/alias sizes
126 m_address_space_width = GetAddressSpaceWidthFromType(as_type);
127 const KProcessAddress start = 0;
128 const KProcessAddress end{1ULL << m_address_space_width};
129 size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
130 size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
131
132 ASSERT(code_addr < code_addr + code_size);
133 ASSERT(code_addr + code_size - 1 <= end - 1);
134
135 // Adjust heap/alias size if we don't have an alias region
136 if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
137 heap_region_size += alias_region_size;
138 alias_region_size = 0;
139 }
140
141 // Set code regions and determine remaining
142 constexpr size_t RegionAlignment{2_MiB};
143 KProcessAddress process_code_start{};
144 KProcessAddress process_code_end{};
145 size_t stack_region_size{};
146 size_t kernel_map_region_size{};
147
148 if (m_address_space_width == 39) {
149 alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
150 heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
151 stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
152 kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
153 m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
154 m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
155 m_alias_code_region_start = m_code_region_start;
156 m_alias_code_region_end = m_code_region_end;
157 process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
158 process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
159 } else {
160 stack_region_size = 0;
161 kernel_map_region_size = 0;
162 m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
163 m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
164 m_stack_region_start = m_code_region_start;
165 m_alias_code_region_start = m_code_region_start;
166 m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
167 GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
168 m_stack_region_end = m_code_region_end;
169 m_kernel_map_region_start = m_code_region_start;
170 m_kernel_map_region_end = m_code_region_end;
171 process_code_start = m_code_region_start;
172 process_code_end = m_code_region_end;
173 }
174
175 // Set other basic fields
176 m_enable_aslr = enable_aslr;
177 m_enable_device_address_space_merge = enable_das_merge;
178 m_address_space_start = start;
179 m_address_space_end = end;
180 m_is_kernel = false;
181 m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
182 m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
183 m_resource_limit = resource_limit;
184
185 // Determine the region we can place our undetermineds in
186 KProcessAddress alloc_start{};
187 size_t alloc_size{};
188 if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
189 alloc_start = m_code_region_start;
190 alloc_size = process_code_start - m_code_region_start;
191 } else {
192 alloc_start = process_code_end;
193 alloc_size = end - process_code_end;
194 }
195 const size_t needed_size =
196 (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
197 R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
198
199 const size_t remaining_size{alloc_size - needed_size};
200
201 // Determine random placements for each region
202 size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
203 if (enable_aslr) {
204 alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
205 RegionAlignment;
206 heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
207 RegionAlignment;
208 stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
209 RegionAlignment;
210 kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
211 RegionAlignment;
212 }
213
214 // Setup heap and alias regions
215 m_alias_region_start = alloc_start + alias_rnd;
216 m_alias_region_end = m_alias_region_start + alias_region_size;
217 m_heap_region_start = alloc_start + heap_rnd;
218 m_heap_region_end = m_heap_region_start + heap_region_size;
219
220 if (alias_rnd <= heap_rnd) {
221 m_heap_region_start += alias_region_size;
222 m_heap_region_end += alias_region_size;
223 } else {
224 m_alias_region_start += heap_region_size;
225 m_alias_region_end += heap_region_size;
226 }
227
228 // Setup stack region
229 if (stack_region_size) {
230 m_stack_region_start = alloc_start + stack_rnd;
231 m_stack_region_end = m_stack_region_start + stack_region_size;
232
233 if (alias_rnd < stack_rnd) {
234 m_stack_region_start += alias_region_size;
235 m_stack_region_end += alias_region_size;
236 } else {
237 m_alias_region_start += stack_region_size;
238 m_alias_region_end += stack_region_size;
239 }
240
241 if (heap_rnd < stack_rnd) {
242 m_stack_region_start += heap_region_size;
243 m_stack_region_end += heap_region_size;
244 } else {
245 m_heap_region_start += stack_region_size;
246 m_heap_region_end += stack_region_size;
247 }
248 }
249
250 // Setup kernel map region
251 if (kernel_map_region_size) {
252 m_kernel_map_region_start = alloc_start + kmap_rnd;
253 m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
254
255 if (alias_rnd < kmap_rnd) {
256 m_kernel_map_region_start += alias_region_size;
257 m_kernel_map_region_end += alias_region_size;
258 } else {
259 m_alias_region_start += kernel_map_region_size;
260 m_alias_region_end += kernel_map_region_size;
261 }
262
263 if (heap_rnd < kmap_rnd) {
264 m_kernel_map_region_start += heap_region_size;
265 m_kernel_map_region_end += heap_region_size;
266 } else {
267 m_heap_region_start += kernel_map_region_size;
268 m_heap_region_end += kernel_map_region_size;
269 }
270
271 if (stack_region_size) {
272 if (stack_rnd < kmap_rnd) {
273 m_kernel_map_region_start += stack_region_size;
274 m_kernel_map_region_end += stack_region_size;
275 } else {
276 m_stack_region_start += kernel_map_region_size;
277 m_stack_region_end += kernel_map_region_size;
278 }
279 }
280 }
281
282 // Set heap and fill members.
283 m_current_heap_end = m_heap_region_start;
284 m_max_heap_size = 0;
285 m_mapped_physical_memory_size = 0;
286 m_mapped_unsafe_physical_memory = 0;
287 m_mapped_insecure_memory = 0;
288 m_mapped_ipc_server_memory = 0;
289
290 m_heap_fill_value = 0;
291 m_ipc_fill_value = 0;
292 m_stack_fill_value = 0;
293
294 // Set allocation option.
295 m_allocate_option =
296 KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
297 : KMemoryManager::Direction::FromFront);
298
299 // Ensure that we regions inside our address space
300 auto IsInAddressSpace = [&](KProcessAddress addr) {
301 return m_address_space_start <= addr && addr <= m_address_space_end;
302 };
303 ASSERT(IsInAddressSpace(m_alias_region_start));
304 ASSERT(IsInAddressSpace(m_alias_region_end));
305 ASSERT(IsInAddressSpace(m_heap_region_start));
306 ASSERT(IsInAddressSpace(m_heap_region_end));
307 ASSERT(IsInAddressSpace(m_stack_region_start));
308 ASSERT(IsInAddressSpace(m_stack_region_end));
309 ASSERT(IsInAddressSpace(m_kernel_map_region_start));
310 ASSERT(IsInAddressSpace(m_kernel_map_region_end));
311
312 // Ensure that we selected regions that don't overlap
313 const KProcessAddress alias_start{m_alias_region_start};
314 const KProcessAddress alias_last{m_alias_region_end - 1};
315 const KProcessAddress heap_start{m_heap_region_start};
316 const KProcessAddress heap_last{m_heap_region_end - 1};
317 const KProcessAddress stack_start{m_stack_region_start};
318 const KProcessAddress stack_last{m_stack_region_end - 1};
319 const KProcessAddress kmap_start{m_kernel_map_region_start};
320 const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
321 ASSERT(alias_last < heap_start || heap_last < alias_start);
322 ASSERT(alias_last < stack_start || stack_last < alias_start);
323 ASSERT(alias_last < kmap_start || kmap_last < alias_start);
324 ASSERT(heap_last < stack_start || stack_last < heap_start);
325 ASSERT(heap_last < kmap_start || kmap_last < heap_start);
326
327 m_current_heap_end = m_heap_region_start;
328 m_max_heap_size = 0;
329 m_mapped_physical_memory_size = 0;
330 m_memory_pool = pool;
331
332 m_page_table_impl = std::make_unique<Common::PageTable>();
333 m_page_table_impl->Resize(m_address_space_width, PageBits);
334
335 // Initialize our memory block manager.
336 R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
337 m_memory_block_slab_manager));
338}
339
340void KPageTable::Finalize() {
341 auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
342 if (Settings::IsFastmemEnabled()) {
343 m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
344 }
345 };
346
347 // Finalize memory blocks.
348 m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
349
350 // Release any insecure mapped memory.
351 if (m_mapped_insecure_memory) {
352 UNIMPLEMENTED();
353 }
354
355 // Release any ipc server memory.
356 if (m_mapped_ipc_server_memory) {
357 UNIMPLEMENTED();
358 }
359
360 // Close the backing page table, as the destructor is not called for guest objects.
361 m_page_table_impl.reset();
362}
363
364Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
365 KMemoryPermission perm) {
366 const u64 size{num_pages * PageSize};
367
368 // Validate the mapping request.
369 R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
370
371 // Lock the table.
372 KScopedLightLock lk(m_general_lock);
373
374 // Verify that the destination memory is unmapped.
375 R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
376 KMemoryPermission::None, KMemoryPermission::None,
377 KMemoryAttribute::None, KMemoryAttribute::None));
378
379 // Create an update allocator.
380 Result allocator_result{ResultSuccess};
381 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
382 m_memory_block_slab_manager);
383
384 // Allocate and open.
385 KPageGroup pg{m_kernel, m_block_info_manager};
386 R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
387 &pg, num_pages,
388 KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
389
390 R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
391
392 // Update the blocks.
393 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
394 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
395 KMemoryBlockDisableMergeAttribute::None);
396
397 R_SUCCEED();
398}
399
400Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
401 size_t size) {
402 // Validate the mapping request.
403 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
404 ResultInvalidMemoryRegion);
405
406 // Lock the table.
407 KScopedLightLock lk(m_general_lock);
408
409 // Verify that the source memory is normal heap.
410 KMemoryState src_state{};
411 KMemoryPermission src_perm{};
412 size_t num_src_allocator_blocks{};
413 R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
414 src_address, size, KMemoryState::All, KMemoryState::Normal,
415 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
416 KMemoryAttribute::All, KMemoryAttribute::None));
417
418 // Verify that the destination memory is unmapped.
419 size_t num_dst_allocator_blocks{};
420 R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
421 KMemoryState::Free, KMemoryPermission::None,
422 KMemoryPermission::None, KMemoryAttribute::None,
423 KMemoryAttribute::None));
424
425 // Create an update allocator for the source.
426 Result src_allocator_result{ResultSuccess};
427 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
428 m_memory_block_slab_manager,
429 num_src_allocator_blocks);
430 R_TRY(src_allocator_result);
431
432 // Create an update allocator for the destination.
433 Result dst_allocator_result{ResultSuccess};
434 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
435 m_memory_block_slab_manager,
436 num_dst_allocator_blocks);
437 R_TRY(dst_allocator_result);
438
439 // Map the code memory.
440 {
441 // Determine the number of pages being operated on.
442 const size_t num_pages = size / PageSize;
443
444 // Create page groups for the memory being mapped.
445 KPageGroup pg{m_kernel, m_block_info_manager};
446 AddRegionToPages(src_address, num_pages, pg);
447
448 // We're going to perform an update, so create a helper.
449 KScopedPageTableUpdater updater(this);
450
451 // Reprotect the source as kernel-read/not mapped.
452 const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
453 KMemoryPermission::NotMapped);
454 R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
455
456 // Ensure that we unprotect the source pages on failure.
457 auto unprot_guard = SCOPE_GUARD({
458 ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
459 .IsSuccess());
460 });
461
462 // Map the alias pages.
463 const KPageProperties dst_properties = {new_perm, false, false,
464 DisableMergeAttribute::DisableHead};
465 R_TRY(
466 this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
467
468 // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
469 // failure.
470 unprot_guard.Cancel();
471
472 // Apply the memory block updates.
473 m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
474 src_state, new_perm, KMemoryAttribute::Locked,
475 KMemoryBlockDisableMergeAttribute::Locked,
476 KMemoryBlockDisableMergeAttribute::None);
477 m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
478 KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
479 KMemoryBlockDisableMergeAttribute::Normal,
480 KMemoryBlockDisableMergeAttribute::None);
481 }
482
483 R_SUCCEED();
484}
485
486Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
487 size_t size,
488 ICacheInvalidationStrategy icache_invalidation_strategy) {
489 // Validate the mapping request.
490 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
491 ResultInvalidMemoryRegion);
492
493 // Lock the table.
494 KScopedLightLock lk(m_general_lock);
495
496 // Verify that the source memory is locked normal heap.
497 size_t num_src_allocator_blocks{};
498 R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
499 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
500 KMemoryPermission::None, KMemoryAttribute::All,
501 KMemoryAttribute::Locked));
502
503 // Verify that the destination memory is aliasable code.
504 size_t num_dst_allocator_blocks{};
505 R_TRY(this->CheckMemoryStateContiguous(
506 std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
507 KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
508 KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
509
510 // Determine whether any pages being unmapped are code.
511 bool any_code_pages = false;
512 {
513 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
514 while (true) {
515 // Get the memory info.
516 const KMemoryInfo info = it->GetMemoryInfo();
517
518 // Check if the memory has code flag.
519 if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
520 any_code_pages = true;
521 break;
522 }
523
524 // Check if we're done.
525 if (dst_address + size - 1 <= info.GetLastAddress()) {
526 break;
527 }
528
529 // Advance.
530 ++it;
531 }
532 }
533
534 // Ensure that we maintain the instruction cache.
535 bool reprotected_pages = false;
536 SCOPE_EXIT({
537 if (reprotected_pages && any_code_pages) {
538 if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
539 m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
540 } else {
541 m_system.InvalidateCpuInstructionCaches();
542 }
543 }
544 });
545
546 // Unmap.
547 {
548 // Determine the number of pages being operated on.
549 const size_t num_pages = size / PageSize;
550
551 // Create an update allocator for the source.
552 Result src_allocator_result{ResultSuccess};
553 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
554 m_memory_block_slab_manager,
555 num_src_allocator_blocks);
556 R_TRY(src_allocator_result);
557
558 // Create an update allocator for the destination.
559 Result dst_allocator_result{ResultSuccess};
560 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
561 m_memory_block_slab_manager,
562 num_dst_allocator_blocks);
563 R_TRY(dst_allocator_result);
564
565 // Unmap the aliased copy of the pages.
566 R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
567
568 // Try to set the permissions for the source pages back to what they should be.
569 R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
570 OperationType::ChangePermissions));
571
572 // Apply the memory block updates.
573 m_memory_block_manager.Update(
574 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
575 KMemoryPermission::None, KMemoryAttribute::None,
576 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
577 m_memory_block_manager.Update(
578 std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
579 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
580 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
581
582 // Note that we reprotected pages.
583 reprotected_pages = true;
584 }
585
586 R_SUCCEED();
587}
588
589KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
590 size_t num_pages, size_t alignment, size_t offset,
591 size_t guard_pages) {
592 KProcessAddress address = 0;
593
594 if (num_pages <= region_num_pages) {
595 if (this->IsAslrEnabled()) {
596 UNIMPLEMENTED();
597 }
598 // Find the first free area.
599 if (address == 0) {
600 address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
601 alignment, offset, guard_pages);
602 }
603 }
604
605 return address;
606}
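// Illustrative sketch: FindFreeArea defers the search to the memory block manager, but the
// shape of that search is simple: walk the free gaps of the region, keep `guard_pages` of
// slack on both sides of a candidate, and align the candidate start as requested. A simplified,
// self-contained version over page indices (hypothetical helpers; assumes `used` is sorted,
// non-overlapping, inside the region, and that `alignment` is non-zero):
#include <cstdint>
#include <utility>
#include <vector>

namespace sketch {

// Smallest v >= value such that (v - offset) is a multiple of alignment.
inline std::uint64_t AlignUpWithOffset(std::uint64_t value, std::uint64_t alignment,
                                       std::uint64_t offset) {
    if (value <= offset) {
        return offset;
    }
    const std::uint64_t delta = value - offset;
    return offset + ((delta + alignment - 1) / alignment) * alignment;
}

// Returns the first page index in [region_first, region_first + region_num_pages) where
// `num_pages` fit with `guard_pages` of free slack on each side, or 0 if nothing fits
// (mirroring the "address == 0 means failure" convention used above).
inline std::uint64_t FindFreePages(
    const std::vector<std::pair<std::uint64_t, std::uint64_t>>& used, std::uint64_t region_first,
    std::uint64_t region_num_pages, std::uint64_t num_pages, std::uint64_t alignment,
    std::uint64_t offset, std::uint64_t guard_pages) {
    const std::uint64_t region_end = region_first + region_num_pages;
    std::uint64_t gap_first = region_first;

    const auto try_gap = [&](std::uint64_t gap_end) -> std::uint64_t {
        if (gap_end <= gap_first) {
            return 0;
        }
        const std::uint64_t candidate =
            AlignUpWithOffset(gap_first + guard_pages, alignment, offset);
        // The allocation plus its trailing guard must still fit inside the gap.
        if (candidate + num_pages + guard_pages <= gap_end) {
            return candidate;
        }
        return 0;
    };

    for (const auto& [used_first, used_last] : used) {
        if (const std::uint64_t found = try_gap(used_first)) {
            return found;
        }
        gap_first = used_last + 1;
    }
    return try_gap(region_end);
}

} // namespace sketch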
607
608Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
609 ASSERT(this->IsLockedByCurrentThread());
610
611 const size_t size = num_pages * PageSize;
612
613 // We're making a new group, not adding to an existing one.
614 R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
615
616 // Begin traversal.
617 Common::PageTable::TraversalContext context;
618 Common::PageTable::TraversalEntry next_entry;
619 R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
620 ResultInvalidCurrentMemory);
621
622 // Prepare tracking variables.
623 KPhysicalAddress cur_addr = next_entry.phys_addr;
624 size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
625 size_t tot_size = cur_size;
626
627 // Iterate, adding to group as we go.
628 const auto& memory_layout = m_system.Kernel().MemoryLayout();
629 while (tot_size < size) {
630 R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
631 ResultInvalidCurrentMemory);
632
633 if (next_entry.phys_addr != (cur_addr + cur_size)) {
634 const size_t cur_pages = cur_size / PageSize;
635
636 R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
637 R_TRY(pg.AddBlock(cur_addr, cur_pages));
638
639 cur_addr = next_entry.phys_addr;
640 cur_size = next_entry.block_size;
641 } else {
642 cur_size += next_entry.block_size;
643 }
644
645 tot_size += next_entry.block_size;
646 }
647
648 // Ensure we add the right amount for the last block.
649 if (tot_size > size) {
650 cur_size -= (tot_size - size);
651 }
652
653 // Add the last block.
654 const size_t cur_pages = cur_size / PageSize;
655 R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
656 R_TRY(pg.AddBlock(cur_addr, cur_pages));
657
658 R_SUCCEED();
659}
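// Illustrative sketch: MakePageGroup walks the page table and coalesces physically contiguous
// translations into as few (address, page count) blocks as possible, trimming the final run so
// the group covers exactly the requested size. The merging logic, reduced to plain integers
// (hypothetical Entry/Block types; PageSize assumed to be 0x1000; the real code also trims the
// first entry to the queried virtual address):
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

struct Entry {
    std::uint64_t phys_addr;
    std::uint64_t block_size;
};

struct Block {
    std::uint64_t addr;
    std::uint64_t num_pages;
};

inline std::vector<Block> CoalesceEntries(const std::vector<Entry>& entries, std::uint64_t size) {
    constexpr std::uint64_t kPageSize = 0x1000;
    std::vector<Block> out;
    if (entries.empty()) {
        return out;
    }

    std::uint64_t cur_addr = entries[0].phys_addr;
    std::uint64_t cur_size = entries[0].block_size;
    std::uint64_t tot_size = cur_size;

    for (std::size_t i = 1; i < entries.size() && tot_size < size; ++i) {
        const Entry& next = entries[i];
        if (next.phys_addr != cur_addr + cur_size) {
            // Discontinuity: emit the run accumulated so far and start a new one.
            out.push_back({cur_addr, cur_size / kPageSize});
            cur_addr = next.phys_addr;
            cur_size = next.block_size;
        } else {
            cur_size += next.block_size;
        }
        tot_size += next.block_size;
    }

    // Trim the final run so the group covers exactly `size` bytes, then emit it.
    if (tot_size > size) {
        cur_size -= (tot_size - size);
    }
    out.push_back({cur_addr, cur_size / kPageSize});
    return out;
}

} // namespace sketch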
660
661bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
662 ASSERT(this->IsLockedByCurrentThread());
663
664 const size_t size = num_pages * PageSize;
665 const auto& memory_layout = m_system.Kernel().MemoryLayout();
666
667 // Empty groups are necessarily invalid.
668 if (pg.empty()) {
669 return false;
670 }
671
672 // We're going to validate that the group we'd expect is the group we see.
673 auto cur_it = pg.begin();
674 KPhysicalAddress cur_block_address = cur_it->GetAddress();
675 size_t cur_block_pages = cur_it->GetNumPages();
676
677 auto UpdateCurrentIterator = [&]() {
678 if (cur_block_pages == 0) {
679 if ((++cur_it) == pg.end()) {
680 return false;
681 }
682
683 cur_block_address = cur_it->GetAddress();
684 cur_block_pages = cur_it->GetNumPages();
685 }
686 return true;
687 };
688
689 // Begin traversal.
690 Common::PageTable::TraversalContext context;
691 Common::PageTable::TraversalEntry next_entry;
692 if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
693 return false;
694 }
695
696 // Prepare tracking variables.
697 KPhysicalAddress cur_addr = next_entry.phys_addr;
698 size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
699 size_t tot_size = cur_size;
700
701 // Iterate, comparing expected to actual.
702 while (tot_size < size) {
703 if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
704 return false;
705 }
706
707 if (next_entry.phys_addr != (cur_addr + cur_size)) {
708 const size_t cur_pages = cur_size / PageSize;
709
710 if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
711 return false;
712 }
713
714 if (!UpdateCurrentIterator()) {
715 return false;
716 }
717
718 if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
719 return false;
720 }
721
722 cur_block_address += cur_size;
723 cur_block_pages -= cur_pages;
724 cur_addr = next_entry.phys_addr;
725 cur_size = next_entry.block_size;
726 } else {
727 cur_size += next_entry.block_size;
728 }
729
730 tot_size += next_entry.block_size;
731 }
732
733 // Ensure we compare the right amount for the last block.
734 if (tot_size > size) {
735 cur_size -= (tot_size - size);
736 }
737
738 if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
739 return false;
740 }
741
742 if (!UpdateCurrentIterator()) {
743 return false;
744 }
745
746 return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
747}
748
749Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
750 KPageTable& src_page_table, KProcessAddress src_addr) {
751 // Acquire the table locks.
752 KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
753
754 const size_t num_pages{size / PageSize};
755
756 // Check that the memory is mapped in the destination process.
757 size_t num_allocator_blocks;
758 R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
759 KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
760 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
761 KMemoryAttribute::None));
762
763 // Check that the memory is mapped in the source process.
764 R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
765 KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
766 KMemoryPermission::None, KMemoryAttribute::All,
767 KMemoryAttribute::None));
768
769 // Create an update allocator.
770 Result allocator_result{ResultSuccess};
771 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
772 m_memory_block_slab_manager, num_allocator_blocks);
773 R_TRY(allocator_result);
774
775 R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
776
777 // Apply the memory block update.
778 m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
779 KMemoryState::Free, KMemoryPermission::None,
780 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
781 KMemoryBlockDisableMergeAttribute::Normal);
782
783 m_system.InvalidateCpuInstructionCaches();
784
785 R_SUCCEED();
786}
787
788Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
789 KProcessAddress address, size_t size,
790 KMemoryPermission test_perm, KMemoryState dst_state) {
791 // Validate pre-conditions.
792 ASSERT(this->IsLockedByCurrentThread());
793 ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
794 test_perm == KMemoryPermission::UserRead);
795
796 // Check that the address is in range.
797 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
798
799 // Get the source permission.
800 const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
801 ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
802 : KMemoryPermission::UserRead;
803
804 // Get aligned extents.
805 const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
806 const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
807 const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
808 const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
809
810 const auto aligned_src_last = (aligned_src_end)-1;
811 const auto mapping_src_last = (mapping_src_end)-1;
812
813 // Get the test state and attribute mask.
814 KMemoryState test_state;
815 KMemoryAttribute test_attr_mask;
816 switch (dst_state) {
817 case KMemoryState::Ipc:
818 test_state = KMemoryState::FlagCanUseIpc;
819 test_attr_mask =
820 KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
821 break;
822 case KMemoryState::NonSecureIpc:
823 test_state = KMemoryState::FlagCanUseNonSecureIpc;
824 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
825 break;
826 case KMemoryState::NonDeviceIpc:
827 test_state = KMemoryState::FlagCanUseNonDeviceIpc;
828 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
829 break;
830 default:
831 R_THROW(ResultInvalidCombination);
832 }
833
834 // Ensure that on failure, we roll back appropriately.
835 size_t mapped_size = 0;
836 ON_RESULT_FAILURE {
837 if (mapped_size > 0) {
838 this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
839 src_perm);
840 }
841 };
842
843 size_t blocks_needed = 0;
844
845 // Iterate, mapping as needed.
846 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
847 while (true) {
848 const KMemoryInfo info = it->GetMemoryInfo();
849
850 // Validate the current block.
851 R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
852 test_attr_mask, KMemoryAttribute::None));
853
854 if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
855 info.GetAddress() < GetInteger(mapping_src_end)) {
856 const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
857 ? info.GetAddress()
858 : (mapping_src_start);
859 const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
860 : (mapping_src_end);
861 const size_t cur_size = cur_end - cur_start;
862
863 if (info.GetAddress() < GetInteger(mapping_src_start)) {
864 ++blocks_needed;
865 }
866 if (mapping_src_last < info.GetLastAddress()) {
867 ++blocks_needed;
868 }
869
870 // Set the permissions on the block, if we need to.
871 if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
872 R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
873 OperationType::ChangePermissions));
874 }
875
876 // Note that we mapped this part.
877 mapped_size += cur_size;
878 }
879
880 // If the block is at the end, we're done.
881 if (aligned_src_last <= info.GetLastAddress()) {
882 break;
883 }
884
885 // Advance.
886 ++it;
887 ASSERT(it != m_memory_block_manager.end());
888 }
889
890 if (out_blocks_needed != nullptr) {
891 ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
892 *out_blocks_needed = blocks_needed;
893 }
894
895 R_SUCCEED();
896}
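// Illustrative sketch: the four extents computed above drive the whole IPC mapping. The
// "aligned" pair covers every page the client buffer touches; the "mapping" pair covers only
// the pages the buffer owns in full; whatever lies between them has to go through a freshly
// allocated partial page. The arithmetic in isolation (hypothetical helper; PageSize assumed
// 0x1000; the server-side end-page check adds one further condition so a buffer living
// entirely inside a single page allocates only one partial page):
#include <cstdint>

namespace sketch {

struct IpcExtents {
    std::uint64_t aligned_start; // AlignDown(address): first page touched
    std::uint64_t aligned_end;   // AlignUp(address + size): one past the last page touched
    std::uint64_t mapping_start; // AlignUp(address): first fully-owned page
    std::uint64_t mapping_end;   // AlignDown(address + size): one past the last fully-owned page
    bool needs_start_partial;    // the buffer begins mid-page
    bool needs_end_partial;      // the buffer ends mid-page
};

inline IpcExtents ComputeIpcExtents(std::uint64_t address, std::uint64_t size) {
    constexpr std::uint64_t kPageSize = 0x1000;
    const auto align_down = [](std::uint64_t v) { return v & ~(kPageSize - 1); };
    const auto align_up = [&](std::uint64_t v) { return align_down(v + kPageSize - 1); };

    IpcExtents e{};
    e.aligned_start = align_down(address);
    e.aligned_end = align_up(address + size);
    e.mapping_start = align_up(address);
    e.mapping_end = align_down(address + size);
    e.needs_start_partial = e.aligned_start < e.mapping_start;
    e.needs_end_partial = e.mapping_end < e.aligned_end;
    return e;
}

} // namespace sketch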
897
898Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
899 KProcessAddress src_addr, KMemoryPermission test_perm,
900 KMemoryState dst_state, KPageTable& src_page_table,
901 bool send) {
902 ASSERT(this->IsLockedByCurrentThread());
903 ASSERT(src_page_table.IsLockedByCurrentThread());
904
905 // Check that we can theoretically map.
906 const KProcessAddress region_start = m_alias_region_start;
907 const size_t region_size = m_alias_region_end - m_alias_region_start;
908 R_UNLESS(size < region_size, ResultOutOfAddressSpace);
909
910 // Get aligned source extents.
911 const KProcessAddress src_start = src_addr;
912 const KProcessAddress src_end = src_addr + size;
913 const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
914 const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
915 const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
916 const KProcessAddress mapping_src_end =
917 Common::AlignDown(GetInteger(src_start) + size, PageSize);
918 const size_t aligned_src_size = aligned_src_end - aligned_src_start;
919 const size_t mapping_src_size =
920 (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
921
922 // Select a random address to map at.
923 KProcessAddress dst_addr =
924 this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
925 PageSize, 0, this->GetNumGuardPages());
926
927 R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
928
929 // Check that we can perform the operation we're about to perform.
930 ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
931
932 // Create an update allocator.
933 Result allocator_result;
934 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
935 m_memory_block_slab_manager);
936 R_TRY(allocator_result);
937
938 // We're going to perform an update, so create a helper.
939 KScopedPageTableUpdater updater(this);
940
941 // Reserve space for any partial pages we allocate.
942 const size_t unmapped_size = aligned_src_size - mapping_src_size;
943 KScopedResourceReservation memory_reservation(
944 m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size);
945 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
946
947 // Ensure that we manage page references correctly.
948 KPhysicalAddress start_partial_page = 0;
949 KPhysicalAddress end_partial_page = 0;
950 KProcessAddress cur_mapped_addr = dst_addr;
951
952 // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
953 // be freed on scope exit.
954 SCOPE_EXIT({
955 if (start_partial_page != 0) {
956 m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
957 }
958 if (end_partial_page != 0) {
959 m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
960 }
961 });
962
963 ON_RESULT_FAILURE {
964 if (cur_mapped_addr != dst_addr) {
965 ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
966 KMemoryPermission::None, OperationType::Unmap)
967 .IsSuccess());
968 }
969 };
970
971 // Allocate the start page as needed.
972 if (aligned_src_start < mapping_src_start) {
973 start_partial_page =
974 m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
975 R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
976 }
977
978 // Allocate the end page as needed.
979 if (mapping_src_end < aligned_src_end &&
980 (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
981 end_partial_page =
982 m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
983 R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
984 }
985
986 // Get the implementation.
987 auto& src_impl = src_page_table.PageTableImpl();
988
989 // Get the fill value for partial pages.
990 const auto fill_val = m_ipc_fill_value;
991
992 // Begin traversal.
993 Common::PageTable::TraversalContext context;
994 Common::PageTable::TraversalEntry next_entry;
995 bool traverse_valid =
996 src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
997 ASSERT(traverse_valid);
998
999 // Prepare tracking variables.
1000 KPhysicalAddress cur_block_addr = next_entry.phys_addr;
1001 size_t cur_block_size =
1002 next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
1003 size_t tot_block_size = cur_block_size;
1004
1005 // Map the start page, if we have one.
1006 if (start_partial_page != 0) {
1007 // Ensure the page holds correct data.
1008 const KVirtualAddress start_partial_virt =
1009 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
1010 if (send) {
1011 const size_t partial_offset = src_start - aligned_src_start;
1012 size_t copy_size, clear_size;
1013 if (src_end < mapping_src_start) {
1014 copy_size = size;
1015 clear_size = mapping_src_start - src_end;
1016 } else {
1017 copy_size = mapping_src_start - src_start;
1018 clear_size = 0;
1019 }
1020
1021 std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
1022 partial_offset);
1023 std::memcpy(
1024 m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
1025 m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
1026 m_system.Kernel().MemoryLayout(), cur_block_addr)) +
1027 partial_offset),
1028 copy_size);
1029 if (clear_size > 0) {
1030 std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
1031 partial_offset + copy_size),
1032 fill_val, clear_size);
1033 }
1034 } else {
1035 std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
1036 PageSize);
1037 }
1038
1039 // Map the page.
1040 R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
1041
1042 // Update tracking extents.
1043 cur_mapped_addr += PageSize;
1044 cur_block_addr += PageSize;
1045 cur_block_size -= PageSize;
1046
1047 // If the block's size was one page, we may need to continue traversal.
1048 if (cur_block_size == 0 && aligned_src_size > PageSize) {
1049 traverse_valid = src_impl.ContinueTraversal(next_entry, context);
1050 ASSERT(traverse_valid);
1051
1052 cur_block_addr = next_entry.phys_addr;
1053 cur_block_size = next_entry.block_size;
1054 tot_block_size += next_entry.block_size;
1055 }
1056 }
1057
1058 // Map the remaining pages.
1059 while (aligned_src_start + tot_block_size < mapping_src_end) {
1060 // Continue the traversal.
1061 traverse_valid = src_impl.ContinueTraversal(next_entry, context);
1062 ASSERT(traverse_valid);
1063
1064 // Process the block.
1065 if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
1066 // Map the block we've been processing so far.
1067 R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
1068 cur_block_addr));
1069
1070 // Update tracking extents.
1071 cur_mapped_addr += cur_block_size;
1072 cur_block_addr = next_entry.phys_addr;
1073 cur_block_size = next_entry.block_size;
1074 } else {
1075 cur_block_size += next_entry.block_size;
1076 }
1077 tot_block_size += next_entry.block_size;
1078 }
1079
1080 // Handle the last direct-mapped page.
1081 if (const KProcessAddress mapped_block_end =
1082 aligned_src_start + tot_block_size - cur_block_size;
1083 mapped_block_end < mapping_src_end) {
1084 const size_t last_block_size = mapping_src_end - mapped_block_end;
1085
1086 // Map the last block.
1087 R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
1088 cur_block_addr));
1089
1090 // Update tracking extents.
1091 cur_mapped_addr += last_block_size;
1092 cur_block_addr += last_block_size;
1093 if (mapped_block_end + cur_block_size < aligned_src_end &&
1094 cur_block_size == last_block_size) {
1095 traverse_valid = src_impl.ContinueTraversal(next_entry, context);
1096 ASSERT(traverse_valid);
1097
1098 cur_block_addr = next_entry.phys_addr;
1099 }
1100 }
1101
1102 // Map the end page, if we have one.
1103 if (end_partial_page != 0) {
1104 // Ensure the page holds correct data.
1105 const KVirtualAddress end_partial_virt =
1106 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
1107 if (send) {
1108 const size_t copy_size = src_end - mapping_src_end;
1109 std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
1110 m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
1111 m_system.Kernel().MemoryLayout(), cur_block_addr))),
1112 copy_size);
1113 std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
1114 fill_val, PageSize - copy_size);
1115 } else {
1116 std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
1117 PageSize);
1118 }
1119
1120 // Map the page.
1121 R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
1122 }
1123
1124 // Update memory blocks to reflect our changes
1125 m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
1126 dst_state, test_perm, KMemoryAttribute::None,
1127 KMemoryBlockDisableMergeAttribute::Normal,
1128 KMemoryBlockDisableMergeAttribute::None);
1129
1130 // Set the output address.
1131 *out_addr = dst_addr + (src_start - aligned_src_start);
1132
1133 // We succeeded.
1134 memory_reservation.Commit();
1135 R_SUCCEED();
1136}
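// Illustrative sketch: building the partial start page above is plain byte bookkeeping. The
// bytes of the page that precede the client buffer get the IPC fill value, the buffer's head
// is copied in, and if the buffer also ends inside this page the remainder is filled too. The
// same arithmetic on raw buffers (hypothetical helper; assumes src_offset <= page_size and
// that `page` really is page_size bytes long):
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

namespace sketch {

// `src_offset` is where the client buffer starts inside the page, `src` points at the buffer's
// first byte, and `src_size` is how many bytes of the buffer remain from that point on.
inline void BuildPartialStartPage(std::uint8_t* page, std::size_t page_size, std::uint8_t fill,
                                  std::size_t src_offset, const std::uint8_t* src,
                                  std::size_t src_size) {
    const std::size_t copy_size = std::min(src_size, page_size - src_offset);
    std::memset(page, fill, src_offset);              // Fill the bytes before the buffer.
    std::memcpy(page + src_offset, src, copy_size);   // Copy the buffer's head.
    std::memset(page + src_offset + copy_size, fill,  // Fill any bytes after the buffer.
                page_size - src_offset - copy_size);
}

} // namespace sketch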
1137
1138Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
1139 KPageTable& src_page_table, KMemoryPermission test_perm,
1140 KMemoryState dst_state, bool send) {
1141 // For convenience, alias this.
1142 KPageTable& dst_page_table = *this;
1143
1144 // Acquire the table locks.
1145 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
1146
1147 // We're going to perform an update, so create a helper.
1148 KScopedPageTableUpdater updater(std::addressof(src_page_table));
1149
1150 // Perform client setup.
1151 size_t num_allocator_blocks;
1152 R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
1153 std::addressof(num_allocator_blocks), src_addr, size,
1154 test_perm, dst_state));
1155
1156 // Create an update allocator.
1157 Result allocator_result;
1158 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1159 src_page_table.m_memory_block_slab_manager,
1160 num_allocator_blocks);
1161 R_TRY(allocator_result);
1162
1163 // Get the mapped extents.
1164 const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
1165 const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
1166 const size_t src_map_size = src_map_end - src_map_start;
1167
1168 // Ensure that we clean up appropriately if we fail after this.
1169 const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
1170 ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
1171 : KMemoryPermission::UserRead;
1172 ON_RESULT_FAILURE {
1173 if (src_map_end > src_map_start) {
1174 src_page_table.CleanupForIpcClientOnServerSetupFailure(
1175 updater.GetPageList(), src_map_start, src_map_size, src_perm);
1176 }
1177 };
1178
1179 // Perform server setup.
1180 R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
1181 src_page_table, send));
1182
1183 // If anything was mapped, ipc-lock the pages.
1184 if (src_map_start < src_map_end) {
1185 // Get the source permission.
1186 src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
1187 (src_map_end - src_map_start) / PageSize,
1188 &KMemoryBlock::LockForIpc, src_perm);
1189 }
1190
1191 R_SUCCEED();
1192}
1193
1194Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
1195 KMemoryState dst_state) {
1196 // Validate the address.
1197 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1198
1199 // Lock the table.
1200 KScopedLightLock lk(m_general_lock);
1201
1202 // Validate the memory state.
1203 size_t num_allocator_blocks;
1204 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1205 KMemoryState::All, dst_state, KMemoryPermission::UserRead,
1206 KMemoryPermission::UserRead, KMemoryAttribute::All,
1207 KMemoryAttribute::None));
1208
1209 // Create an update allocator.
1210 Result allocator_result;
1211 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1212 m_memory_block_slab_manager, num_allocator_blocks);
1213 R_TRY(allocator_result);
1214
1215 // We're going to perform an update, so create a helper.
1216 KScopedPageTableUpdater updater(this);
1217
1218 // Get aligned extents.
1219 const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
1220 const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
1221 const size_t aligned_size = aligned_end - aligned_start;
1222 const size_t aligned_num_pages = aligned_size / PageSize;
1223
1224 // Unmap the pages.
1225 R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
1226
1227 // Update memory blocks.
1228 m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
1229 KMemoryState::None, KMemoryPermission::None,
1230 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1231 KMemoryBlockDisableMergeAttribute::Normal);
1232
1233 // Release from the resource limit as relevant.
1234 const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
1235 const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
1236 const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
1237 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
1238
1239 R_SUCCEED();
1240}
1241
1242Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
1243 KMemoryState dst_state) {
1244 // Validate the address.
1245 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1246
1247 // Get aligned source extents.
1248 const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
1249 const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
1250 const KProcessAddress mapping_last = mapping_end - 1;
1251 const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
1252
1253 // If nothing was mapped, we're actually done immediately.
1254 R_SUCCEED_IF(mapping_size == 0);
1255
1256 // Get the test state and attribute mask.
1257 KMemoryState test_state;
1258 KMemoryAttribute test_attr_mask;
1259 switch (dst_state) {
1260 case KMemoryState::Ipc:
1261 test_state = KMemoryState::FlagCanUseIpc;
1262 test_attr_mask =
1263 KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
1264 break;
1265 case KMemoryState::NonSecureIpc:
1266 test_state = KMemoryState::FlagCanUseNonSecureIpc;
1267 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
1268 break;
1269 case KMemoryState::NonDeviceIpc:
1270 test_state = KMemoryState::FlagCanUseNonDeviceIpc;
1271 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
1272 break;
1273 default:
1274 R_THROW(ResultInvalidCombination);
1275 }
1276
1277 // Lock the table.
1278 // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
1279 // convention elsewhere in KPageTable.
1280 KScopedLightLock lk(m_general_lock);
1281
1282 // We're going to perform an update, so create a helper.
1283 KScopedPageTableUpdater updater(this);
1284
1285 // Ensure that on failure, we roll back appropriately.
1286 size_t mapped_size = 0;
1287 ON_RESULT_FAILURE {
1288 if (mapped_size > 0) {
1289 // Determine where the mapping ends.
1290 const auto mapped_end = (mapping_start) + mapped_size;
1291 const auto mapped_last = mapped_end - 1;
1292
1293 // Get current and next iterators.
1294 KMemoryBlockManager::const_iterator start_it =
1295 m_memory_block_manager.FindIterator(mapping_start);
1296 KMemoryBlockManager::const_iterator next_it = start_it;
1297 ++next_it;
1298
1299 // Get the current block info.
1300 KMemoryInfo cur_info = start_it->GetMemoryInfo();
1301
1302 // Create tracking variables.
1303 KProcessAddress cur_address = cur_info.GetAddress();
1304 size_t cur_size = cur_info.GetSize();
1305 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
1306 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
1307 bool first =
1308 cur_info.GetIpcDisableMergeCount() == 1 &&
1309 (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
1310 KMemoryBlockDisableMergeAttribute::None;
1311
1312 while (((cur_address) + cur_size - 1) < mapped_last) {
1313 // Check that we have a next block.
1314 ASSERT(next_it != m_memory_block_manager.end());
1315
1316 // Get the next info.
1317 const KMemoryInfo next_info = next_it->GetMemoryInfo();
1318
1319 // Check if we can consolidate the next block's permission set with the current one.
1320
1321 const bool next_perm_eq =
1322 next_info.GetPermission() == next_info.GetOriginalPermission();
1323 const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
1324 if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
1325 cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
1326 // We can consolidate the reprotection for the current and next block into a
1327 // single call.
1328 cur_size += next_info.GetSize();
1329 } else {
1330 // We have to operate on the current block.
1331 if ((cur_needs_set_perm || first) && !cur_perm_eq) {
1332 ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
1333 OperationType::ChangePermissions)
1334 .IsSuccess());
1335 }
1336
1337 // Advance.
1338 cur_address = next_info.GetAddress();
1339 cur_size = next_info.GetSize();
1340 first = false;
1341 }
1342
1343 // Advance.
1344 cur_info = next_info;
1345 cur_perm_eq = next_perm_eq;
1346 cur_needs_set_perm = next_needs_set_perm;
1347 ++next_it;
1348 }
1349
1350 // Process the last block.
1351 if ((first || cur_needs_set_perm) && !cur_perm_eq) {
1352 ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
1353 OperationType::ChangePermissions)
1354 .IsSuccess());
1355 }
1356 }
1357 };
1358
1359 // Iterate, reprotecting as needed.
1360 {
1361 // Get current and next iterators.
1362 KMemoryBlockManager::const_iterator start_it =
1363 m_memory_block_manager.FindIterator(mapping_start);
1364 KMemoryBlockManager::const_iterator next_it = start_it;
1365 ++next_it;
1366
1367 // Validate the current block.
1368 KMemoryInfo cur_info = start_it->GetMemoryInfo();
1369 ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
1370 KMemoryPermission::None,
1371 test_attr_mask | KMemoryAttribute::IpcLocked,
1372 KMemoryAttribute::IpcLocked)
1373 .IsSuccess());
1374
1375 // Create tracking variables.
1376 KProcessAddress cur_address = cur_info.GetAddress();
1377 size_t cur_size = cur_info.GetSize();
1378 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
1379 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
1380 bool first =
1381 cur_info.GetIpcDisableMergeCount() == 1 &&
1382 (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
1383 KMemoryBlockDisableMergeAttribute::None;
1384
1385 while ((cur_address + cur_size - 1) < mapping_last) {
1386 // Check that we have a next block.
1387 ASSERT(next_it != m_memory_block_manager.end());
1388
1389 // Get the next info.
1390 const KMemoryInfo next_info = next_it->GetMemoryInfo();
1391
1392 // Validate the next block.
1393 ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
1394 KMemoryPermission::None, KMemoryPermission::None,
1395 test_attr_mask | KMemoryAttribute::IpcLocked,
1396 KMemoryAttribute::IpcLocked)
1397 .IsSuccess());
1398
1399 // Check if we can consolidate the next block's permission set with the current one.
1400 const bool next_perm_eq =
1401 next_info.GetPermission() == next_info.GetOriginalPermission();
1402 const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
1403 if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
1404 cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
1405 // We can consolidate the reprotection for the current and next block into a single
1406 // call.
1407 cur_size += next_info.GetSize();
1408 } else {
1409 // We have to operate on the current block.
1410 if ((cur_needs_set_perm || first) && !cur_perm_eq) {
1411 R_TRY(Operate(cur_address, cur_size / PageSize,
1412 cur_needs_set_perm ? cur_info.GetOriginalPermission()
1413 : cur_info.GetPermission(),
1414 OperationType::ChangePermissions));
1415 }
1416
1417 // Mark that we mapped the block.
1418 mapped_size += cur_size;
1419
1420 // Advance.
1421 cur_address = next_info.GetAddress();
1422 cur_size = next_info.GetSize();
1423 first = false;
1424 }
1425
1426 // Advance.
1427 cur_info = next_info;
1428 cur_perm_eq = next_perm_eq;
1429 cur_needs_set_perm = next_needs_set_perm;
1430 ++next_it;
1431 }
1432
1433 // Process the last block.
1434 const auto lock_count =
1435 cur_info.GetIpcLockCount() +
1436 (next_it != m_memory_block_manager.end()
1437 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
1438 : 0);
1439 if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
1440 R_TRY(Operate(cur_address, cur_size / PageSize,
1441 cur_needs_set_perm ? cur_info.GetOriginalPermission()
1442 : cur_info.GetPermission(),
1443 OperationType::ChangePermissions));
1444 }
1445 }
1446
1447 // Create an update allocator.
1448 // NOTE: Guaranteed zero blocks needed here.
1449 Result allocator_result;
1450 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1451 m_memory_block_slab_manager, 0);
1452 R_TRY(allocator_result);
1453
1454 // Unlock the pages.
1455 m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
1456 mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
1457 KMemoryPermission::None);
1458
1459 R_SUCCEED();
1460}
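// Illustrative sketch: the reprotection loop above deliberately batches neighbours. As long as
// the next block needs the same treatment (its permission already matches, or it needs the
// same permission restored), its size is folded into the current run so a single permission
// change covers the whole run. A stripped-down version of that run merging (hypothetical
// Block/Reprotect types; assumes the blocks are adjacent and in address order):
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {

struct Block {
    std::uint64_t address;
    std::uint64_t size;
    int target_perm;   // The permission that should be restored.
    bool needs_change; // Whether the current permission differs from the target.
};

struct Reprotect {
    std::uint64_t address;
    std::uint64_t size;
    int perm;
};

inline std::vector<Reprotect> ConsolidateReprotects(const std::vector<Block>& blocks) {
    std::vector<Reprotect> out;
    std::size_t i = 0;
    while (i < blocks.size()) {
        std::uint64_t run_size = blocks[i].size;
        std::size_t j = i + 1;
        // Fold in neighbours that need exactly the same treatment.
        while (j < blocks.size() && blocks[j].needs_change == blocks[i].needs_change &&
               blocks[j].target_perm == blocks[i].target_perm) {
            run_size += blocks[j].size;
            ++j;
        }
        if (blocks[i].needs_change) {
            out.push_back({blocks[i].address, run_size, blocks[i].target_perm});
        }
        i = j;
    }
    return out;
}

} // namespace sketch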
1461
1462void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
1463 KProcessAddress address, size_t size,
1464 KMemoryPermission prot_perm) {
1465 ASSERT(this->IsLockedByCurrentThread());
1466 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
1467 ASSERT(Common::IsAligned(size, PageSize));
1468
1469 // Get the mapped extents.
1470 const KProcessAddress src_map_start = address;
1471 const KProcessAddress src_map_end = address + size;
1472 const KProcessAddress src_map_last = src_map_end - 1;
1473
1474 // This function is only invoked when there's something to do.
1475 ASSERT(src_map_end > src_map_start);
1476
1477 // Iterate over blocks, fixing permissions.
1478 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
1479 while (true) {
1480 const KMemoryInfo info = it->GetMemoryInfo();
1481
1482 const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
1483 ? info.GetAddress()
1484 : GetInteger(src_map_start);
1485 const auto cur_end =
1486 src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
1487
1488 // If we can, fix the protections on the block.
1489 if ((info.GetIpcLockCount() == 0 &&
1490 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
1491 (info.GetIpcLockCount() != 0 &&
1492 (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
1493 // Check if we actually need to fix the protections on the block.
1494 if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
1495 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
1496 ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
1497 OperationType::ChangePermissions)
1498 .IsSuccess());
1499 }
1500 }
1501
1502 // If we're past the end of the region, we're done.
1503 if (src_map_last <= info.GetLastAddress()) {
1504 break;
1505 }
1506
1507 // Advance.
1508 ++it;
1509 ASSERT(it != m_memory_block_manager.end());
1510 }
1511}
1512
1513Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
1514 // Lock the physical memory lock.
1515 KScopedLightLock phys_lk(m_map_physical_memory_lock);
1516
1517 // Calculate the last address for convenience.
1518 const KProcessAddress last_address = address + size - 1;
1519
1520 // Define iteration variables.
1521 KProcessAddress cur_address;
1522 size_t mapped_size;
1523
1524 // The entire mapping process can be retried.
1525 while (true) {
1526 // Check if the memory is already mapped.
1527 {
1528 // Lock the table.
1529 KScopedLightLock lk(m_general_lock);
1530
1531 // Iterate over the memory.
1532 cur_address = address;
1533 mapped_size = 0;
1534
1535 auto it = m_memory_block_manager.FindIterator(cur_address);
1536 while (true) {
1537 // Check that the iterator is valid.
1538 ASSERT(it != m_memory_block_manager.end());
1539
1540 // Get the memory info.
1541 const KMemoryInfo info = it->GetMemoryInfo();
1542
1543 // Check if we're done.
1544 if (last_address <= info.GetLastAddress()) {
1545 if (info.GetState() != KMemoryState::Free) {
1546 mapped_size += (last_address + 1 - cur_address);
1547 }
1548 break;
1549 }
1550
1551 // Track the memory if it's mapped.
1552 if (info.GetState() != KMemoryState::Free) {
1553 mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
1554 }
1555
1556 // Advance.
1557 cur_address = info.GetEndAddress();
1558 ++it;
1559 }
1560
1561 // If the size mapped is the size requested, we've nothing to do.
1562 R_SUCCEED_IF(size == mapped_size);
1563 }
1564
1565 // Allocate and map the memory.
1566 {
1567 // Reserve the memory from the process resource limit.
1568 KScopedResourceReservation memory_reservation(
1569 m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size);
1570 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
1571
1572 // Allocate pages for the new memory.
1573 KPageGroup pg{m_kernel, m_block_info_manager};
1574 R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
1575 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
1576
1577 // If we fail in the next bit (or retry), we need to clean up the pages.
1578 // auto pg_guard = SCOPE_GUARD {
1579 // pg.OpenFirst();
1580 // pg.Close();
1581 //};
1582
1583 // Map the memory.
1584 {
1585 // Lock the table.
1586 KScopedLightLock lk(m_general_lock);
1587
1588 size_t num_allocator_blocks = 0;
1589
1590 // Verify that nobody has mapped memory since we first checked.
1591 {
1592 // Iterate over the memory.
1593 size_t checked_mapped_size = 0;
1594 cur_address = address;
1595
1596 auto it = m_memory_block_manager.FindIterator(cur_address);
1597 while (true) {
1598 // Check that the iterator is valid.
1599 ASSERT(it != m_memory_block_manager.end());
1600
1601 // Get the memory info.
1602 const KMemoryInfo info = it->GetMemoryInfo();
1603
1604 const bool is_free = info.GetState() == KMemoryState::Free;
1605 if (is_free) {
1606 if (info.GetAddress() < GetInteger(address)) {
1607 ++num_allocator_blocks;
1608 }
1609 if (last_address < info.GetLastAddress()) {
1610 ++num_allocator_blocks;
1611 }
1612 }
1613
1614 // Check if we're done.
1615 if (last_address <= info.GetLastAddress()) {
1616 if (!is_free) {
1617 checked_mapped_size += (last_address + 1 - cur_address);
1618 }
1619 break;
1620 }
1621
1622 // Track the memory if it's mapped.
1623 if (!is_free) {
1624 checked_mapped_size +=
1625 KProcessAddress(info.GetEndAddress()) - cur_address;
1626 }
1627
1628 // Advance.
1629 cur_address = info.GetEndAddress();
1630 ++it;
1631 }
1632
1633 // If the size now isn't what it was before, somebody mapped or unmapped
1634 // concurrently. If this happened, retry.
1635 if (mapped_size != checked_mapped_size) {
1636 continue;
1637 }
1638 }
1639
1640 // Create an update allocator.
1641 ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
1642 Result allocator_result;
1643 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1644 m_memory_block_slab_manager,
1645 num_allocator_blocks);
1646 R_TRY(allocator_result);
1647
1648 // We're going to perform an update, so create a helper.
1649 KScopedPageTableUpdater updater(this);
1650
1651 // Prepare to iterate over the memory.
1652 auto pg_it = pg.begin();
1653 KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
1654 size_t pg_pages = pg_it->GetNumPages();
1655
1656 // Reset the current tracking address, and make sure we clean up on failure.
1657 // pg_guard.Cancel();
1658 cur_address = address;
1659 ON_RESULT_FAILURE {
1660 if (cur_address > address) {
1661 const KProcessAddress last_unmap_address = cur_address - 1;
1662
1663 // Iterate, unmapping the pages.
1664 cur_address = address;
1665
1666 auto it = m_memory_block_manager.FindIterator(cur_address);
1667 while (true) {
1668 // Check that the iterator is valid.
1669 ASSERT(it != m_memory_block_manager.end());
1670
1671 // Get the memory info.
1672 const KMemoryInfo info = it->GetMemoryInfo();
1673
1674 // If the memory state is free, we mapped it and need to unmap it.
1675 if (info.GetState() == KMemoryState::Free) {
1676 // Determine the range to unmap.
1677 const size_t cur_pages =
1678 std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
1679 last_unmap_address + 1 - cur_address) /
1680 PageSize;
1681
1682 // Unmap.
1683 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
1684 OperationType::Unmap)
1685 .IsSuccess());
1686 }
1687
1688 // Check if we're done.
1689 if (last_unmap_address <= info.GetLastAddress()) {
1690 break;
1691 }
1692
1693 // Advance.
1694 cur_address = info.GetEndAddress();
1695 ++it;
1696 }
1697 }
1698
1699 // Release any remaining unmapped memory.
1700 m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
1701 m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
1702 for (++pg_it; pg_it != pg.end(); ++pg_it) {
1703 m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
1704 pg_it->GetNumPages());
1705 m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
1706 pg_it->GetNumPages());
1707 }
1708 };
1709
1710 auto it = m_memory_block_manager.FindIterator(cur_address);
1711 while (true) {
1712 // Check that the iterator is valid.
1713 ASSERT(it != m_memory_block_manager.end());
1714
1715 // Get the memory info.
1716 const KMemoryInfo info = it->GetMemoryInfo();
1717
1718 // If it's unmapped, we need to map it.
1719 if (info.GetState() == KMemoryState::Free) {
1720 // Determine the range to map.
1721 size_t map_pages =
1722 std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
1723 last_address + 1 - cur_address) /
1724 PageSize;
1725
1726 // While we have pages to map, map them.
1727 {
1728 // Create a page group for the current mapping range.
1729 KPageGroup cur_pg(m_kernel, m_block_info_manager);
1730 {
1731 ON_RESULT_FAILURE_2 {
1732 cur_pg.OpenFirst();
1733 cur_pg.Close();
1734 };
1735
1736 size_t remain_pages = map_pages;
1737 while (remain_pages > 0) {
1738 // Check if we're at the end of the physical block.
1739 if (pg_pages == 0) {
1740 // Ensure there are more pages to map.
1741 ASSERT(pg_it != pg.end());
1742
1743 // Advance our physical block.
1744 ++pg_it;
1745 pg_phys_addr = pg_it->GetAddress();
1746 pg_pages = pg_it->GetNumPages();
1747 }
1748
1749 // Add whatever we can to the current block.
1750 const size_t cur_pages = std::min(pg_pages, remain_pages);
1751 R_TRY(cur_pg.AddBlock(pg_phys_addr +
1752 ((pg_pages - cur_pages) * PageSize),
1753 cur_pages));
1754
1755 // Advance.
1756 remain_pages -= cur_pages;
1757 pg_pages -= cur_pages;
1758 }
1759 }
1760
1761 // Map the pages.
1762 R_TRY(this->Operate(cur_address, map_pages, cur_pg,
1763 OperationType::MapFirstGroup));
1764 }
1765 }
1766
1767 // Check if we're done.
1768 if (last_address <= info.GetLastAddress()) {
1769 break;
1770 }
1771
1772 // Advance.
1773 cur_address = info.GetEndAddress();
1774 ++it;
1775 }
1776
1777 // We succeeded, so commit the memory reservation.
1778 memory_reservation.Commit();
1779
1780 // Increase our tracked mapped size.
1781 m_mapped_physical_memory_size += (size - mapped_size);
1782
1783 // Update the relevant memory blocks.
1784 m_memory_block_manager.UpdateIfMatch(
1785 std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
1786 KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
1787 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1788 address == this->GetAliasRegionStart()
1789 ? KMemoryBlockDisableMergeAttribute::Normal
1790 : KMemoryBlockDisableMergeAttribute::None,
1791 KMemoryBlockDisableMergeAttribute::None);
1792
1793 R_SUCCEED();
1794 }
1795 }
1796 }
1797}
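// Illustrative sketch: MapPhysicalMemory is an optimistic-retry loop. It measures how much of
// the range is still unmapped under the table lock, drops that lock to allocate the pages
// (which may be slow), then re-checks under the lock and starts over if another thread mapped
// or unmapped memory in the meantime. The control flow in miniature (hypothetical callables;
// error handling and freeing of the allocation on retry are elided here):
#include <cstddef>
#include <mutex>

namespace sketch {

template <typename Measure, typename Allocate, typename Commit>
void MapWithRetry(std::mutex& table_lock, Measure measure, Allocate allocate, Commit commit) {
    while (true) {
        std::size_t needed = 0;
        {
            std::lock_guard lk(table_lock);
            needed = measure(); // How much of the range is still unmapped?
            if (needed == 0) {
                return; // Already fully mapped: nothing to do.
            }
        }

        // Allocation can block or take a while, so it happens without the table lock held.
        auto pages = allocate(needed);

        {
            std::lock_guard lk(table_lock);
            if (measure() != needed) {
                // Somebody mapped or unmapped concurrently; try again (freeing `pages` first
                // in a real implementation).
                continue;
            }
            commit(pages); // Still consistent: map the pages and update the memory blocks.
            return;
        }
    }
}

} // namespace sketch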
1798
1799Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
1800 // Lock the physical memory lock.
1801 KScopedLightLock phys_lk(m_map_physical_memory_lock);
1802
1803 // Lock the table.
1804 KScopedLightLock lk(m_general_lock);
1805
1806 // Calculate the last address for convenience.
1807 const KProcessAddress last_address = address + size - 1;
1808
1809 // Define iteration variables.
1810 KProcessAddress map_start_address = 0;
1811 KProcessAddress map_last_address = 0;
1812
1813 KProcessAddress cur_address;
1814 size_t mapped_size;
1815 size_t num_allocator_blocks = 0;
1816
1817 // Check if the memory is mapped.
1818 {
1819 // Iterate over the memory.
1820 cur_address = address;
1821 mapped_size = 0;
1822
1823 auto it = m_memory_block_manager.FindIterator(cur_address);
1824 while (true) {
1825 // Check that the iterator is valid.
1826 ASSERT(it != m_memory_block_manager.end());
1827
1828 // Get the memory info.
1829 const KMemoryInfo info = it->GetMemoryInfo();
1830
1831 // Verify the memory's state.
1832 const bool is_normal = info.GetState() == KMemoryState::Normal &&
1833 info.GetAttribute() == KMemoryAttribute::None;
1834 const bool is_free = info.GetState() == KMemoryState::Free;
1835 R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
1836
1837 if (is_normal) {
1838 R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
1839
1840 if (map_start_address == 0) {
1841 map_start_address = cur_address;
1842 }
1843 map_last_address =
1844 (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
1845
1846 if (info.GetAddress() < GetInteger(address)) {
1847 ++num_allocator_blocks;
1848 }
1849 if (last_address < info.GetLastAddress()) {
1850 ++num_allocator_blocks;
1851 }
1852
1853 mapped_size += (map_last_address + 1 - cur_address);
1854 }
1855
1856 // Check if we're done.
1857 if (last_address <= info.GetLastAddress()) {
1858 break;
1859 }
1860
1861 // Advance.
1862 cur_address = info.GetEndAddress();
1863 ++it;
1864 }
1865
1866 // If there's nothing mapped, we've nothing to do.
1867 R_SUCCEED_IF(mapped_size == 0);
1868 }
1869
1870 // Create an update allocator.
1871 ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
1872 Result allocator_result;
1873 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1874 m_memory_block_slab_manager, num_allocator_blocks);
1875 R_TRY(allocator_result);
1876
1877 // We're going to perform an update, so create a helper.
1878 KScopedPageTableUpdater updater(this);
1879
1880 // Separate the mapping.
1881 R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
1882 KMemoryPermission::None, OperationType::Separate));
1883
1884 // Reset the current tracking address, and make sure we clean up on failure.
1885 cur_address = address;
1886
1887 // Iterate over the memory, unmapping as we go.
1888 auto it = m_memory_block_manager.FindIterator(cur_address);
1889
1890 const auto clear_merge_attr =
1891 (it->GetState() == KMemoryState::Normal &&
1892 it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
1893 ? KMemoryBlockDisableMergeAttribute::Normal
1894 : KMemoryBlockDisableMergeAttribute::None;
1895
1896 while (true) {
1897 // Check that the iterator is valid.
1898 ASSERT(it != m_memory_block_manager.end());
1899
1900 // Get the memory info.
1901 const KMemoryInfo info = it->GetMemoryInfo();
1902
1903 // If the memory state is normal, we need to unmap it.
1904 if (info.GetState() == KMemoryState::Normal) {
1905 // Determine the range to unmap.
1906 const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
1907 last_address + 1 - cur_address) /
1908 PageSize;
1909
1910 // Unmap.
1911 ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
1912 .IsSuccess());
1913 }
1914
1915 // Check if we're done.
1916 if (last_address <= info.GetLastAddress()) {
1917 break;
1918 }
1919
1920 // Advance.
1921 cur_address = info.GetEndAddress();
1922 ++it;
1923 }
1924
1925 // Release the memory resource.
1926 m_mapped_physical_memory_size -= mapped_size;
1927 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size);
1928
1929 // Update memory blocks.
1930 m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
1931 KMemoryState::Free, KMemoryPermission::None,
1932 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1933 clear_merge_attr);
1934
1935 // We succeeded.
1936 R_SUCCEED();
1937}
1938
1939Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
1940 size_t size) {
1941 // Lock the table.
1942 KScopedLightLock lk(m_general_lock);
1943
1944 // Validate that the source address's state is valid.
1945 KMemoryState src_state;
1946 size_t num_src_allocator_blocks;
1947 R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
1948 std::addressof(num_src_allocator_blocks), src_address, size,
1949 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
1950 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
1951 KMemoryAttribute::All, KMemoryAttribute::None));
1952
1953 // Validate that the dst address's state is valid.
1954 size_t num_dst_allocator_blocks;
1955 R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
1956 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
1957 KMemoryPermission::None, KMemoryAttribute::None,
1958 KMemoryAttribute::None));
1959
1960 // Create an update allocator for the source.
1961 Result src_allocator_result;
1962 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
1963 m_memory_block_slab_manager,
1964 num_src_allocator_blocks);
1965 R_TRY(src_allocator_result);
1966
1967 // Create an update allocator for the destination.
1968 Result dst_allocator_result;
1969 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
1970 m_memory_block_slab_manager,
1971 num_dst_allocator_blocks);
1972 R_TRY(dst_allocator_result);
1973
1974 // Map the memory.
1975 {
1976 // Determine the number of pages being operated on.
1977 const size_t num_pages = size / PageSize;
1978
1980 // Create page groups for the memory being mapped.
1980 KPageGroup pg{m_kernel, m_block_info_manager};
1981
1982 // Create the page group representing the source.
1983 R_TRY(this->MakePageGroup(pg, src_address, num_pages));
1984
1985 // We're going to perform an update, so create a helper.
1986 KScopedPageTableUpdater updater(this);
1987
1988 // Reprotect the source as kernel-read/not mapped.
1989 const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
1990 KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
1991 const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
1992 const KPageProperties src_properties = {new_src_perm, false, false,
1993 DisableMergeAttribute::DisableHeadBodyTail};
1994 R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
1995 OperationType::ChangePermissions));
1996
1997 // Ensure that we unprotect the source pages on failure.
1998 ON_RESULT_FAILURE {
1999 const KPageProperties unprotect_properties = {
2000 KMemoryPermission::UserReadWrite, false, false,
2001 DisableMergeAttribute::EnableHeadBodyTail};
2002 ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
2003 OperationType::ChangePermissions) == ResultSuccess);
2004 };
2005
2006 // Map the alias pages.
2007 const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
2008 DisableMergeAttribute::DisableHead};
2009 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
2010 false));
2011
2012 // Apply the memory block updates.
2013 m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
2014 src_state, new_src_perm, new_src_attr,
2015 KMemoryBlockDisableMergeAttribute::Locked,
2016 KMemoryBlockDisableMergeAttribute::None);
2017 m_memory_block_manager.Update(
2018 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
2019 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
2020 KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
2021 }
2022
2023 R_SUCCEED();
2024}
2025
2026Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
2027 size_t size) {
2028 // Lock the table.
2029 KScopedLightLock lk(m_general_lock);
2030
2031 // Validate that the source address's state is valid.
2032 KMemoryState src_state;
2033 size_t num_src_allocator_blocks;
2034 R_TRY(this->CheckMemoryState(
2035 std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
2036 src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
2037 KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
2038 KMemoryAttribute::All, KMemoryAttribute::Locked));
2039
2040 // Validate that the dst address's state is valid.
2041 KMemoryPermission dst_perm;
2042 size_t num_dst_allocator_blocks;
2043 R_TRY(this->CheckMemoryState(
2044 nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
2045 dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
2046 KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
2047
2048 // Create an update allocator for the source.
2049 Result src_allocator_result;
2050 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
2051 m_memory_block_slab_manager,
2052 num_src_allocator_blocks);
2053 R_TRY(src_allocator_result);
2054
2055 // Create an update allocator for the destination.
2056 Result dst_allocator_result;
2057 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
2058 m_memory_block_slab_manager,
2059 num_dst_allocator_blocks);
2060 R_TRY(dst_allocator_result);
2061
2062 // Unmap the memory.
2063 {
2064 // Determine the number of pages being operated on.
2065 const size_t num_pages = size / PageSize;
2066
2067 // Create page groups for the memory being unmapped.
2068 KPageGroup pg{m_kernel, m_block_info_manager};
2069
2070 // Create the page group representing the destination.
2071 R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
2072
2073 // Ensure the page group is valid for the source.
2074 R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
2075
2076 // We're going to perform an update, so create a helper.
2077 KScopedPageTableUpdater updater(this);
2078
2079 // Unmap the aliased copy of the pages.
2080 const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
2081 DisableMergeAttribute::None};
2082 R_TRY(
2083 this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
2084
2085 // Ensure that we re-map the aliased pages on failure.
2086 ON_RESULT_FAILURE {
2087 this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
2088 };
2089
2090 // Try to set the permissions for the source pages back to what they should be.
2091 const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
2092 DisableMergeAttribute::EnableAndMergeHeadBodyTail};
2093 R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
2094 OperationType::ChangePermissions));
2095
2096 // Apply the memory block updates.
2097 m_memory_block_manager.Update(
2098 std::addressof(src_allocator), src_address, num_pages, src_state,
2099 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
2100 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
2101 m_memory_block_manager.Update(
2102 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
2103 KMemoryPermission::None, KMemoryAttribute::None,
2104 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
2105 }
2106
2107 R_SUCCEED();
2108}
2109
2110Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
2111 size_t num_pages, KMemoryPermission perm) {
2112 ASSERT(this->IsLockedByCurrentThread());
2113
2114 // Create a page group to hold the pages we allocate.
2115 KPageGroup pg{m_kernel, m_block_info_manager};
2116
2117 // Allocate the pages.
2118 R_TRY(
2119 m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
2120
2121 // Ensure that the page group is closed when we're done working with it.
2122 SCOPE_EXIT({ pg.Close(); });
2123
2124 // Clear all pages.
2125 for (const auto& it : pg) {
2126 std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
2127 it.GetSize());
2128 }
2129
2130 // Map the pages.
2131 R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
2132}
2133
2134Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
2135 const KPageGroup& pg, const KPageProperties properties,
2136 bool reuse_ll) {
2137 ASSERT(this->IsLockedByCurrentThread());
2138
2139 // Note the current address, so that we can iterate.
2140 const KProcessAddress start_address = address;
2141 KProcessAddress cur_address = address;
2142
2143 // Ensure that we clean up on failure.
2144 ON_RESULT_FAILURE {
2145 ASSERT(!reuse_ll);
2146 if (cur_address != start_address) {
2147 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
2148 DisableMergeAttribute::None};
2149 ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
2150 unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
2151 }
2152 };
2153
2154 // Iterate, mapping all pages in the group.
2155 for (const auto& block : pg) {
2156 // Map and advance.
2157 const KPageProperties cur_properties =
2158 (cur_address == start_address)
2159 ? properties
2160 : KPageProperties{properties.perm, properties.io, properties.uncached,
2161 DisableMergeAttribute::None};
2162 R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm,
2163 OperationType::Map, block.GetAddress()));
2164 cur_address += block.GetSize();
2165 }
2166
2167 // We succeeded!
2168 R_SUCCEED();
2169}
2170
2171void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
2172 const KPageGroup& pg) {
2173 ASSERT(this->IsLockedByCurrentThread());
2174
2175 // Note the current address, so that we can iterate.
2176 const KProcessAddress start_address = address;
2177 const KProcessAddress last_address = start_address + size - 1;
2178 const KProcessAddress end_address = last_address + 1;
2179
2180 // Iterate over the memory.
2181 auto pg_it = pg.begin();
2182 ASSERT(pg_it != pg.end());
2183
2184 KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
2185 size_t pg_pages = pg_it->GetNumPages();
2186
2187 auto it = m_memory_block_manager.FindIterator(start_address);
2188 while (true) {
2189 // Check that the iterator is valid.
2190 ASSERT(it != m_memory_block_manager.end());
2191
2192 // Get the memory info.
2193 const KMemoryInfo info = it->GetMemoryInfo();
2194
2195 // Determine the range to map.
2196 KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
2197 const KProcessAddress map_end_address =
2198 std::min<KProcessAddress>(info.GetEndAddress(), end_address);
2199 ASSERT(map_end_address != map_address);
2200
2201 // Determine if we should disable head merge.
2202 const bool disable_head_merge =
2203 info.GetAddress() >= GetInteger(start_address) &&
2204 True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
2205 const KPageProperties map_properties = {
2206 info.GetPermission(), false, false,
2207 disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
2208
2209 // While we have pages to map, map them.
2210 size_t map_pages = (map_end_address - map_address) / PageSize;
2211 while (map_pages > 0) {
2212 // Check if we're at the end of the physical block.
2213 if (pg_pages == 0) {
2214 // Ensure there are more pages to map.
2215 ASSERT(pg_it != pg.end());
2216
2217 // Advance our physical block.
2218 ++pg_it;
2219 pg_phys_addr = pg_it->GetAddress();
2220 pg_pages = pg_it->GetNumPages();
2221 }
2222
2223 // Map whatever we can.
2224 const size_t cur_pages = std::min(pg_pages, map_pages);
2225 ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map,
2226 pg_phys_addr) == ResultSuccess);
2227
2228 // Advance.
2229 map_address += cur_pages * PageSize;
2230 map_pages -= cur_pages;
2231
2232 pg_phys_addr += cur_pages * PageSize;
2233 pg_pages -= cur_pages;
2234 }
2235
2236 // Check if we're done.
2237 if (last_address <= info.GetLastAddress()) {
2238 break;
2239 }
2240
2241 // Advance.
2242 ++it;
2243 }
2244
2245 // Check that we re-mapped precisely the page group.
2246 ASSERT((++pg_it) == pg.end());
2247}
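
RemapPageGroup walks two sequences in lockstep: virtual ranges taken from the memory block manager and physical runs taken from the page group, mapping min(pg_pages, map_pages) pages per step. A minimal standalone sketch of that walk (hypothetical Run type and kPageSize constant, assumes non-empty inputs, not the kernel's own API) looks roughly like this:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace sketch {

constexpr std::uint64_t kPageSize = 0x1000;

struct Run {
    std::uint64_t base;
    std::size_t pages;
};

// Map each virtual range onto the physical runs, consuming the runs in order and
// taking min(pages left in the run, pages left in the range) at every step.
void RemapSketch(const std::vector<Run>& virtual_ranges, const std::vector<Run>& physical_runs) {
    auto pg_it = physical_runs.begin();
    std::uint64_t pg_phys_addr = pg_it->base;
    std::size_t pg_pages = pg_it->pages;

    for (const Run& range : virtual_ranges) {
        std::uint64_t map_address = range.base;
        std::size_t map_pages = range.pages;
        while (map_pages > 0) {
            if (pg_pages == 0) {
                ++pg_it; // advance to the next physical run
                pg_phys_addr = pg_it->base;
                pg_pages = pg_it->pages;
            }
            const std::size_t cur_pages = std::min(pg_pages, map_pages);
            std::printf("map %zu pages va=%llx -> pa=%llx\n", cur_pages,
                        static_cast<unsigned long long>(map_address),
                        static_cast<unsigned long long>(pg_phys_addr));
            map_address += cur_pages * kPageSize;
            map_pages -= cur_pages;
            pg_phys_addr += cur_pages * kPageSize;
            pg_pages -= cur_pages;
        }
    }
}

} // namespace sketch
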
2248
2249Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
2250 KPhysicalAddress phys_addr, bool is_pa_valid,
2251 KProcessAddress region_start, size_t region_num_pages,
2252 KMemoryState state, KMemoryPermission perm) {
2253 ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
2254
2255 // Ensure this is a valid map request.
2256 R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
2257 ResultInvalidCurrentMemory);
2258 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
2259
2260 // Lock the table.
2261 KScopedLightLock lk(m_general_lock);
2262
2263 // Find a random address to map at.
2264 KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
2265 0, this->GetNumGuardPages());
2266 R_UNLESS(addr != 0, ResultOutOfMemory);
2267 ASSERT(Common::IsAligned(GetInteger(addr), alignment));
2268 ASSERT(this->CanContain(addr, num_pages * PageSize, state));
2269 ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
2270 KMemoryPermission::None, KMemoryPermission::None,
2271 KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
2272
2273 // Create an update allocator.
2274 Result allocator_result;
2275 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2276 m_memory_block_slab_manager);
2277 R_TRY(allocator_result);
2278
2279 // We're going to perform an update, so create a helper.
2280 KScopedPageTableUpdater updater(this);
2281
2282 // Perform mapping operation.
2283 if (is_pa_valid) {
2284 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2285 R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
2286 } else {
2287 R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
2288 }
2289
2290 // Update the blocks.
2291 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
2292 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2293 KMemoryBlockDisableMergeAttribute::None);
2294
2295 // We successfully mapped the pages.
2296 *out_addr = addr;
2297 R_SUCCEED();
2298}
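
The alignment assert at the top of MapPages assumes the usual power-of-two convention for page-table alignments; under that assumption the underlying check reduces to a mask test, roughly (hypothetical helper name, not the Common:: API):

#include <cstdint>

// For a power-of-two alignment, a value is aligned exactly when its low bits are zero.
constexpr bool IsAlignedSketch(std::uint64_t value, std::uint64_t align) {
    return (value & (align - 1)) == 0;
}

static_assert(IsAlignedSketch(0x2000, 0x1000));
static_assert(!IsAlignedSketch(0x2100, 0x1000));
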
2299
2300Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
2301 KMemoryPermission perm) {
2302 // Check that the map is in range.
2303 const size_t size = num_pages * PageSize;
2304 R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
2305
2306 // Lock the table.
2307 KScopedLightLock lk(m_general_lock);
2308
2309 // Check the memory state.
2310 size_t num_allocator_blocks;
2311 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
2312 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
2313 KMemoryPermission::None, KMemoryAttribute::None,
2314 KMemoryAttribute::None));
2315
2316 // Create an update allocator.
2317 Result allocator_result;
2318 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2319 m_memory_block_slab_manager, num_allocator_blocks);
2320 R_TRY(allocator_result);
2321
2322 // We're going to perform an update, so create a helper.
2323 KScopedPageTableUpdater updater(this);
2324
2325 // Map the pages.
2326 R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
2327
2328 // Update the blocks.
2329 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
2330 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2331 KMemoryBlockDisableMergeAttribute::None);
2332
2333 R_SUCCEED();
2334}
2335
2336Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
2337 // Check that the unmap is in range.
2338 const size_t size = num_pages * PageSize;
2339 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2340
2341 // Lock the table.
2342 KScopedLightLock lk(m_general_lock);
2343
2344 // Check the memory state.
2345 size_t num_allocator_blocks;
2346 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
2347 KMemoryState::All, state, KMemoryPermission::None,
2348 KMemoryPermission::None, KMemoryAttribute::All,
2349 KMemoryAttribute::None));
2350
2351 // Create an update allocator.
2352 Result allocator_result;
2353 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2354 m_memory_block_slab_manager, num_allocator_blocks);
2355 R_TRY(allocator_result);
2356
2357 // We're going to perform an update, so create a helper.
2358 KScopedPageTableUpdater updater(this);
2359
2360 // Perform the unmap.
2361 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
2362 DisableMergeAttribute::None};
2363 R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
2364
2365 // Update the blocks.
2366 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
2367 KMemoryPermission::None, KMemoryAttribute::None,
2368 KMemoryBlockDisableMergeAttribute::None,
2369 KMemoryBlockDisableMergeAttribute::Normal);
2370
2371 R_SUCCEED();
2372}
2373
2374Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
2375 KProcessAddress region_start, size_t region_num_pages,
2376 KMemoryState state, KMemoryPermission perm) {
2377 ASSERT(!this->IsLockedByCurrentThread());
2378
2379 // Ensure this is a valid map request.
2380 const size_t num_pages = pg.GetNumPages();
2381 R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
2382 ResultInvalidCurrentMemory);
2383 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
2384
2385 // Lock the table.
2386 KScopedLightLock lk(m_general_lock);
2387
2388 // Find a random address to map at.
2389 KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
2390 0, this->GetNumGuardPages());
2391 R_UNLESS(addr != 0, ResultOutOfMemory);
2392 ASSERT(this->CanContain(addr, num_pages * PageSize, state));
2393 ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
2394 KMemoryPermission::None, KMemoryPermission::None,
2395 KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
2396
2397 // Create an update allocator.
2398 Result allocator_result;
2399 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2400 m_memory_block_slab_manager);
2401 R_TRY(allocator_result);
2402
2403 // We're going to perform an update, so create a helper.
2404 KScopedPageTableUpdater updater(this);
2405
2406 // Perform mapping operation.
2407 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2408 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
2409
2410 // Update the blocks.
2411 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
2412 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2413 KMemoryBlockDisableMergeAttribute::None);
2414
2415 // We successfully mapped the pages.
2416 *out_addr = addr;
2417 R_SUCCEED();
2418}
2419
2420Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
2421 KMemoryPermission perm) {
2422 ASSERT(!this->IsLockedByCurrentThread());
2423
2424 // Ensure this is a valid map request.
2425 const size_t num_pages = pg.GetNumPages();
2426 const size_t size = num_pages * PageSize;
2427 R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
2428
2429 // Lock the table.
2430 KScopedLightLock lk(m_general_lock);
2431
2432 // Check if state allows us to map.
2433 size_t num_allocator_blocks;
2434 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
2435 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
2436 KMemoryPermission::None, KMemoryAttribute::None,
2437 KMemoryAttribute::None));
2438
2439 // Create an update allocator.
2440 Result allocator_result;
2441 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2442 m_memory_block_slab_manager, num_allocator_blocks);
2443 R_TRY(allocator_result);
2444
2445 // We're going to perform an update, so create a helper.
2446 KScopedPageTableUpdater updater(this);
2447
2448 // Perform mapping operation.
2449 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2450 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
2451
2452 // Update the blocks.
2453 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
2454 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2455 KMemoryBlockDisableMergeAttribute::None);
2456
2457 // We successfully mapped the pages.
2458 R_SUCCEED();
2459}
2460
2461Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
2462 KMemoryState state) {
2463 ASSERT(!this->IsLockedByCurrentThread());
2464
2465 // Ensure this is a valid unmap request.
2466 const size_t num_pages = pg.GetNumPages();
2467 const size_t size = num_pages * PageSize;
2468 R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
2469
2470 // Lock the table.
2471 KScopedLightLock lk(m_general_lock);
2472
2473 // Check if state allows us to unmap.
2474 size_t num_allocator_blocks;
2475 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
2476 KMemoryState::All, state, KMemoryPermission::None,
2477 KMemoryPermission::None, KMemoryAttribute::All,
2478 KMemoryAttribute::None));
2479
2480 // Check that the page group is valid.
2481 R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
2482
2483 // Create an update allocator.
2484 Result allocator_result;
2485 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2486 m_memory_block_slab_manager, num_allocator_blocks);
2487 R_TRY(allocator_result);
2488
2489 // We're going to perform an update, so create a helper.
2490 KScopedPageTableUpdater updater(this);
2491
2492 // Perform unmapping operation.
2493 const KPageProperties properties = {KMemoryPermission::None, false, false,
2494 DisableMergeAttribute::None};
2495 R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
2496
2497 // Update the blocks.
2498 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
2499 KMemoryPermission::None, KMemoryAttribute::None,
2500 KMemoryBlockDisableMergeAttribute::None,
2501 KMemoryBlockDisableMergeAttribute::Normal);
2502
2503 R_SUCCEED();
2504}
2505
2506Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
2507 KMemoryState state_mask, KMemoryState state,
2508 KMemoryPermission perm_mask, KMemoryPermission perm,
2509 KMemoryAttribute attr_mask, KMemoryAttribute attr) {
2510 // Ensure that the page group isn't null.
2511 ASSERT(out != nullptr);
2512
2513 // Make sure that the region we're mapping is valid for the table.
2514 const size_t size = num_pages * PageSize;
2515 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2516
2517 // Lock the table.
2518 KScopedLightLock lk(m_general_lock);
2519
2520 // Check if state allows us to create the group.
2521 R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
2522 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
2523 attr_mask, attr));
2524
2525 // Create a new page group for the region.
2526 R_TRY(this->MakePageGroup(*out, address, num_pages));
2527
2528 R_SUCCEED();
2529}
2530
2531Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
2532 Svc::MemoryPermission svc_perm) {
2533 const size_t num_pages = size / PageSize;
2534
2535 // Lock the table.
2536 KScopedLightLock lk(m_general_lock);
2537
2538 // Verify we can change the memory permission.
2539 KMemoryState old_state;
2540 KMemoryPermission old_perm;
2541 size_t num_allocator_blocks;
2542 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
2543 std::addressof(num_allocator_blocks), addr, size,
2544 KMemoryState::FlagCode, KMemoryState::FlagCode,
2545 KMemoryPermission::None, KMemoryPermission::None,
2546 KMemoryAttribute::All, KMemoryAttribute::None));
2547
2548 // Determine new perm/state.
2549 const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
2550 KMemoryState new_state = old_state;
2551 const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
2552 const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
2553 const bool was_x =
2554 (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
2555 ASSERT(!(is_w && is_x));
2556
2557 if (is_w) {
2558 switch (old_state) {
2559 case KMemoryState::Code:
2560 new_state = KMemoryState::CodeData;
2561 break;
2562 case KMemoryState::AliasCode:
2563 new_state = KMemoryState::AliasCodeData;
2564 break;
2565 default:
2566 ASSERT(false);
2567 break;
2568 }
2569 }
2570
2571 // Succeed if there's nothing to do.
2572 R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
2573
2574 // Create an update allocator.
2575 Result allocator_result{ResultSuccess};
2576 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2577 m_memory_block_slab_manager, num_allocator_blocks);
2578 R_TRY(allocator_result);
2579
2580 // Perform mapping operation.
2581 const auto operation =
2582 was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
2583 R_TRY(Operate(addr, num_pages, new_perm, operation));
2584
2585 // Update the blocks.
2586 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
2587 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
2588 KMemoryBlockDisableMergeAttribute::None);
2589
2590 // Ensure cache coherency, if we're setting pages as executable.
2591 if (is_x) {
2592 m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
2593 }
2594
2595 R_SUCCEED();
2596}
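
Making code pages writable also changes their state (Code becomes CodeData, AliasCode becomes AliasCodeData) while write+execute is asserted to be impossible. A simplified sketch of that transition, using a stripped-down state enum rather than KMemoryState:

enum class State { Code, CodeData, AliasCode, AliasCodeData };

// Transition applied when a code mapping is reprotected as writable; the real
// function asserts that any other incoming state is a programming error.
constexpr State TransitionForWrite(State old_state) {
    switch (old_state) {
    case State::Code:
        return State::CodeData;
    case State::AliasCode:
        return State::AliasCodeData;
    default:
        return old_state;
    }
}
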
2597
2598KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
2599 KScopedLightLock lk(m_general_lock);
2600
2601 return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
2602}
2603
2604KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
2605 if (!Contains(addr, 1)) {
2606 return {
2607 .m_address = GetInteger(m_address_space_end),
2608 .m_size = 0 - GetInteger(m_address_space_end),
2609 .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
2610 .m_device_disable_merge_left_count = 0,
2611 .m_device_disable_merge_right_count = 0,
2612 .m_ipc_lock_count = 0,
2613 .m_device_use_count = 0,
2614 .m_ipc_disable_merge_count = 0,
2615 .m_permission = KMemoryPermission::None,
2616 .m_attribute = KMemoryAttribute::None,
2617 .m_original_permission = KMemoryPermission::None,
2618 .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
2619 };
2620 }
2621
2622 return QueryInfoImpl(addr);
2623}
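
For addresses outside the table, QueryInfo synthesizes an "inaccessible" block spanning from the end of the address space to the top of the 64-bit range; the `0 - end` size relies on unsigned wraparound. A small illustration of that arithmetic:

#include <cstdint>

// 0 - end in 64-bit unsigned arithmetic is 2^64 - end, i.e. the number of bytes
// from `end` up to the top of the address space.
constexpr std::uint64_t BytesToTop(std::uint64_t end) {
    return static_cast<std::uint64_t>(0) - end;
}

static_assert(BytesToTop(0xFFFF'FFFF'FFFF'F000ULL) == 0x1000);
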
2624
2625Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
2626 Svc::MemoryPermission svc_perm) {
2627 const size_t num_pages = size / PageSize;
2628
2629 // Lock the table.
2630 KScopedLightLock lk(m_general_lock);
2631
2632 // Verify we can change the memory permission.
2633 KMemoryState old_state;
2634 KMemoryPermission old_perm;
2635 size_t num_allocator_blocks;
2636 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
2637 std::addressof(num_allocator_blocks), addr, size,
2638 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
2639 KMemoryPermission::None, KMemoryPermission::None,
2640 KMemoryAttribute::All, KMemoryAttribute::None));
2641
2642 // Determine new perm.
2643 const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
2644 R_SUCCEED_IF(old_perm == new_perm);
2645
2646 // Create an update allocator.
2647 Result allocator_result{ResultSuccess};
2648 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2649 m_memory_block_slab_manager, num_allocator_blocks);
2650 R_TRY(allocator_result);
2651
2652 // Perform mapping operation.
2653 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
2654
2655 // Update the blocks.
2656 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
2657 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
2658 KMemoryBlockDisableMergeAttribute::None);
2659
2660 R_SUCCEED();
2661}
2662
2663Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
2664 const size_t num_pages = size / PageSize;
2665 ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
2666 KMemoryAttribute::SetMask);
2667
2668 // Lock the table.
2669 KScopedLightLock lk(m_general_lock);
2670
2671 // Verify we can change the memory attribute.
2672 KMemoryState old_state;
2673 KMemoryPermission old_perm;
2674 KMemoryAttribute old_attr;
2675 size_t num_allocator_blocks;
2676 constexpr auto AttributeTestMask =
2677 ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
2678 const KMemoryState state_test_mask =
2679 static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
2680 ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
2681 : 0) |
2682 ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
2683 ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
2684 : 0));
2685 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
2686 std::addressof(old_attr), std::addressof(num_allocator_blocks),
2687 addr, size, state_test_mask, state_test_mask,
2688 KMemoryPermission::None, KMemoryPermission::None,
2689 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
2690
2691 // Create an update allocator.
2692 Result allocator_result{ResultSuccess};
2693 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2694 m_memory_block_slab_manager, num_allocator_blocks);
2695 R_TRY(allocator_result);
2696
2697 // If we need to, perform a change attribute operation.
2698 if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
2699 // Perform operation.
2700 R_TRY(this->Operate(addr, num_pages, old_perm,
2701 OperationType::ChangePermissionsAndRefreshAndFlush, 0));
2702 }
2703
2704 // Update the blocks.
2705 m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
2706 static_cast<KMemoryAttribute>(mask),
2707 static_cast<KMemoryAttribute>(attr));
2708
2709 R_SUCCEED();
2710}
2711
2712Result KPageTable::SetMaxHeapSize(size_t size) {
2713 // Lock the table.
2714 KScopedLightLock lk(m_general_lock);
2715
2716 // Only process page tables are allowed to set heap size.
2717 ASSERT(!this->IsKernel());
2718
2719 m_max_heap_size = size;
2720
2721 R_SUCCEED();
2722}
2723
2724Result KPageTable::SetHeapSize(u64* out, size_t size) {
2725 // Lock the physical memory mutex.
2726 KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
2727
2728 // Try to perform a reduction in heap, instead of an extension.
2729 KProcessAddress cur_address{};
2730 size_t allocation_size{};
2731 {
2732 // Lock the table.
2733 KScopedLightLock lk(m_general_lock);
2734
2735 // Validate that setting heap size is possible at all.
2736 R_UNLESS(!m_is_kernel, ResultOutOfMemory);
2737 R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
2738 ResultOutOfMemory);
2739 R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
2740
2741 if (size < GetHeapSize()) {
2742 // The size being requested is less than the current size, so we need to free the end of
2743 // the heap.
2744
2745 // Validate memory state.
2746 size_t num_allocator_blocks;
2747 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
2748 m_heap_region_start + size, GetHeapSize() - size,
2749 KMemoryState::All, KMemoryState::Normal,
2750 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
2751 KMemoryAttribute::All, KMemoryAttribute::None));
2752
2753 // Create an update allocator.
2754 Result allocator_result{ResultSuccess};
2755 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2756 m_memory_block_slab_manager,
2757 num_allocator_blocks);
2758 R_TRY(allocator_result);
2759
2760 // Unmap the end of the heap.
2761 const auto num_pages = (GetHeapSize() - size) / PageSize;
2762 R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
2763 OperationType::Unmap));
2764
2765 // Release the memory from the resource limit.
2766 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize);
2767
2768 // Apply the memory block update.
2769 m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
2770 num_pages, KMemoryState::Free, KMemoryPermission::None,
2771 KMemoryAttribute::None,
2772 KMemoryBlockDisableMergeAttribute::None,
2773 size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
2774 : KMemoryBlockDisableMergeAttribute::None);
2775
2776 // Update the current heap end.
2777 m_current_heap_end = m_heap_region_start + size;
2778
2779 // Set the output.
2780 *out = GetInteger(m_heap_region_start);
2781 R_SUCCEED();
2782 } else if (size == GetHeapSize()) {
2783 // The size requested is exactly the current size.
2784 *out = GetInteger(m_heap_region_start);
2785 R_SUCCEED();
2786 } else {
2787 // We have to allocate memory. Determine how much to allocate and where while the table
2788 // is locked.
2789 cur_address = m_current_heap_end;
2790 allocation_size = size - GetHeapSize();
2791 }
2792 }
2793
2794 // Reserve memory for the heap extension.
2795 KScopedResourceReservation memory_reservation(
2796 m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size);
2797 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
2798
2799 // Allocate pages for the heap extension.
2800 KPageGroup pg{m_kernel, m_block_info_manager};
2801 R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
2802 &pg, allocation_size / PageSize,
2803 KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
2804
2805 // Clear all the newly allocated pages.
2806 for (const auto& it : pg) {
2807 std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
2808 it.GetSize());
2809 }
2810
2811 // Map the pages.
2812 {
2813 // Lock the table.
2814 KScopedLightLock lk(m_general_lock);
2815
2816 // Ensure that the heap hasn't changed since we began executing.
2817 ASSERT(cur_address == m_current_heap_end);
2818
2819 // Check the memory state.
2820 size_t num_allocator_blocks{};
2821 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
2822 allocation_size, KMemoryState::All, KMemoryState::Free,
2823 KMemoryPermission::None, KMemoryPermission::None,
2824 KMemoryAttribute::None, KMemoryAttribute::None));
2825
2826 // Create an update allocator.
2827 Result allocator_result{ResultSuccess};
2828 KMemoryBlockManagerUpdateAllocator allocator(
2829 std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
2830 R_TRY(allocator_result);
2831
2832 // Map the pages.
2833 const auto num_pages = allocation_size / PageSize;
2834 R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
2835
2836 // Clear all the newly allocated pages.
2837 for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
2838 std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
2839 PageSize);
2840 }
2841
2842 // We succeeded, so commit our memory reservation.
2843 memory_reservation.Commit();
2844
2845 // Apply the memory block update.
2846 m_memory_block_manager.Update(
2847 std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
2848 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
2849 m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
2850 : KMemoryBlockDisableMergeAttribute::None,
2851 KMemoryBlockDisableMergeAttribute::None);
2852
2853 // Update the current heap end.
2854 m_current_heap_end = m_heap_region_start + size;
2855
2856 // Set the output.
2857 *out = GetInteger(m_heap_region_start);
2858 R_SUCCEED();
2859 }
2860}
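
SetHeapSize therefore reduces to a three-way decision against the current heap size: shrink by unmapping the tail, return immediately when nothing changes, or allocate and map the difference. In outline (hypothetical helper, not the kernel API):

#include <cstddef>

enum class HeapOp { Shrink, NoOp, Grow };

// Classify a heap size request relative to the currently mapped heap size.
constexpr HeapOp ClassifyHeapRequest(std::size_t requested, std::size_t current) {
    if (requested < current) {
        return HeapOp::Shrink; // unmap [heap_start + requested, heap_start + current)
    }
    if (requested == current) {
        return HeapOp::NoOp;   // nothing to map or unmap
    }
    return HeapOp::Grow;       // allocate and map (requested - current) bytes at the heap end
}
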
2861
2862Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
2863 size_t size, KMemoryPermission perm,
2864 bool is_aligned, bool check_heap) {
2865 // Lightly validate the range before doing anything else.
2866 const size_t num_pages = size / PageSize;
2867 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2868
2869 // Lock the table.
2870 KScopedLightLock lk(m_general_lock);
2871
2872 // Check the memory state.
2873 const auto test_state =
2874 (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
2875 (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
2876 size_t num_allocator_blocks;
2877 KMemoryState old_state;
2878 R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
2879 std::addressof(num_allocator_blocks), address, size, test_state,
2880 test_state, perm, perm,
2881 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
2882 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
2883
2884 // Create an update allocator.
2885 Result allocator_result;
2886 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2887 m_memory_block_slab_manager, num_allocator_blocks);
2888 R_TRY(allocator_result);
2889
2890 // Update the memory blocks.
2891 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
2892 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
2893
2894 // Set whether the locked memory was io.
2895 *out_is_io =
2896 static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
2897
2898 R_SUCCEED();
2899}
2900
2901Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
2902 bool check_heap) {
2903 // Lightly validate the range before doing anything else.
2904 const size_t num_pages = size / PageSize;
2905 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2906
2907 // Lock the table.
2908 KScopedLightLock lk(m_general_lock);
2909
2910 // Check the memory state.
2911 const auto test_state = KMemoryState::FlagCanDeviceMap |
2912 (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
2913 size_t num_allocator_blocks;
2914 R_TRY(this->CheckMemoryStateContiguous(
2915 std::addressof(num_allocator_blocks), address, size, test_state, test_state,
2916 KMemoryPermission::None, KMemoryPermission::None,
2917 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
2918
2919 // Create an update allocator.
2920 Result allocator_result;
2921 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2922 m_memory_block_slab_manager, num_allocator_blocks);
2923 R_TRY(allocator_result);
2924
2925 // Update the memory blocks.
2926 const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
2927 m_enable_device_address_space_merge
2928 ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
2929 : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
2930 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
2931 KMemoryPermission::None);
2932
2933 R_SUCCEED();
2934}
2935
2936Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
2937 // Lightly validate the range before doing anything else.
2938 const size_t num_pages = size / PageSize;
2939 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2940
2941 // Lock the table.
2942 KScopedLightLock lk(m_general_lock);
2943
2944 // Check the memory state.
2945 size_t num_allocator_blocks;
2946 R_TRY(this->CheckMemoryStateContiguous(
2947 std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
2948 KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
2949 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
2950
2951 // Create an update allocator.
2952 Result allocator_result{ResultSuccess};
2953 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2954 m_memory_block_slab_manager, num_allocator_blocks);
2955 R_TRY(allocator_result);
2956
2957 // Update the memory blocks.
2958 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
2959 &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
2960
2961 R_SUCCEED();
2962}
2963
2964Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
2965 size_t size) {
2966 R_RETURN(this->LockMemoryAndOpen(
2967 nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
2968 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
2969 KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
2970 KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
2971 KMemoryAttribute::Locked));
2972}
2973
2974Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
2975 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
2976 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
2977 KMemoryPermission::None, KMemoryAttribute::All,
2978 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
2979 KMemoryAttribute::Locked, nullptr));
2980}
2981
2982Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
2983 KMemoryPermission perm) {
2984 R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
2985 KMemoryState::FlagCanTransfer, KMemoryPermission::All,
2986 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
2987 KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
2988}
2989
2990Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size,
2991 const KPageGroup& pg) {
2992 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
2993 KMemoryState::FlagCanTransfer, KMemoryPermission::None,
2994 KMemoryPermission::None, KMemoryAttribute::All,
2995 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
2996 KMemoryAttribute::Locked, std::addressof(pg)));
2997}
2998
2999Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
3000 R_RETURN(this->LockMemoryAndOpen(
3001 out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
3002 KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
3003 KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
3004 KMemoryAttribute::Locked));
3005}
3006
3007Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
3008 R_RETURN(this->UnlockMemory(
3009 addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
3010 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
3011 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
3012}
3013
3014bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
3015 auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
3016 for (u64 offset{}; offset < size; offset += PageSize) {
3017 if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
3018 return false;
3019 }
3020 start_ptr += PageSize;
3021 }
3022 return true;
3023}
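
The walk in IsRegionContiguous checks that the host pointer backing each guest page advances by exactly one page. A standalone sketch with a hypothetical lookup callback in place of DeviceMemory:

#include <cstdint>

// A region is contiguous in host memory when lookup(addr + offset) advances by
// page_size for every page in the range.
inline bool IsContiguousSketch(std::uint8_t* (*lookup)(std::uint64_t), std::uint64_t addr,
                               std::uint64_t size, std::uint64_t page_size) {
    std::uint8_t* expected = lookup(addr);
    for (std::uint64_t offset = 0; offset < size; offset += page_size) {
        if (lookup(addr + offset) != expected) {
            return false;
        }
        expected += page_size;
    }
    return true;
}
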
3024
3025void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
3026 KPageGroup& page_linked_list) {
3027 KProcessAddress addr{start};
3028 while (addr < start + (num_pages * PageSize)) {
3029 const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
3030 ASSERT(paddr != 0);
3031 page_linked_list.AddBlock(paddr, 1);
3032 addr += PageSize;
3033 }
3034}
3035
3036KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
3037 u64 needed_num_pages, size_t align) {
3038 if (m_enable_aslr) {
3039 UNIMPLEMENTED();
3040 }
3041 return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
3042 IsKernel() ? 1 : 4);
3043}
3044
3045Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
3046 OperationType operation) {
3047 ASSERT(this->IsLockedByCurrentThread());
3048
3049 ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
3050 ASSERT(num_pages > 0);
3051 ASSERT(num_pages == page_group.GetNumPages());
3052
3053 switch (operation) {
3054 case OperationType::MapGroup:
3055 case OperationType::MapFirstGroup: {
3056 // We want to maintain a new reference to every page in the group.
3057 KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
3058
3059 for (const auto& node : page_group) {
3060 const size_t size{node.GetNumPages() * PageSize};
3061
3062 // Map the pages.
3063 m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
3064
3065 addr += size;
3066 }
3067
3068 // We succeeded! We want to persist the reference to the pages.
3069 spg.CancelClose();
3070
3071 break;
3072 }
3073 default:
3074 ASSERT(false);
3075 break;
3076 }
3077
3078 R_SUCCEED();
3079}
3080
3081Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
3082 OperationType operation, KPhysicalAddress map_addr) {
3083 ASSERT(this->IsLockedByCurrentThread());
3084
3085 ASSERT(num_pages > 0);
3086 ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
3087 ASSERT(ContainsPages(addr, num_pages));
3088
3089 switch (operation) {
3090 case OperationType::Unmap: {
3091 // Ensure that any pages we track close on exit.
3092 KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
3093 SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
3094
3095 this->AddRegionToPages(addr, num_pages, pages_to_close);
3096 m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
3097 break;
3098 }
3099 case OperationType::Map: {
3100 ASSERT(map_addr);
3101 ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
3102 m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
3103
3104 // Open references to pages, if we should.
3105 if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
3106 m_kernel.MemoryManager().Open(map_addr, num_pages);
3107 }
3108 break;
3109 }
3110 case OperationType::Separate: {
3111 // HACK: Unimplemented.
3112 break;
3113 }
3114 case OperationType::ChangePermissions:
3115 case OperationType::ChangePermissionsAndRefresh:
3116 case OperationType::ChangePermissionsAndRefreshAndFlush:
3117 break;
3118 default:
3119 ASSERT(false);
3120 break;
3121 }
3122 R_SUCCEED();
3123}
3124
3125void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
3126 while (page_list->Peek()) {
3127 [[maybe_unused]] auto page = page_list->Pop();
3128
3129 // TODO(bunnei): Free pages once they are allocated in guest memory
3130 // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
3131 // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
3132 // this->GetPageTableManager().Free(page);
3133 }
3134}
3135
3136KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
3137 switch (state) {
3138 case Svc::MemoryState::Free:
3139 case Svc::MemoryState::Kernel:
3140 return m_address_space_start;
3141 case Svc::MemoryState::Normal:
3142 return m_heap_region_start;
3143 case Svc::MemoryState::Ipc:
3144 case Svc::MemoryState::NonSecureIpc:
3145 case Svc::MemoryState::NonDeviceIpc:
3146 return m_alias_region_start;
3147 case Svc::MemoryState::Stack:
3148 return m_stack_region_start;
3149 case Svc::MemoryState::Static:
3150 case Svc::MemoryState::ThreadLocal:
3151 return m_kernel_map_region_start;
3152 case Svc::MemoryState::Io:
3153 case Svc::MemoryState::Shared:
3154 case Svc::MemoryState::AliasCode:
3155 case Svc::MemoryState::AliasCodeData:
3156 case Svc::MemoryState::Transfered:
3157 case Svc::MemoryState::SharedTransfered:
3158 case Svc::MemoryState::SharedCode:
3159 case Svc::MemoryState::GeneratedCode:
3160 case Svc::MemoryState::CodeOut:
3161 case Svc::MemoryState::Coverage:
3162 case Svc::MemoryState::Insecure:
3163 return m_alias_code_region_start;
3164 case Svc::MemoryState::Code:
3165 case Svc::MemoryState::CodeData:
3166 return m_code_region_start;
3167 default:
3168 UNREACHABLE();
3169 }
3170}
3171
3172size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
3173 switch (state) {
3174 case Svc::MemoryState::Free:
3175 case Svc::MemoryState::Kernel:
3176 return m_address_space_end - m_address_space_start;
3177 case Svc::MemoryState::Normal:
3178 return m_heap_region_end - m_heap_region_start;
3179 case Svc::MemoryState::Ipc:
3180 case Svc::MemoryState::NonSecureIpc:
3181 case Svc::MemoryState::NonDeviceIpc:
3182 return m_alias_region_end - m_alias_region_start;
3183 case Svc::MemoryState::Stack:
3184 return m_stack_region_end - m_stack_region_start;
3185 case Svc::MemoryState::Static:
3186 case Svc::MemoryState::ThreadLocal:
3187 return m_kernel_map_region_end - m_kernel_map_region_start;
3188 case Svc::MemoryState::Io:
3189 case Svc::MemoryState::Shared:
3190 case Svc::MemoryState::AliasCode:
3191 case Svc::MemoryState::AliasCodeData:
3192 case Svc::MemoryState::Transfered:
3193 case Svc::MemoryState::SharedTransfered:
3194 case Svc::MemoryState::SharedCode:
3195 case Svc::MemoryState::GeneratedCode:
3196 case Svc::MemoryState::CodeOut:
3197 case Svc::MemoryState::Coverage:
3198 case Svc::MemoryState::Insecure:
3199 return m_alias_code_region_end - m_alias_code_region_start;
3200 case Svc::MemoryState::Code:
3201 case Svc::MemoryState::CodeData:
3202 return m_code_region_end - m_code_region_start;
3203 default:
3204 UNREACHABLE();
3205 }
3206}
3207
3208bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
3209 const KProcessAddress end = addr + size;
3210 const KProcessAddress last = end - 1;
3211
3212 const KProcessAddress region_start = this->GetRegionAddress(state);
3213 const size_t region_size = this->GetRegionSize(state);
3214
3215 const bool is_in_region =
3216 region_start <= addr && addr < end && last <= region_start + region_size - 1;
3217 const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
3218 m_heap_region_start == m_heap_region_end);
3219 const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
3220 m_alias_region_start == m_alias_region_end);
3221 switch (state) {
3222 case Svc::MemoryState::Free:
3223 case Svc::MemoryState::Kernel:
3224 return is_in_region;
3225 case Svc::MemoryState::Io:
3226 case Svc::MemoryState::Static:
3227 case Svc::MemoryState::Code:
3228 case Svc::MemoryState::CodeData:
3229 case Svc::MemoryState::Shared:
3230 case Svc::MemoryState::AliasCode:
3231 case Svc::MemoryState::AliasCodeData:
3232 case Svc::MemoryState::Stack:
3233 case Svc::MemoryState::ThreadLocal:
3234 case Svc::MemoryState::Transfered:
3235 case Svc::MemoryState::SharedTransfered:
3236 case Svc::MemoryState::SharedCode:
3237 case Svc::MemoryState::GeneratedCode:
3238 case Svc::MemoryState::CodeOut:
3239 case Svc::MemoryState::Coverage:
3240 case Svc::MemoryState::Insecure:
3241 return is_in_region && !is_in_heap && !is_in_alias;
3242 case Svc::MemoryState::Normal:
3243 ASSERT(is_in_heap);
3244 return is_in_region && !is_in_alias;
3245 case Svc::MemoryState::Ipc:
3246 case Svc::MemoryState::NonSecureIpc:
3247 case Svc::MemoryState::NonDeviceIpc:
3248 ASSERT(is_in_alias);
3249 return is_in_region && !is_in_heap;
3250 default:
3251 return false;
3252 }
3253}
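
The checks in CanContain are interval arithmetic over half-open ranges; stripped of the kernel types they look roughly like this (plain integers, hypothetical names):

#include <cstdint>

// True when [addr, addr + size) lies entirely inside [region_start, region_start + region_size).
constexpr bool InRegion(std::uint64_t addr, std::uint64_t size, std::uint64_t region_start,
                        std::uint64_t region_size) {
    const std::uint64_t end = addr + size;
    const std::uint64_t last = end - 1;
    return region_start <= addr && addr < end && last <= region_start + region_size - 1;
}

// True when [addr, addr + size) intersects a non-empty region [lo, hi).
constexpr bool Overlaps(std::uint64_t addr, std::uint64_t size, std::uint64_t lo,
                        std::uint64_t hi) {
    const std::uint64_t end = addr + size;
    return !(end <= lo || hi <= addr || lo == hi);
}
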
3254
3255Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
3256 KMemoryState state, KMemoryPermission perm_mask,
3257 KMemoryPermission perm, KMemoryAttribute attr_mask,
3258 KMemoryAttribute attr) const {
3259 // Validate the states match expectation.
3260 R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
3261 R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
3262 R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
3263
3264 R_SUCCEED();
3265}
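
Each field is accepted under the usual mask/value convention: (field & mask) == expected, so a mask of All demands an exact match while a mask of None accepts anything. A minimal sketch with raw bit fields:

#include <cstdint>

// Generic mask/value test of the kind used for state, permission and attribute checks.
constexpr bool FieldMatches(std::uint32_t field, std::uint32_t mask, std::uint32_t expected) {
    return (field & mask) == expected;
}

static_assert(FieldMatches(0b1010, 0b1111, 0b1010)); // exact match under a full mask
static_assert(FieldMatches(0b1010, 0b0000, 0b0000)); // empty mask accepts any field
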
3266
3267Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
3268 size_t size, KMemoryState state_mask,
3269 KMemoryState state, KMemoryPermission perm_mask,
3270 KMemoryPermission perm, KMemoryAttribute attr_mask,
3271 KMemoryAttribute attr) const {
3272 ASSERT(this->IsLockedByCurrentThread());
3273
3274 // Get information about the first block.
3275 const KProcessAddress last_addr = addr + size - 1;
3276 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
3277 KMemoryInfo info = it->GetMemoryInfo();
3278
3279 // If the start address isn't aligned, we need a block.
3280 const size_t blocks_for_start_align =
3281 (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
3282
3283 while (true) {
3284 // Validate against the provided masks.
3285 R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
3286
3287 // Break once we're done.
3288 if (last_addr <= info.GetLastAddress()) {
3289 break;
3290 }
3291
3292 // Advance our iterator.
3293 it++;
3294 ASSERT(it != m_memory_block_manager.cend());
3295 info = it->GetMemoryInfo();
3296 }
3297
3298 // If the end address isn't aligned, we need a block.
3299 const size_t blocks_for_end_align =
3300 (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
3301
3302 if (out_blocks_needed != nullptr) {
3303 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
3304 }
3305
3306 R_SUCCEED();
3307}
3308
3309Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
3310 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
3311 KMemoryBlockManager::const_iterator it,
3312 KProcessAddress last_addr, KMemoryState state_mask,
3313 KMemoryState state, KMemoryPermission perm_mask,
3314 KMemoryPermission perm, KMemoryAttribute attr_mask,
3315 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
3316 ASSERT(this->IsLockedByCurrentThread());
3317
3318 // Get information about the first block.
3319 KMemoryInfo info = it->GetMemoryInfo();
3320
3321 // Validate all blocks in the range have correct state.
3322 const KMemoryState first_state = info.m_state;
3323 const KMemoryPermission first_perm = info.m_permission;
3324 const KMemoryAttribute first_attr = info.m_attribute;
3325 while (true) {
3326 // Validate the current block.
3327 R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
3328 R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
3329 R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
3330 ResultInvalidCurrentMemory);
3331
3332 // Validate against the provided masks.
3333 R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
3334
3335 // Break once we're done.
3336 if (last_addr <= info.GetLastAddress()) {
3337 break;
3338 }
3339
3340 // Advance our iterator.
3341 it++;
3342 ASSERT(it != m_memory_block_manager.cend());
3343 info = it->GetMemoryInfo();
3344 }
3345
3346 // Write output state.
3347 if (out_state != nullptr) {
3348 *out_state = first_state;
3349 }
3350 if (out_perm != nullptr) {
3351 *out_perm = first_perm;
3352 }
3353 if (out_attr != nullptr) {
3354 *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
3355 }
3356
3357 // If the end address isn't aligned, we need a block.
3358 if (out_blocks_needed != nullptr) {
3359 const size_t blocks_for_end_align =
3360 (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
3361 ? 1
3362 : 0;
3363 *out_blocks_needed = blocks_for_end_align;
3364 }
3365
3366 R_SUCCEED();
3367}
3368
3369Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
3370 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
3371 KProcessAddress addr, size_t size, KMemoryState state_mask,
3372 KMemoryState state, KMemoryPermission perm_mask,
3373 KMemoryPermission perm, KMemoryAttribute attr_mask,
3374 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
3375 ASSERT(this->IsLockedByCurrentThread());
3376
3377 // Check memory state.
3378 const KProcessAddress last_addr = addr + size - 1;
3379 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
3380 R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
3381 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
3382
3383 // If the start address isn't aligned, we need a block.
3384 if (out_blocks_needed != nullptr &&
3385 Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
3386 ++(*out_blocks_needed);
3387 }
3388
3389 R_SUCCEED();
3390}
3391
3392Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
3393 KProcessAddress addr, size_t size, KMemoryState state_mask,
3394 KMemoryState state, KMemoryPermission perm_mask,
3395 KMemoryPermission perm, KMemoryAttribute attr_mask,
3396 KMemoryAttribute attr, KMemoryPermission new_perm,
3397 KMemoryAttribute lock_attr) {
3398 // Validate basic preconditions.
3399 ASSERT((lock_attr & attr) == KMemoryAttribute::None);
3400 ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
3401 KMemoryAttribute::None);
3402
3403 // Validate the lock request.
3404 const size_t num_pages = size / PageSize;
3405 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
3406
3407 // Lock the table.
3408 KScopedLightLock lk(m_general_lock);
3409
3410 // Check that the output page group is empty, if it exists.
3411 if (out_pg) {
3412 ASSERT(out_pg->GetNumPages() == 0);
3413 }
3414
3415 // Check the state.
3416 KMemoryState old_state{};
3417 KMemoryPermission old_perm{};
3418 KMemoryAttribute old_attr{};
3419 size_t num_allocator_blocks{};
3420 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
3421 std::addressof(old_attr), std::addressof(num_allocator_blocks),
3422 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
3423 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
3424 attr_mask, attr));
3425
3426 // Get the physical address, if we're supposed to.
3427 if (out_KPhysicalAddress != nullptr) {
3428 ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr));
3429 }
3430
3431 // Make the page group, if we're supposed to.
3432 if (out_pg != nullptr) {
3433 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
3434 }
3435
3436 // Create an update allocator.
3437 Result allocator_result{ResultSuccess};
3438 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
3439 m_memory_block_slab_manager, num_allocator_blocks);
3440 R_TRY(allocator_result);
3441
3442 // Decide on new perm and attr.
3443 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
3444 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
3445
3446 // Update permission, if we need to.
3447 if (new_perm != old_perm) {
3448 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
3449 }
3450
3451 // Apply the memory block updates.
3452 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
3453 new_attr, KMemoryBlockDisableMergeAttribute::Locked,
3454 KMemoryBlockDisableMergeAttribute::None);
3455
3456 // If we have an output page group, open.
3457 if (out_pg) {
3458 out_pg->Open();
3459 }
3460
3461 R_SUCCEED();
3462}
3463
3464Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
3465 KMemoryState state, KMemoryPermission perm_mask,
3466 KMemoryPermission perm, KMemoryAttribute attr_mask,
3467 KMemoryAttribute attr, KMemoryPermission new_perm,
3468 KMemoryAttribute lock_attr, const KPageGroup* pg) {
3469 // Validate basic preconditions.
3470 ASSERT((attr_mask & lock_attr) == lock_attr);
3471 ASSERT((attr & lock_attr) == lock_attr);
3472
3473 // Validate the unlock request.
3474 const size_t num_pages = size / PageSize;
3475 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
3476
3477 // Lock the table.
3478 KScopedLightLock lk(m_general_lock);
3479
3480 // Check the state.
3481 KMemoryState old_state{};
3482 KMemoryPermission old_perm{};
3483 KMemoryAttribute old_attr{};
3484 size_t num_allocator_blocks{};
3485 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
3486 std::addressof(old_attr), std::addressof(num_allocator_blocks),
3487 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
3488 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
3489 attr_mask, attr));
3490
3491 // Check the page group.
3492 if (pg != nullptr) {
3493 R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
3494 }
3495
3496 // Decide on new perm and attr.
3497 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
3498 KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
3499
3500 // Create an update allocator.
3501 Result allocator_result{ResultSuccess};
3502 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
3503 m_memory_block_slab_manager, num_allocator_blocks);
3504 R_TRY(allocator_result);
3505
3506 // Update permission, if we need to.
3507 if (new_perm != old_perm) {
3508 R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
3509 }
3510
3511 // Apply the memory block updates.
3512 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
3513 new_attr, KMemoryBlockDisableMergeAttribute::None,
3514 KMemoryBlockDisableMergeAttribute::Locked);
3515
3516 R_SUCCEED();
3517}
3518
3519} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 66f16faaf..5541bc13f 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -3,548 +3,14 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <memory> 6#include "core/hle/kernel/k_page_table_base.h"
7
8#include "common/common_funcs.h"
9#include "common/page_table.h"
10#include "core/file_sys/program_metadata.h"
11#include "core/hle/kernel/k_dynamic_resource_manager.h"
12#include "core/hle/kernel/k_light_lock.h"
13#include "core/hle/kernel/k_memory_block.h"
14#include "core/hle/kernel/k_memory_block_manager.h"
15#include "core/hle/kernel/k_memory_layout.h"
16#include "core/hle/kernel/k_memory_manager.h"
17#include "core/hle/kernel/k_typed_address.h"
18#include "core/hle/result.h"
19#include "core/memory.h"
20
21namespace Core {
22class System;
23}
24 7
25namespace Kernel { 8namespace Kernel {
26 9
27enum class DisableMergeAttribute : u8 { 10class KPageTable final : public KPageTableBase {
28 None = (0U << 0),
29 DisableHead = (1U << 0),
30 DisableHeadAndBody = (1U << 1),
31 EnableHeadAndBody = (1U << 2),
32 DisableTail = (1U << 3),
33 EnableTail = (1U << 4),
34 EnableAndMergeHeadBodyTail = (1U << 5),
35 EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
36 DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
37};
38
39struct KPageProperties {
40 KMemoryPermission perm;
41 bool io;
42 bool uncached;
43 DisableMergeAttribute disable_merge_attributes;
44};
45static_assert(std::is_trivial_v<KPageProperties>);
46static_assert(sizeof(KPageProperties) == sizeof(u32));
47
48class KBlockInfoManager;
49class KMemoryBlockManager;
50class KResourceLimit;
51class KSystemResource;
52
53class KPageTable final {
54protected:
55 struct PageLinkedList;
56
57public:
58 enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
59
60 YUZU_NON_COPYABLE(KPageTable);
61 YUZU_NON_MOVEABLE(KPageTable);
62
63 explicit KPageTable(Core::System& system_);
64 ~KPageTable();
65
66 Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
67 bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
68 KProcessAddress code_addr, size_t code_size,
69 KSystemResource* system_resource, KResourceLimit* resource_limit,
70 Core::Memory::Memory& memory);
71
72 void Finalize();
73
74 Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
75 KMemoryPermission perm);
76 Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
77 Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
78 ICacheInvalidationStrategy icache_invalidation_strategy);
79 Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
80 KProcessAddress src_addr);
81 Result MapPhysicalMemory(KProcessAddress addr, size_t size);
82 Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
83 Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
84 Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
85 Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
86 Svc::MemoryPermission svc_perm);
87 KMemoryInfo QueryInfo(KProcessAddress addr);
88 Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
89 Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
90 Result SetMaxHeapSize(size_t size);
91 Result SetHeapSize(u64* out, size_t size);
92 Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
93 KMemoryPermission perm, bool is_aligned, bool check_heap);
94 Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
95
96 Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
97
98 Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
99 Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
100
101 Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
102 KPageTable& src_page_table, KMemoryPermission test_perm,
103 KMemoryState dst_state, bool send);
104 Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
105 Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
106
107 Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
108 KMemoryPermission perm);
109 Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
110 Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
111 Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
112 Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
113 KMemoryState state_mask, KMemoryState state,
114 KMemoryPermission perm_mask, KMemoryPermission perm,
115 KMemoryAttribute attr_mask, KMemoryAttribute attr);
116
117 Common::PageTable& PageTableImpl() {
118 return *m_page_table_impl;
119 }
120
121 const Common::PageTable& PageTableImpl() const {
122 return *m_page_table_impl;
123 }
124
125 KBlockInfoManager* GetBlockInfoManager() {
126 return m_block_info_manager;
127 }
128
129 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
130 KPhysicalAddress phys_addr, KProcessAddress region_start,
131 size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
132 R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
133 region_num_pages, state, perm));
134 }
135
136 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
137 KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
138 R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
139 this->GetRegionAddress(state),
140 this->GetRegionSize(state) / PageSize, state, perm));
141 }
142
143 Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
144 KMemoryPermission perm) {
145 R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
146 this->GetRegionAddress(state),
147 this->GetRegionSize(state) / PageSize, state, perm));
148 }
149
150 Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
151 KMemoryPermission perm);
152 Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
153
154 Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
155 KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
156 KMemoryPermission perm);
157 Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
158 KMemoryPermission perm);
159 Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
160 void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
161 const KPageGroup& pg);
162
163 KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
164 size_t GetRegionSize(Svc::MemoryState state) const;
165 bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
166
167 KProcessAddress GetRegionAddress(KMemoryState state) const {
168 return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
169 }
170 size_t GetRegionSize(KMemoryState state) const {
171 return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
172 }
173 bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
174 return this->CanContain(addr, size,
175 static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
176 }
177
178protected:
179 struct PageLinkedList {
180 private:
181 struct Node {
182 Node* m_next;
183 std::array<u8, PageSize - sizeof(Node*)> m_buffer;
184 };
185
186 public:
187 constexpr PageLinkedList() = default;
188
189 void Push(Node* n) {
190 ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
191 n->m_next = m_root;
192 m_root = n;
193 }
194
195 void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
196 this->Push(memory.GetPointer<Node>(GetInteger(addr)));
197 }
198
199 Node* Peek() const {
200 return m_root;
201 }
202
203 Node* Pop() {
204 Node* const r = m_root;
205
206 m_root = r->m_next;
207 r->m_next = nullptr;
208
209 return r;
210 }
211
212 private:
213 Node* m_root{};
214 };
215 static_assert(std::is_trivially_destructible<PageLinkedList>::value);
216
217private:
218 enum class OperationType : u32 {
219 Map = 0,
220 MapGroup = 1,
221 MapFirstGroup = 2,
222 Unmap = 3,
223 ChangePermissions = 4,
224 ChangePermissionsAndRefresh = 5,
225 ChangePermissionsAndRefreshAndFlush = 6,
226 Separate = 7,
227 };
228
229 static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
230 KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
231
232 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
233 KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
234 size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
235 bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
236 void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
237 KMemoryInfo QueryInfoImpl(KProcessAddress addr);
238 KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
239 u64 needed_num_pages, size_t align);
240 Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
241 OperationType operation);
242 Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
243 OperationType operation, KPhysicalAddress map_addr = 0);
244 void FinalizeUpdate(PageLinkedList* page_list);
245
246 KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
247 size_t num_pages, size_t alignment, size_t offset,
248 size_t guard_pages);
249
250 Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
251 KMemoryState state_mask, KMemoryState state,
252 KMemoryPermission perm_mask, KMemoryPermission perm,
253 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
254 Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
255 KMemoryState state, KMemoryPermission perm_mask,
256 KMemoryPermission perm, KMemoryAttribute attr_mask,
257 KMemoryAttribute attr) const {
258 R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
259 perm, attr_mask, attr));
260 }
261
262 Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
263 KMemoryPermission perm_mask, KMemoryPermission perm,
264 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
265 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
266 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
267 KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
268 KMemoryState state_mask, KMemoryState state,
269 KMemoryPermission perm_mask, KMemoryPermission perm,
270 KMemoryAttribute attr_mask, KMemoryAttribute attr,
271 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
272 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
273 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
274 KProcessAddress addr, size_t size, KMemoryState state_mask,
275 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
276 KMemoryAttribute attr_mask, KMemoryAttribute attr,
277 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
278 Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
279 KMemoryState state_mask, KMemoryState state,
280 KMemoryPermission perm_mask, KMemoryPermission perm,
281 KMemoryAttribute attr_mask, KMemoryAttribute attr,
282 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
283 R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
284 state_mask, state, perm_mask, perm, attr_mask, attr,
285 ignore_attr));
286 }
287 Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
288 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
289 KMemoryAttribute attr_mask, KMemoryAttribute attr,
290 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
291 R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
292 attr_mask, attr, ignore_attr));
293 }
294
295 Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
296 KProcessAddress addr, size_t size, KMemoryState state_mask,
297 KMemoryState state, KMemoryPermission perm_mask,
298 KMemoryPermission perm, KMemoryAttribute attr_mask,
299 KMemoryAttribute attr, KMemoryPermission new_perm,
300 KMemoryAttribute lock_attr);
301 Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
302 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
303 KMemoryAttribute attr_mask, KMemoryAttribute attr,
304 KMemoryPermission new_perm, KMemoryAttribute lock_attr,
305 const KPageGroup* pg);
306
307 Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
308 bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
309
310 bool IsLockedByCurrentThread() const {
311 return m_general_lock.IsLockedByCurrentThread();
312 }
313
314 bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
315 ASSERT(this->IsLockedByCurrentThread());
316
317 return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
318 }
319
320 bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
321 ASSERT(this->IsLockedByCurrentThread());
322
323 *out = GetPhysicalAddr(virt_addr);
324
325 return *out != 0;
326 }
327
328 Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
329 KProcessAddress address, size_t size, KMemoryPermission test_perm,
330 KMemoryState dst_state);
331 Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
332 KMemoryPermission test_perm, KMemoryState dst_state,
333 KPageTable& src_page_table, bool send);
334 void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
335 size_t size, KMemoryPermission prot_perm);
336
337 Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
338 size_t num_pages, KMemoryPermission perm);
339 Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
340 const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
341
342 mutable KLightLock m_general_lock;
343 mutable KLightLock m_map_physical_memory_lock;
344
345public:
346 constexpr KProcessAddress GetAddressSpaceStart() const {
347 return m_address_space_start;
348 }
349 constexpr KProcessAddress GetAddressSpaceEnd() const {
350 return m_address_space_end;
351 }
352 constexpr size_t GetAddressSpaceSize() const {
353 return m_address_space_end - m_address_space_start;
354 }
355 constexpr KProcessAddress GetHeapRegionStart() const {
356 return m_heap_region_start;
357 }
358 constexpr KProcessAddress GetHeapRegionEnd() const {
359 return m_heap_region_end;
360 }
361 constexpr size_t GetHeapRegionSize() const {
362 return m_heap_region_end - m_heap_region_start;
363 }
364 constexpr KProcessAddress GetAliasRegionStart() const {
365 return m_alias_region_start;
366 }
367 constexpr KProcessAddress GetAliasRegionEnd() const {
368 return m_alias_region_end;
369 }
370 constexpr size_t GetAliasRegionSize() const {
371 return m_alias_region_end - m_alias_region_start;
372 }
373 constexpr KProcessAddress GetStackRegionStart() const {
374 return m_stack_region_start;
375 }
376 constexpr KProcessAddress GetStackRegionEnd() const {
377 return m_stack_region_end;
378 }
379 constexpr size_t GetStackRegionSize() const {
380 return m_stack_region_end - m_stack_region_start;
381 }
382 constexpr KProcessAddress GetKernelMapRegionStart() const {
383 return m_kernel_map_region_start;
384 }
385 constexpr KProcessAddress GetKernelMapRegionEnd() const {
386 return m_kernel_map_region_end;
387 }
388 constexpr KProcessAddress GetCodeRegionStart() const {
389 return m_code_region_start;
390 }
391 constexpr KProcessAddress GetCodeRegionEnd() const {
392 return m_code_region_end;
393 }
394 constexpr KProcessAddress GetAliasCodeRegionStart() const {
395 return m_alias_code_region_start;
396 }
397 constexpr KProcessAddress GetAliasCodeRegionEnd() const {
398 return m_alias_code_region_end;
399 }
400 constexpr size_t GetAliasCodeRegionSize() const {
401 return m_alias_code_region_end - m_alias_code_region_start;
402 }
403 size_t GetNormalMemorySize() const {
404 KScopedLightLock lk(m_general_lock);
405 return GetHeapSize() + m_mapped_physical_memory_size;
406 }
407 constexpr size_t GetAddressSpaceWidth() const {
408 return m_address_space_width;
409 }
410 constexpr size_t GetHeapSize() const {
411 return m_current_heap_end - m_heap_region_start;
412 }
413 constexpr size_t GetNumGuardPages() const {
414 return IsKernel() ? 1 : 4;
415 }
416 KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
417 const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
418 ASSERT(backing_addr);
419 return backing_addr + GetInteger(addr);
420 }
421 constexpr bool Contains(KProcessAddress addr) const {
422 return m_address_space_start <= addr && addr <= m_address_space_end - 1;
423 }
424 constexpr bool Contains(KProcessAddress addr, size_t size) const {
425 return m_address_space_start <= addr && addr < addr + size &&
426 addr + size - 1 <= m_address_space_end - 1;
427 }
428 constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
429 return this->Contains(addr, size) && m_alias_region_start <= addr &&
430 addr + size - 1 <= m_alias_region_end - 1;
431 }
432 constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
433 return this->Contains(addr, size) && m_heap_region_start <= addr &&
434 addr + size - 1 <= m_heap_region_end - 1;
435 }
436
437public: 11public:
438 static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout, 12 explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {}
439 KPhysicalAddress addr) { 13 ~KPageTable() = default;
440 return layout.GetLinearVirtualAddress(addr);
441 }
442
443 static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
444 KVirtualAddress addr) {
445 return layout.GetLinearPhysicalAddress(addr);
446 }
447
448 static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
449 KPhysicalAddress addr) {
450 return GetLinearMappedVirtualAddress(layout, addr);
451 }
452
453 static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
454 KVirtualAddress addr) {
455 return GetLinearMappedPhysicalAddress(layout, addr);
456 }
457
458 static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
459 KPhysicalAddress addr) {
460 return GetLinearMappedVirtualAddress(layout, addr);
461 }
462
463 static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
464 KVirtualAddress addr) {
465 return GetLinearMappedPhysicalAddress(layout, addr);
466 }
467
468private:
469 constexpr bool IsKernel() const {
470 return m_is_kernel;
471 }
472 constexpr bool IsAslrEnabled() const {
473 return m_enable_aslr;
474 }
475
476 constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
477 return (m_address_space_start <= addr) &&
478 (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
479 (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
480 }
481
482private:
483 class KScopedPageTableUpdater {
484 private:
485 KPageTable* m_pt{};
486 PageLinkedList m_ll;
487
488 public:
489 explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
490 explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
491 ~KScopedPageTableUpdater() {
492 m_pt->FinalizeUpdate(this->GetPageList());
493 }
494
495 PageLinkedList* GetPageList() {
496 return std::addressof(m_ll);
497 }
498 };
499
500private:
501 KProcessAddress m_address_space_start{};
502 KProcessAddress m_address_space_end{};
503 KProcessAddress m_heap_region_start{};
504 KProcessAddress m_heap_region_end{};
505 KProcessAddress m_current_heap_end{};
506 KProcessAddress m_alias_region_start{};
507 KProcessAddress m_alias_region_end{};
508 KProcessAddress m_stack_region_start{};
509 KProcessAddress m_stack_region_end{};
510 KProcessAddress m_kernel_map_region_start{};
511 KProcessAddress m_kernel_map_region_end{};
512 KProcessAddress m_code_region_start{};
513 KProcessAddress m_code_region_end{};
514 KProcessAddress m_alias_code_region_start{};
515 KProcessAddress m_alias_code_region_end{};
516
517 size_t m_max_heap_size{};
518 size_t m_mapped_physical_memory_size{};
519 size_t m_mapped_unsafe_physical_memory{};
520 size_t m_mapped_insecure_memory{};
521 size_t m_mapped_ipc_server_memory{};
522 size_t m_address_space_width{};
523
524 KMemoryBlockManager m_memory_block_manager;
525 u32 m_allocate_option{};
526
527 bool m_is_kernel{};
528 bool m_enable_aslr{};
529 bool m_enable_device_address_space_merge{};
530
531 KMemoryBlockSlabManager* m_memory_block_slab_manager{};
532 KBlockInfoManager* m_block_info_manager{};
533 KResourceLimit* m_resource_limit{};
534
535 u32 m_heap_fill_value{};
536 u32 m_ipc_fill_value{};
537 u32 m_stack_fill_value{};
538 const KMemoryRegion* m_cached_physical_heap_region{};
539
540 KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
541 KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
542
543 std::unique_ptr<Common::PageTable> m_page_table_impl;
544
545 Core::System& m_system;
546 KernelCore& m_kernel;
547 Core::Memory::Memory* m_memory{};
548}; 14};
549 15
550} // namespace Kernel 16} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
new file mode 100644
index 000000000..1cc019c06
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -0,0 +1,5718 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/scope_exit.h"
5#include "common/settings.h"
6#include "core/core.h"
7#include "core/hle/kernel/k_address_space_info.h"
8#include "core/hle/kernel/k_page_table_base.h"
9#include "core/hle/kernel/k_scoped_resource_reservation.h"
10#include "core/hle/kernel/k_system_resource.h"
11
12namespace Kernel {
13
14namespace {
15
16class KScopedLightLockPair {
17 YUZU_NON_COPYABLE(KScopedLightLockPair);
18 YUZU_NON_MOVEABLE(KScopedLightLockPair);
19
20private:
21 KLightLock* m_lower;
22 KLightLock* m_upper;
23
24public:
25 KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
26 // Ensure our locks are in a consistent order.
27 if (std::addressof(lhs) <= std::addressof(rhs)) {
28 m_lower = std::addressof(lhs);
29 m_upper = std::addressof(rhs);
30 } else {
31 m_lower = std::addressof(rhs);
32 m_upper = std::addressof(lhs);
33 }
34
35 // Acquire both locks.
36 m_lower->Lock();
37 if (m_lower != m_upper) {
38 m_upper->Lock();
39 }
40 }
41
42 ~KScopedLightLockPair() {
43 // Unlock the upper lock.
44 if (m_upper != nullptr && m_upper != m_lower) {
45 m_upper->Unlock();
46 }
47
48 // Unlock the lower lock.
49 if (m_lower != nullptr) {
50 m_lower->Unlock();
51 }
52 }
53
54public:
55 // Utility.
56 void TryUnlockHalf(KLightLock& lock) {
57 // Only allow unlocking if the lock is half the pair.
58 if (m_lower != m_upper) {
59 // We want to be sure the lock is one we own.
60 if (m_lower == std::addressof(lock)) {
61 lock.Unlock();
62 m_lower = nullptr;
63 } else if (m_upper == std::addressof(lock)) {
64 lock.Unlock();
65 m_upper = nullptr;
66 }
67 }
68 }
69};
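KScopedLightLockPair always acquires the lower-addressed lock first, so two threads that need to lock the same two page tables (for example, the client and server tables during IPC) can never each hold one lock while waiting on the other. The same idea in standalone form, using std::mutex purely for illustration (std::scoped_lock/std::lock provide an equivalent deadlock-avoidance guarantee in the standard library):

#include <functional>
#include <mutex>
#include <utility>

void LockPairOrdered(std::mutex& a, std::mutex& b) {
    // Order the two locks by address so callers passing (a, b) and (b, a) agree on
    // which lock to take first; std::less gives a total order over pointers.
    std::mutex* first = &a;
    std::mutex* second = &b;
    if (std::less<std::mutex*>{}(second, first)) {
        std::swap(first, second);
    }
    first->lock();
    if (first != second) {
        second->lock();
    }
}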
70
71void InvalidateEntireInstructionCache(Core::System& system) {
72 system.InvalidateCpuInstructionCaches();
73}
74
75template <typename AddressType>
76Result InvalidateDataCache(AddressType addr, u64 size) {
77 R_SUCCEED();
78}
79
80template <typename AddressType>
81Result StoreDataCache(AddressType addr, u64 size) {
82 R_SUCCEED();
83}
84
85template <typename AddressType>
86Result FlushDataCache(AddressType addr, u64 size) {
87 R_SUCCEED();
88}
89
90} // namespace
91
92void KPageTableBase::MemoryRange::Open() {
93 // If the range contains heap pages, open them.
94 if (this->IsHeap()) {
95 m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
96 }
97}
98
99void KPageTableBase::MemoryRange::Close() {
100 // If the range contains heap pages, close them.
101 if (this->IsHeap()) {
102 m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
103 }
104}
105
106KPageTableBase::KPageTableBase(KernelCore& kernel)
107 : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel),
108 m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {}
109KPageTableBase::~KPageTableBase() = default;
110
111Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start,
112 KVirtualAddress end, Core::Memory::Memory& memory) {
113 // Initialize our members.
114 m_address_space_width =
115 static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>());
116 m_address_space_start = KProcessAddress(GetInteger(start));
117 m_address_space_end = KProcessAddress(GetInteger(end));
118 m_is_kernel = true;
119 m_enable_aslr = true;
120 m_enable_device_address_space_merge = false;
121
122 m_heap_region_start = 0;
123 m_heap_region_end = 0;
124 m_current_heap_end = 0;
125 m_alias_region_start = 0;
126 m_alias_region_end = 0;
127 m_stack_region_start = 0;
128 m_stack_region_end = 0;
129 m_kernel_map_region_start = 0;
130 m_kernel_map_region_end = 0;
131 m_alias_code_region_start = 0;
132 m_alias_code_region_end = 0;
133 m_code_region_start = 0;
134 m_code_region_end = 0;
135 m_max_heap_size = 0;
136 m_mapped_physical_memory_size = 0;
137 m_mapped_unsafe_physical_memory = 0;
138 m_mapped_insecure_memory = 0;
139 m_mapped_ipc_server_memory = 0;
140
141 m_memory_block_slab_manager =
142 m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
143 m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer();
144 m_resource_limit = m_kernel.GetSystemResourceLimit();
145
146 m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System,
147 KMemoryManager::Direction::FromFront);
148 m_heap_fill_value = MemoryFillValue_Zero;
149 m_ipc_fill_value = MemoryFillValue_Zero;
150 m_stack_fill_value = MemoryFillValue_Zero;
151
152 m_cached_physical_linear_region = nullptr;
153 m_cached_physical_heap_region = nullptr;
154
155 // Initialize our implementation.
156 m_impl = std::make_unique<Common::PageTable>();
157 m_impl->Resize(m_address_space_width, PageBits);
158
159 // Set the tracking memory.
160 m_memory = std::addressof(memory);
161
162 // Initialize our memory block manager.
163 R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
164 m_memory_block_slab_manager));
165}
166
167Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
168 bool enable_das_merge, bool from_back,
169 KMemoryManager::Pool pool, KProcessAddress code_address,
170 size_t code_size, KSystemResource* system_resource,
171 KResourceLimit* resource_limit,
172 Core::Memory::Memory& memory) {
173 // Calculate region extents.
174 const size_t as_width = GetAddressSpaceWidth(as_type);
175 const KProcessAddress start = 0;
176 const KProcessAddress end = (1ULL << as_width);
177
178 // Validate the region.
179 ASSERT(start <= code_address);
180 ASSERT(code_address < code_address + code_size);
181 ASSERT(code_address + code_size - 1 <= end - 1);
182
183 // Define helpers.
184 auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) {
185 return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
186 };
187 auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) {
188 return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
189 };
190
191 // Set our bit width and heap/alias sizes.
192 m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type));
193 size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
194 size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
195
196 // Adjust heap/alias size if we don't have an alias region.
197 if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) ==
198 Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
199 heap_region_size += alias_region_size;
200 alias_region_size = 0;
201 }
202
203 // Set code regions and determine remaining sizes.
204 KProcessAddress process_code_start;
205 KProcessAddress process_code_end;
206 size_t stack_region_size;
207 size_t kernel_map_region_size;
208 if (m_address_space_width == 39) {
209 alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
210 heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
211 stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
212 kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
213 m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
214 m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
215 m_alias_code_region_start = m_code_region_start;
216 m_alias_code_region_end = m_code_region_end;
217 process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment);
218 process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
219 } else {
220 stack_region_size = 0;
221 kernel_map_region_size = 0;
222 m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
223 m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
224 m_stack_region_start = m_code_region_start;
225 m_alias_code_region_start = m_code_region_start;
226 m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
227 GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
228 m_stack_region_end = m_code_region_end;
229 m_kernel_map_region_start = m_code_region_start;
230 m_kernel_map_region_end = m_code_region_end;
231 process_code_start = m_code_region_start;
232 process_code_end = m_code_region_end;
233 }
234
235 // Set other basic fields.
236 m_enable_aslr = enable_aslr;
237 m_enable_device_address_space_merge = enable_das_merge;
238 m_address_space_start = start;
239 m_address_space_end = end;
240 m_is_kernel = false;
241 m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
242 m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
243 m_resource_limit = resource_limit;
244
245 // Determine the region we can place our undetermined regions in.
246 KProcessAddress alloc_start;
247 size_t alloc_size;
248 if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >=
249 (GetInteger(end) - GetInteger(process_code_end))) {
250 alloc_start = m_code_region_start;
251 alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
252 } else {
253 alloc_start = process_code_end;
254 alloc_size = GetInteger(end) - GetInteger(process_code_end);
255 }
256 const size_t needed_size =
257 (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
258 R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
259
260 const size_t remaining_size = alloc_size - needed_size;
261
262 // Determine random placements for each region.
263 size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
264 if (enable_aslr) {
265 alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
266 RegionAlignment;
267 heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
268 RegionAlignment;
269 stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
270 RegionAlignment;
271 kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
272 RegionAlignment;
273 }
274
275 // Setup heap and alias regions.
276 m_alias_region_start = alloc_start + alias_rnd;
277 m_alias_region_end = m_alias_region_start + alias_region_size;
278 m_heap_region_start = alloc_start + heap_rnd;
279 m_heap_region_end = m_heap_region_start + heap_region_size;
280
281 if (alias_rnd <= heap_rnd) {
282 m_heap_region_start += alias_region_size;
283 m_heap_region_end += alias_region_size;
284 } else {
285 m_alias_region_start += heap_region_size;
286 m_alias_region_end += heap_region_size;
287 }
288
289 // Setup stack region.
290 if (stack_region_size) {
291 m_stack_region_start = alloc_start + stack_rnd;
292 m_stack_region_end = m_stack_region_start + stack_region_size;
293
294 if (alias_rnd < stack_rnd) {
295 m_stack_region_start += alias_region_size;
296 m_stack_region_end += alias_region_size;
297 } else {
298 m_alias_region_start += stack_region_size;
299 m_alias_region_end += stack_region_size;
300 }
301
302 if (heap_rnd < stack_rnd) {
303 m_stack_region_start += heap_region_size;
304 m_stack_region_end += heap_region_size;
305 } else {
306 m_heap_region_start += stack_region_size;
307 m_heap_region_end += stack_region_size;
308 }
309 }
310
311 // Setup kernel map region.
312 if (kernel_map_region_size) {
313 m_kernel_map_region_start = alloc_start + kmap_rnd;
314 m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
315
316 if (alias_rnd < kmap_rnd) {
317 m_kernel_map_region_start += alias_region_size;
318 m_kernel_map_region_end += alias_region_size;
319 } else {
320 m_alias_region_start += kernel_map_region_size;
321 m_alias_region_end += kernel_map_region_size;
322 }
323
324 if (heap_rnd < kmap_rnd) {
325 m_kernel_map_region_start += heap_region_size;
326 m_kernel_map_region_end += heap_region_size;
327 } else {
328 m_heap_region_start += kernel_map_region_size;
329 m_heap_region_end += kernel_map_region_size;
330 }
331
332 if (stack_region_size) {
333 if (stack_rnd < kmap_rnd) {
334 m_kernel_map_region_start += stack_region_size;
335 m_kernel_map_region_end += stack_region_size;
336 } else {
337 m_stack_region_start += kernel_map_region_size;
338 m_stack_region_end += kernel_map_region_size;
339 }
340 }
341 }
342
343 // Set heap and fill members.
344 m_current_heap_end = m_heap_region_start;
345 m_max_heap_size = 0;
346 m_mapped_physical_memory_size = 0;
347 m_mapped_unsafe_physical_memory = 0;
348 m_mapped_insecure_memory = 0;
349 m_mapped_ipc_server_memory = 0;
350
351 // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
352 const bool fill_memory = false;
353 m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
354 m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
355 m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
356
357 // Set allocation option.
358 m_allocate_option =
359 KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
360 : KMemoryManager::Direction::FromFront);
361
362 // Ensure that we selected regions inside our address space.
363 auto IsInAddressSpace = [&](KProcessAddress addr) {
364 return m_address_space_start <= addr && addr <= m_address_space_end;
365 };
366 ASSERT(IsInAddressSpace(m_alias_region_start));
367 ASSERT(IsInAddressSpace(m_alias_region_end));
368 ASSERT(IsInAddressSpace(m_heap_region_start));
369 ASSERT(IsInAddressSpace(m_heap_region_end));
370 ASSERT(IsInAddressSpace(m_stack_region_start));
371 ASSERT(IsInAddressSpace(m_stack_region_end));
372 ASSERT(IsInAddressSpace(m_kernel_map_region_start));
373 ASSERT(IsInAddressSpace(m_kernel_map_region_end));
374
375 // Ensure that we selected regions that don't overlap.
376 const KProcessAddress alias_start = m_alias_region_start;
377 const KProcessAddress alias_last = m_alias_region_end - 1;
378 const KProcessAddress heap_start = m_heap_region_start;
379 const KProcessAddress heap_last = m_heap_region_end - 1;
380 const KProcessAddress stack_start = m_stack_region_start;
381 const KProcessAddress stack_last = m_stack_region_end - 1;
382 const KProcessAddress kmap_start = m_kernel_map_region_start;
383 const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
384 ASSERT(alias_last < heap_start || heap_last < alias_start);
385 ASSERT(alias_last < stack_start || stack_last < alias_start);
386 ASSERT(alias_last < kmap_start || kmap_last < alias_start);
387 ASSERT(heap_last < stack_start || stack_last < heap_start);
388 ASSERT(heap_last < kmap_start || kmap_last < heap_start);
389
390 // Initialize our implementation.
391 m_impl = std::make_unique<Common::PageTable>();
392 m_impl->Resize(m_address_space_width, PageBits);
393
394 // Set the tracking memory.
395 m_memory = std::addressof(memory);
396
397 // Initialize our memory block manager.
398 R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
399 m_memory_block_slab_manager));
400}
401
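The region placement above randomizes each of the alias, heap, stack, and kernel-map regions independently: every region receives a random offset into the space left over after the code region is carved out, and whenever region A's offset is less than or equal to region B's, B is shifted up by A's size. The regions therefore end up laid out in the order of their random offsets without overlapping. A self-contained sketch of the two-region case (names and the offset range are illustrative):

#include <cstdint>
#include <utility>

struct Region {
    uint64_t start;
    uint64_t size;
};

// Places two regions inside [alloc_start, alloc_start + alloc_size), given random offsets
// rnd_a and rnd_b drawn from [0, alloc_size - size_a - size_b]. The region with the larger
// offset is shifted past the other, mirroring the alias/heap handling above.
std::pair<Region, Region> PlaceTwoRegions(uint64_t alloc_start, uint64_t rnd_a, uint64_t rnd_b,
                                          uint64_t size_a, uint64_t size_b) {
    Region a{alloc_start + rnd_a, size_a};
    Region b{alloc_start + rnd_b, size_b};
    if (rnd_a <= rnd_b) {
        b.start += size_a; // b lands after a, so skip over a's pages
    } else {
        a.start += size_b; // a lands after b, so skip over b's pages
    }
    return {a, b};
}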
402void KPageTableBase::Finalize() {
403 auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
404 if (Settings::IsFastmemEnabled()) {
405 m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
406 }
407 };
408
409 // Finalize memory blocks.
410 m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
411
412 // Free any unsafe mapped memory.
413 if (m_mapped_unsafe_physical_memory) {
414 UNIMPLEMENTED();
415 }
416
417 // Release any insecure mapped memory.
418 if (m_mapped_insecure_memory) {
419 if (auto* const insecure_resource_limit =
420 KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
421 insecure_resource_limit != nullptr) {
422 insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
423 m_mapped_insecure_memory);
424 }
425 }
426
427 // Release any ipc server memory.
428 if (m_mapped_ipc_server_memory) {
429 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
430 m_mapped_ipc_server_memory);
431 }
432
433 // Invalidate the entire instruction cache.
434 InvalidateEntireInstructionCache(m_system);
435
436 // Close the backing page table, as the destructor is not called for guest objects.
437 m_impl.reset();
438}
439
440KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const {
441 switch (state) {
442 case Svc::MemoryState::Free:
443 case Svc::MemoryState::Kernel:
444 return m_address_space_start;
445 case Svc::MemoryState::Normal:
446 return m_heap_region_start;
447 case Svc::MemoryState::Ipc:
448 case Svc::MemoryState::NonSecureIpc:
449 case Svc::MemoryState::NonDeviceIpc:
450 return m_alias_region_start;
451 case Svc::MemoryState::Stack:
452 return m_stack_region_start;
453 case Svc::MemoryState::Static:
454 case Svc::MemoryState::ThreadLocal:
455 return m_kernel_map_region_start;
456 case Svc::MemoryState::Io:
457 case Svc::MemoryState::Shared:
458 case Svc::MemoryState::AliasCode:
459 case Svc::MemoryState::AliasCodeData:
460 case Svc::MemoryState::Transfered:
461 case Svc::MemoryState::SharedTransfered:
462 case Svc::MemoryState::SharedCode:
463 case Svc::MemoryState::GeneratedCode:
464 case Svc::MemoryState::CodeOut:
465 case Svc::MemoryState::Coverage:
466 case Svc::MemoryState::Insecure:
467 return m_alias_code_region_start;
468 case Svc::MemoryState::Code:
469 case Svc::MemoryState::CodeData:
470 return m_code_region_start;
471 default:
472 UNREACHABLE();
473 }
474}
475
476size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const {
477 switch (state) {
478 case Svc::MemoryState::Free:
479 case Svc::MemoryState::Kernel:
480 return m_address_space_end - m_address_space_start;
481 case Svc::MemoryState::Normal:
482 return m_heap_region_end - m_heap_region_start;
483 case Svc::MemoryState::Ipc:
484 case Svc::MemoryState::NonSecureIpc:
485 case Svc::MemoryState::NonDeviceIpc:
486 return m_alias_region_end - m_alias_region_start;
487 case Svc::MemoryState::Stack:
488 return m_stack_region_end - m_stack_region_start;
489 case Svc::MemoryState::Static:
490 case Svc::MemoryState::ThreadLocal:
491 return m_kernel_map_region_end - m_kernel_map_region_start;
492 case Svc::MemoryState::Io:
493 case Svc::MemoryState::Shared:
494 case Svc::MemoryState::AliasCode:
495 case Svc::MemoryState::AliasCodeData:
496 case Svc::MemoryState::Transfered:
497 case Svc::MemoryState::SharedTransfered:
498 case Svc::MemoryState::SharedCode:
499 case Svc::MemoryState::GeneratedCode:
500 case Svc::MemoryState::CodeOut:
501 case Svc::MemoryState::Coverage:
502 case Svc::MemoryState::Insecure:
503 return m_alias_code_region_end - m_alias_code_region_start;
504 case Svc::MemoryState::Code:
505 case Svc::MemoryState::CodeData:
506 return m_code_region_end - m_code_region_start;
507 default:
508 UNREACHABLE();
509 }
510}
511
512bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
513 const KProcessAddress end = addr + size;
514 const KProcessAddress last = end - 1;
515
516 const KProcessAddress region_start = this->GetRegionAddress(state);
517 const size_t region_size = this->GetRegionSize(state);
518
519 const bool is_in_region =
520 region_start <= addr && addr < end && last <= region_start + region_size - 1;
521 const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
522 m_heap_region_start == m_heap_region_end);
523 const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
524 m_alias_region_start == m_alias_region_end);
525 switch (state) {
526 case Svc::MemoryState::Free:
527 case Svc::MemoryState::Kernel:
528 return is_in_region;
529 case Svc::MemoryState::Io:
530 case Svc::MemoryState::Static:
531 case Svc::MemoryState::Code:
532 case Svc::MemoryState::CodeData:
533 case Svc::MemoryState::Shared:
534 case Svc::MemoryState::AliasCode:
535 case Svc::MemoryState::AliasCodeData:
536 case Svc::MemoryState::Stack:
537 case Svc::MemoryState::ThreadLocal:
538 case Svc::MemoryState::Transfered:
539 case Svc::MemoryState::SharedTransfered:
540 case Svc::MemoryState::SharedCode:
541 case Svc::MemoryState::GeneratedCode:
542 case Svc::MemoryState::CodeOut:
543 case Svc::MemoryState::Coverage:
544 case Svc::MemoryState::Insecure:
545 return is_in_region && !is_in_heap && !is_in_alias;
546 case Svc::MemoryState::Normal:
547 ASSERT(is_in_heap);
548 return is_in_region && !is_in_alias;
549 case Svc::MemoryState::Ipc:
550 case Svc::MemoryState::NonSecureIpc:
551 case Svc::MemoryState::NonDeviceIpc:
552 ASSERT(is_in_alias);
553 return is_in_region && !is_in_heap;
554 default:
555 return false;
556 }
557}
558
559Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
560 KMemoryState state, KMemoryPermission perm_mask,
561 KMemoryPermission perm, KMemoryAttribute attr_mask,
562 KMemoryAttribute attr) const {
563 // Validate the states match expectation.
564 R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
565 R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
566 R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
567
568 R_SUCCEED();
569}
570
571Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
572 size_t size, KMemoryState state_mask,
573 KMemoryState state, KMemoryPermission perm_mask,
574 KMemoryPermission perm,
575 KMemoryAttribute attr_mask,
576 KMemoryAttribute attr) const {
577 ASSERT(this->IsLockedByCurrentThread());
578
579 // Get information about the first block.
580 const KProcessAddress last_addr = addr + size - 1;
581 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
582 KMemoryInfo info = it->GetMemoryInfo();
583
584 // If the start address isn't aligned, we need a block.
585 const size_t blocks_for_start_align =
586 (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
587
588 while (true) {
589 // Validate against the provided masks.
590 R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
591
592 // Break once we're done.
593 if (last_addr <= info.GetLastAddress()) {
594 break;
595 }
596
597 // Advance our iterator.
598 it++;
599 ASSERT(it != m_memory_block_manager.cend());
600 info = it->GetMemoryInfo();
601 }
602
603 // If the end address isn't aligned, we need a block.
604 const size_t blocks_for_end_align =
605 (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
606
607 if (out_blocks_needed != nullptr) {
608 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
609 }
610
611 R_SUCCEED();
612}
613
614Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
615 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
616 KMemoryBlockManager::const_iterator it,
617 KProcessAddress last_addr, KMemoryState state_mask,
618 KMemoryState state, KMemoryPermission perm_mask,
619 KMemoryPermission perm, KMemoryAttribute attr_mask,
620 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
621 ASSERT(this->IsLockedByCurrentThread());
622
623 // Get information about the first block.
624 KMemoryInfo info = it->GetMemoryInfo();
625
626 // Validate all blocks in the range have correct state.
627 const KMemoryState first_state = info.m_state;
628 const KMemoryPermission first_perm = info.m_permission;
629 const KMemoryAttribute first_attr = info.m_attribute;
630 while (true) {
631 // Validate the current block.
632 R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
633 R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
634 R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
635 ResultInvalidCurrentMemory);
636
637 // Validate against the provided masks.
638 R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
639
640 // Break once we're done.
641 if (last_addr <= info.GetLastAddress()) {
642 break;
643 }
644
645 // Advance our iterator.
646 it++;
647 ASSERT(it != m_memory_block_manager.cend());
648 info = it->GetMemoryInfo();
649 }
650
651 // Write output state.
652 if (out_state != nullptr) {
653 *out_state = first_state;
654 }
655 if (out_perm != nullptr) {
656 *out_perm = first_perm;
657 }
658 if (out_attr != nullptr) {
659 *out_attr = first_attr & ~ignore_attr;
660 }
661
662 // If the end address isn't aligned, we need a block.
663 if (out_blocks_needed != nullptr) {
664 const size_t blocks_for_end_align =
665 (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
666 ? 1
667 : 0;
668 *out_blocks_needed = blocks_for_end_align;
669 }
670
671 R_SUCCEED();
672}
673
674Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
675 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
676 KProcessAddress addr, size_t size, KMemoryState state_mask,
677 KMemoryState state, KMemoryPermission perm_mask,
678 KMemoryPermission perm, KMemoryAttribute attr_mask,
679 KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
680 ASSERT(this->IsLockedByCurrentThread());
681
682 // Check memory state.
683 const KProcessAddress last_addr = addr + size - 1;
684 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
685 R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
686 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
687
688 // If the start address isn't aligned, we need a block.
689 if (out_blocks_needed != nullptr &&
690 Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
691 ++(*out_blocks_needed);
692 }
693
694 R_SUCCEED();
695}
696
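The out_blocks_needed values computed by these overloads exist because KMemoryBlockManager keeps one block per maximal run of identical state/permission/attribute: updating a sub-range whose start or end falls strictly inside an existing block splits that block, and each split consumes one extra block that the update allocator must reserve in advance. A tiny illustration of the counting rule (addresses are hypothetical):

#include <cstddef>
#include <cstdint>

// One extra block is needed for each endpoint of [addr, addr + size) that falls strictly
// inside the surrounding block [block_start, block_end).
std::size_t ExtraBlocksNeeded(std::uint64_t addr, std::uint64_t size, std::uint64_t block_start,
                              std::uint64_t block_end) {
    const std::size_t start_split = (addr != block_start) ? 1 : 0;
    const std::size_t end_split = (addr + size != block_end) ? 1 : 0;
    return start_split + end_split;
}

// Example: a block covering [0x10000, 0x20000) and an update of [0x14000, 0x18000)
// gives ExtraBlocksNeeded(...) == 2, i.e. the block splits into three pieces.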
697Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
698 KProcessAddress addr, size_t size, KMemoryState state_mask,
699 KMemoryState state, KMemoryPermission perm_mask,
700 KMemoryPermission perm, KMemoryAttribute attr_mask,
701 KMemoryAttribute attr, KMemoryPermission new_perm,
702 KMemoryAttribute lock_attr) {
703 // Validate basic preconditions.
704 ASSERT(False(lock_attr & attr));
705 ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
706
707 // Validate the lock request.
708 const size_t num_pages = size / PageSize;
709 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
710
711 // Lock the table.
712 KScopedLightLock lk(m_general_lock);
713
714 // Check that the output page group is empty, if it exists.
715 if (out_pg) {
716 ASSERT(out_pg->GetNumPages() == 0);
717 }
718
719 // Check the state.
720 KMemoryState old_state;
721 KMemoryPermission old_perm;
722 KMemoryAttribute old_attr;
723 size_t num_allocator_blocks;
724 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
725 std::addressof(old_attr), std::addressof(num_allocator_blocks),
726 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
727 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
728 attr_mask, attr));
729
730 // Get the physical address, if we're supposed to.
731 if (out_paddr != nullptr) {
732 ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
733 }
734
735 // Make the page group, if we're supposed to.
736 if (out_pg != nullptr) {
737 R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
738 }
739
740 // Create an update allocator.
741 Result allocator_result;
742 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
743 m_memory_block_slab_manager, num_allocator_blocks);
744 R_TRY(allocator_result);
745
746 // Decide on new perm and attr.
747 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
748 KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr);
749
750 // Update permission, if we need to.
751 if (new_perm != old_perm) {
752 // We're going to perform an update, so create a helper.
753 KScopedPageTableUpdater updater(this);
754
755 const KPageProperties properties = {new_perm, false,
756 True(old_attr & KMemoryAttribute::Uncached),
757 DisableMergeAttribute::DisableHeadBodyTail};
758 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
759 OperationType::ChangePermissions, false));
760 }
761
762 // Apply the memory block updates.
763 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
764 new_attr, KMemoryBlockDisableMergeAttribute::Locked,
765 KMemoryBlockDisableMergeAttribute::None);
766
767 // If we have an output group, open.
768 if (out_pg) {
769 out_pg->Open();
770 }
771
772 R_SUCCEED();
773}
774
775Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
776 KMemoryState state, KMemoryPermission perm_mask,
777 KMemoryPermission perm, KMemoryAttribute attr_mask,
778 KMemoryAttribute attr, KMemoryPermission new_perm,
779 KMemoryAttribute lock_attr, const KPageGroup* pg) {
780 // Validate basic preconditions.
781 ASSERT((attr_mask & lock_attr) == lock_attr);
782 ASSERT((attr & lock_attr) == lock_attr);
783
784 // Validate the unlock request.
785 const size_t num_pages = size / PageSize;
786 R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
787
788 // Lock the table.
789 KScopedLightLock lk(m_general_lock);
790
791 // Check the state.
792 KMemoryState old_state;
793 KMemoryPermission old_perm;
794 KMemoryAttribute old_attr;
795 size_t num_allocator_blocks;
796 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
797 std::addressof(old_attr), std::addressof(num_allocator_blocks),
798 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
799 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
800 attr_mask, attr));
801
802 // Check the page group.
803 if (pg != nullptr) {
804 R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
805 }
806
807 // Decide on new perm and attr.
808 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
809 KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr);
810
811 // Create an update allocator.
812 Result allocator_result;
813 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
814 m_memory_block_slab_manager, num_allocator_blocks);
815 R_TRY(allocator_result);
816
817 // Update permission, if we need to.
818 if (new_perm != old_perm) {
819 // We're going to perform an update, so create a helper.
820 KScopedPageTableUpdater updater(this);
821
822 const KPageProperties properties = {new_perm, false,
823 True(old_attr & KMemoryAttribute::Uncached),
824 DisableMergeAttribute::EnableAndMergeHeadBodyTail};
825 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
826 OperationType::ChangePermissions, false));
827 }
828
829 // Apply the memory block updates.
830 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
831 new_attr, KMemoryBlockDisableMergeAttribute::None,
832 KMemoryBlockDisableMergeAttribute::Locked);
833
834 R_SUCCEED();
835}
836
837Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
838 KProcessAddress address) const {
839 ASSERT(this->IsLockedByCurrentThread());
840 ASSERT(out_info != nullptr);
841 ASSERT(out_page != nullptr);
842
843 const KMemoryBlock* block = m_memory_block_manager.FindBlock(address);
844 R_UNLESS(block != nullptr, ResultInvalidCurrentMemory);
845
846 *out_info = block->GetMemoryInfo();
847 out_page->flags = 0;
848 R_SUCCEED();
849}
850
851Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
852 Svc::MemoryState state) const {
853 ASSERT(!this->IsLockedByCurrentThread());
854 ASSERT(out != nullptr);
855
856 const KProcessAddress region_start = this->GetRegionAddress(state);
857 const size_t region_size = this->GetRegionSize(state);
858
859 // Check that the address/size are potentially valid.
860 R_UNLESS((address < address + size), ResultNotFound);
861
862 // Lock the table.
863 KScopedLightLock lk(m_general_lock);
864
865 auto& impl = this->GetImpl();
866
867 // Begin traversal.
868 TraversalContext context;
869 TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
870 bool cur_valid = false;
871 TraversalEntry next_entry;
872 bool next_valid;
873 size_t tot_size = 0;
874
875 next_valid =
876 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
877 next_entry.block_size =
878 (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));
879
880 // Iterate, looking for entry.
881 while (true) {
882 if ((!next_valid && !cur_valid) ||
883 (next_valid && cur_valid &&
884 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
885 cur_entry.block_size += next_entry.block_size;
886 } else {
887 if (cur_valid && cur_entry.phys_addr <= address &&
888 address + size <= cur_entry.phys_addr + cur_entry.block_size) {
889 // Check if this region is valid.
890 const KProcessAddress mapped_address =
891 (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
892 if (R_SUCCEEDED(this->CheckMemoryState(
893 mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state),
894 KMemoryPermission::UserRead, KMemoryPermission::UserRead,
895 KMemoryAttribute::None, KMemoryAttribute::None))) {
896 // It is!
897 *out = mapped_address;
898 R_SUCCEED();
899 }
900 }
901
902 // Update tracking variables.
903 tot_size += cur_entry.block_size;
904 cur_entry = next_entry;
905 cur_valid = next_valid;
906 }
907
908 if (cur_entry.block_size + tot_size >= region_size) {
909 break;
910 }
911
912 next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
913 }
914
915 // Check the last entry.
916 R_UNLESS(cur_valid, ResultNotFound);
917 R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound);
918 R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound);
919
920 // Check if the last region is valid.
921 const KProcessAddress mapped_address =
922 (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
923 R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All,
924 static_cast<KMemoryState>(state),
925 KMemoryPermission::UserRead, KMemoryPermission::UserRead,
926 KMemoryAttribute::None, KMemoryAttribute::None)) {
927 R_CONVERT_ALL(ResultNotFound);
928 }
929 R_END_TRY_CATCH;
930
931 // We found the region.
932 *out = mapped_address;
933 R_SUCCEED();
934}
935
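QueryMappingImpl walks the page table with BeginTraversal/ContinueTraversal and coalesces physically contiguous entries until it finds a run containing the requested physical range, then converts that run's offset within the region back into a guest virtual address. The same traversal pattern, reduced to a single task as a sketch (the nesting of TraversalEntry/TraversalContext inside Common::PageTable is assumed from src/common/page_table.h):

#include <memory>

#include "common/page_table.h"

// Roughly counts how many bytes of [start, start + region_size) are backed by physical pages.
size_t CountMappedBytes(const Common::PageTable& impl, Common::ProcessAddress start,
                        size_t region_size) {
    Common::PageTable::TraversalContext context{};
    Common::PageTable::TraversalEntry entry{};

    bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), start);
    size_t mapped = valid ? entry.block_size : 0;
    size_t traversed = entry.block_size;

    while (traversed < region_size) {
        valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
        if (valid) {
            mapped += entry.block_size;
        }
        traversed += entry.block_size;
    }
    return mapped;
}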
936Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
937 size_t size) {
938 // Lock the table.
939 KScopedLightLock lk(m_general_lock);
940
941 // Validate that the source address's state is valid.
942 KMemoryState src_state;
943 size_t num_src_allocator_blocks;
944 R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
945 std::addressof(num_src_allocator_blocks), src_address, size,
946 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
947 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
948 KMemoryAttribute::All, KMemoryAttribute::None));
949
950 // Validate that the dst address's state is valid.
951 size_t num_dst_allocator_blocks;
952 R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
953 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
954 KMemoryPermission::None, KMemoryAttribute::None,
955 KMemoryAttribute::None));
956
957 // Create an update allocator for the source.
958 Result src_allocator_result;
959 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
960 m_memory_block_slab_manager,
961 num_src_allocator_blocks);
962 R_TRY(src_allocator_result);
963
964 // Create an update allocator for the destination.
965 Result dst_allocator_result;
966 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
967 m_memory_block_slab_manager,
968 num_dst_allocator_blocks);
969 R_TRY(dst_allocator_result);
970
971 // Map the memory.
972 {
973 // Determine the number of pages being operated on.
974 const size_t num_pages = size / PageSize;
975
976        // Create a page group for the memory being mapped.
977 KPageGroup pg(m_kernel, m_block_info_manager);
978
979 // Create the page group representing the source.
980 R_TRY(this->MakePageGroup(pg, src_address, num_pages));
981
982 // We're going to perform an update, so create a helper.
983 KScopedPageTableUpdater updater(this);
984
985 // Reprotect the source as kernel-read/not mapped.
986 const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
987 KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
988 const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
989 const KPageProperties src_properties = {new_src_perm, false, false,
990 DisableMergeAttribute::DisableHeadBodyTail};
991 R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
992 OperationType::ChangePermissions, false));
993
994 // Ensure that we unprotect the source pages on failure.
995 ON_RESULT_FAILURE {
996 const KPageProperties unprotect_properties = {
997 KMemoryPermission::UserReadWrite, false, false,
998 DisableMergeAttribute::EnableHeadBodyTail};
999 R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
1000 unprotect_properties, OperationType::ChangePermissions, true));
1001 };
1002
1003 // Map the alias pages.
1004 const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
1005 DisableMergeAttribute::DisableHead};
1006 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
1007 false));
1008
1009 // Apply the memory block updates.
1010 m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
1011 src_state, new_src_perm, new_src_attr,
1012 KMemoryBlockDisableMergeAttribute::Locked,
1013 KMemoryBlockDisableMergeAttribute::None);
1014 m_memory_block_manager.Update(
1015 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
1016 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1017 KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
1018 }
1019
1020 R_SUCCEED();
1021}
1022
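// NOTE: MapMemory() above implements the memory-aliasing pattern: the source pages keep
// their physical backing but are reprotected to kernel-read/not-mapped and marked Locked,
// while the destination is mapped as Stack/UserReadWrite over the same page group.
// UnmapMemory() below is the inverse and depends on exactly this state. A hedged sketch of
// the expected round trip from a caller's perspective (not the actual SVC wrapper):
//
//   R_TRY(page_table.MapMemory(dst, src, size));   // src becomes inaccessible, dst aliases it
//   // ... use the alias at dst ...
//   R_TRY(page_table.UnmapMemory(dst, src, size)); // dst freed, src restored to UserReadWrite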
1023Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
1024 size_t size) {
1025 // Lock the table.
1026 KScopedLightLock lk(m_general_lock);
1027
1028 // Validate that the source address's state is valid.
1029 KMemoryState src_state;
1030 size_t num_src_allocator_blocks;
1031 R_TRY(this->CheckMemoryState(
1032 std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
1033 src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
1034 KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
1035 KMemoryAttribute::All, KMemoryAttribute::Locked));
1036
1037 // Validate that the dst address's state is valid.
1038 KMemoryPermission dst_perm;
1039 size_t num_dst_allocator_blocks;
1040 R_TRY(this->CheckMemoryState(
1041 nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
1042 dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
1043 KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
1044
1045 // Create an update allocator for the source.
1046 Result src_allocator_result;
1047 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
1048 m_memory_block_slab_manager,
1049 num_src_allocator_blocks);
1050 R_TRY(src_allocator_result);
1051
1052 // Create an update allocator for the destination.
1053 Result dst_allocator_result;
1054 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
1055 m_memory_block_slab_manager,
1056 num_dst_allocator_blocks);
1057 R_TRY(dst_allocator_result);
1058
1059 // Unmap the memory.
1060 {
1061 // Determine the number of pages being operated on.
1062 const size_t num_pages = size / PageSize;
1063
1064 // Create page groups for the memory being unmapped.
1065 KPageGroup pg(m_kernel, m_block_info_manager);
1066
1067 // Create the page group representing the destination.
1068 R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
1069
1070        // Ensure the page group is valid for the source.
1071 R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
1072
1073 // We're going to perform an update, so create a helper.
1074 KScopedPageTableUpdater updater(this);
1075
1076 // Unmap the aliased copy of the pages.
1077 const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
1078 DisableMergeAttribute::None};
1079 R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
1080 dst_unmap_properties, OperationType::Unmap, false));
1081
1082 // Ensure that we re-map the aliased pages on failure.
1083 ON_RESULT_FAILURE {
1084 this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
1085 };
1086
1087 // Try to set the permissions for the source pages back to what they should be.
1088 const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
1089 DisableMergeAttribute::EnableAndMergeHeadBodyTail};
1090 R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
1091 OperationType::ChangePermissions, false));
1092
1093 // Apply the memory block updates.
1094 m_memory_block_manager.Update(
1095 std::addressof(src_allocator), src_address, num_pages, src_state,
1096 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1097 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
1098 m_memory_block_manager.Update(
1099 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
1100 KMemoryPermission::None, KMemoryAttribute::None,
1101 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
1102 }
1103
1104 R_SUCCEED();
1105}
1106
1107Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
1108 size_t size) {
1109 // Validate the mapping request.
1110 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
1111 ResultInvalidMemoryRegion);
1112
1113 // Lock the table.
1114 KScopedLightLock lk(m_general_lock);
1115
1116 // Verify that the source memory is normal heap.
1117 KMemoryState src_state;
1118 KMemoryPermission src_perm;
1119 size_t num_src_allocator_blocks;
1120 R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
1121 std::addressof(num_src_allocator_blocks), src_address, size,
1122 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
1123 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
1124 KMemoryAttribute::None));
1125
1126 // Verify that the destination memory is unmapped.
1127 size_t num_dst_allocator_blocks;
1128 R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
1129 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
1130 KMemoryPermission::None, KMemoryAttribute::None,
1131 KMemoryAttribute::None));
1132
1133 // Create an update allocator for the source.
1134 Result src_allocator_result;
1135 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
1136 m_memory_block_slab_manager,
1137 num_src_allocator_blocks);
1138 R_TRY(src_allocator_result);
1139
1140 // Create an update allocator for the destination.
1141 Result dst_allocator_result;
1142 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
1143 m_memory_block_slab_manager,
1144 num_dst_allocator_blocks);
1145 R_TRY(dst_allocator_result);
1146
1147 // Map the code memory.
1148 {
1149 // Determine the number of pages being operated on.
1150 const size_t num_pages = size / PageSize;
1151
1152        // Create a page group for the memory being mapped.
1153 KPageGroup pg(m_kernel, m_block_info_manager);
1154
1155 // Create the page group representing the source.
1156 R_TRY(this->MakePageGroup(pg, src_address, num_pages));
1157
1158 // We're going to perform an update, so create a helper.
1159 KScopedPageTableUpdater updater(this);
1160
1161 // Reprotect the source as kernel-read/not mapped.
1162 const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
1163 KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
1164 const KPageProperties src_properties = {new_perm, false, false,
1165 DisableMergeAttribute::DisableHeadBodyTail};
1166 R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
1167 OperationType::ChangePermissions, false));
1168
1169 // Ensure that we unprotect the source pages on failure.
1170 ON_RESULT_FAILURE {
1171 const KPageProperties unprotect_properties = {
1172 src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail};
1173 R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
1174 unprotect_properties, OperationType::ChangePermissions, true));
1175 };
1176
1177 // Map the alias pages.
1178 const KPageProperties dst_properties = {new_perm, false, false,
1179 DisableMergeAttribute::DisableHead};
1180 R_TRY(
1181 this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
1182
1183 // Apply the memory block updates.
1184 m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
1185 src_state, new_perm, KMemoryAttribute::Locked,
1186 KMemoryBlockDisableMergeAttribute::Locked,
1187 KMemoryBlockDisableMergeAttribute::None);
1188 m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
1189 KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
1190 KMemoryBlockDisableMergeAttribute::Normal,
1191 KMemoryBlockDisableMergeAttribute::None);
1192 }
1193
1194 R_SUCCEED();
1195}
1196
1197Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
1198 size_t size) {
1199 // Validate the mapping request.
1200 R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
1201 ResultInvalidMemoryRegion);
1202
1203 // Lock the table.
1204 KScopedLightLock lk(m_general_lock);
1205
1206 // Verify that the source memory is locked normal heap.
1207 size_t num_src_allocator_blocks;
1208 R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
1209 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
1210 KMemoryPermission::None, KMemoryAttribute::All,
1211 KMemoryAttribute::Locked));
1212
1213 // Verify that the destination memory is aliasable code.
1214 size_t num_dst_allocator_blocks;
1215 R_TRY(this->CheckMemoryStateContiguous(
1216 std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
1217 KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
1218 KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
1219
1220 // Determine whether any pages being unmapped are code.
1221 bool any_code_pages = false;
1222 {
1223 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
1224 while (true) {
1225 // Get the memory info.
1226 const KMemoryInfo info = it->GetMemoryInfo();
1227
1228            // Check if the memory has the code flag.
1229 if (True(info.GetState() & KMemoryState::FlagCode)) {
1230 any_code_pages = true;
1231 break;
1232 }
1233
1234 // Check if we're done.
1235 if (dst_address + size - 1 <= info.GetLastAddress()) {
1236 break;
1237 }
1238
1239 // Advance.
1240 ++it;
1241 }
1242 }
1243
1244 // Ensure that we maintain the instruction cache.
1245 bool reprotected_pages = false;
1246 SCOPE_EXIT({
1247 if (reprotected_pages && any_code_pages) {
1248 InvalidateEntireInstructionCache(m_system);
1249 }
1250 });
1251
1252 // Unmap.
1253 {
1254 // Determine the number of pages being operated on.
1255 const size_t num_pages = size / PageSize;
1256
1257 // Create page groups for the memory being unmapped.
1258 KPageGroup pg(m_kernel, m_block_info_manager);
1259
1260 // Create the page group representing the destination.
1261 R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
1262
1263 // Verify that the page group contains the same pages as the source.
1264 R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
1265
1266 // Create an update allocator for the source.
1267 Result src_allocator_result;
1268 KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
1269 m_memory_block_slab_manager,
1270 num_src_allocator_blocks);
1271 R_TRY(src_allocator_result);
1272
1273 // Create an update allocator for the destination.
1274 Result dst_allocator_result;
1275 KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
1276 m_memory_block_slab_manager,
1277 num_dst_allocator_blocks);
1278 R_TRY(dst_allocator_result);
1279
1280 // We're going to perform an update, so create a helper.
1281 KScopedPageTableUpdater updater(this);
1282
1283 // Unmap the aliased copy of the pages.
1284 const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
1285 DisableMergeAttribute::None};
1286 R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
1287 dst_unmap_properties, OperationType::Unmap, false));
1288
1289 // Ensure that we re-map the aliased pages on failure.
1290 ON_RESULT_FAILURE {
1291 this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
1292 };
1293
1294 // Try to set the permissions for the source pages back to what they should be.
1295 const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
1296 DisableMergeAttribute::EnableAndMergeHeadBodyTail};
1297 R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
1298 OperationType::ChangePermissions, false));
1299
1300 // Apply the memory block updates.
1301 m_memory_block_manager.Update(
1302 std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
1303 KMemoryPermission::None, KMemoryAttribute::None,
1304 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
1305 m_memory_block_manager.Update(
1306 std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
1307 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
1308 KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
1309
1310 // Note that we reprotected pages.
1311 reprotected_pages = true;
1312 }
1313
1314 R_SUCCEED();
1315}
1316
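// NOTE: The SCOPE_EXIT in UnmapCodeMemory() above is the instruction-cache maintenance
// point for code unmapping: the full icache invalidate only runs when both conditions
// hold, i.e. at least one destination block carried KMemoryState::FlagCode and the unmap
// actually reached the reprotect step (reprotected_pages == true). Failing out before the
// permission change therefore skips the comparatively expensive cache flush.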
1317Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
1318 // Get the insecure memory resource limit and pool.
1319 auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
1320 const auto insecure_pool =
1321 static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());
1322
1323 // Reserve the insecure memory.
1324 // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached.
1325 KScopedResourceReservation memory_reservation(insecure_resource_limit,
1326 Svc::LimitableResource::PhysicalMemoryMax, size);
1327 R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory);
1328
1329 // Allocate pages for the insecure memory.
1330 KPageGroup pg(m_kernel, m_block_info_manager);
1331 R_TRY(m_kernel.MemoryManager().AllocateAndOpen(
1332 std::addressof(pg), size / PageSize,
1333 KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront)));
1334
1335 // Close the opened pages when we're done with them.
1336    // If the mapping succeeds, each page will gain an extra reference; otherwise, they will be
1337    // freed automatically.
1338 SCOPE_EXIT({ pg.Close(); });
1339
1340 // Clear all the newly allocated pages.
1341 for (const auto& it : pg) {
1342 std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
1343 static_cast<u32>(m_heap_fill_value), it.GetSize());
1344 }
1345
1346 // Lock the table.
1347 KScopedLightLock lk(m_general_lock);
1348
1349 // Validate that the address's state is valid.
1350 size_t num_allocator_blocks;
1351 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1352 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
1353 KMemoryPermission::None, KMemoryAttribute::None,
1354 KMemoryAttribute::None));
1355
1356 // Create an update allocator.
1357 Result allocator_result;
1358 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1359 m_memory_block_slab_manager, num_allocator_blocks);
1360 R_TRY(allocator_result);
1361
1362 // We're going to perform an update, so create a helper.
1363 KScopedPageTableUpdater updater(this);
1364
1365 // Map the pages.
1366 const size_t num_pages = size / PageSize;
1367 const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
1368 DisableMergeAttribute::DisableHead};
1369 R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties,
1370 OperationType::MapGroup, false));
1371
1372 // Apply the memory block update.
1373 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages,
1374 KMemoryState::Insecure, KMemoryPermission::UserReadWrite,
1375 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
1376 KMemoryBlockDisableMergeAttribute::None);
1377
1378 // Update our mapped insecure size.
1379 m_mapped_insecure_memory += size;
1380
1381 // Commit the memory reservation.
1382 memory_reservation.Commit();
1383
1384 // We succeeded.
1385 R_SUCCEED();
1386}
1387
1388Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
1389 // Lock the table.
1390 KScopedLightLock lk(m_general_lock);
1391
1392 // Check the memory state.
1393 size_t num_allocator_blocks;
1394 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1395 KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All,
1396 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
1397 KMemoryAttribute::None));
1398
1399 // Create an update allocator.
1400 Result allocator_result;
1401 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1402 m_memory_block_slab_manager, num_allocator_blocks);
1403 R_TRY(allocator_result);
1404
1405 // We're going to perform an update, so create a helper.
1406 KScopedPageTableUpdater updater(this);
1407
1408 // Unmap the memory.
1409 const size_t num_pages = size / PageSize;
1410 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
1411 DisableMergeAttribute::None};
1412 R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
1413 OperationType::Unmap, false));
1414
1415 // Apply the memory block update.
1416 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
1417 KMemoryPermission::None, KMemoryAttribute::None,
1418 KMemoryBlockDisableMergeAttribute::None,
1419 KMemoryBlockDisableMergeAttribute::Normal);
1420
1421 // Update our mapped insecure size.
1422 m_mapped_insecure_memory -= size;
1423
1424 // Release the insecure memory from the insecure limit.
1425 if (auto* const insecure_resource_limit =
1426 KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
1427 insecure_resource_limit != nullptr) {
1428 insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size);
1429 }
1430
1431 R_SUCCEED();
1432}
1433
1434KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
1435 size_t num_pages, size_t alignment, size_t offset,
1436 size_t guard_pages) const {
1437 KProcessAddress address = 0;
1438
1439 if (num_pages <= region_num_pages) {
1440 if (this->IsAslrEnabled()) {
1441 // Try to directly find a free area up to 8 times.
1442 for (size_t i = 0; i < 8; i++) {
1443 const size_t random_offset =
1444 KSystemControl::GenerateRandomRange(
1445 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
1446 alignment;
1447 const KProcessAddress candidate =
1448 Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
1449
1450 KMemoryInfo info;
1451 Svc::PageInfo page_info;
1452 R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info),
1453 candidate));
1454
1455 if (info.m_state != KMemoryState::Free) {
1456 continue;
1457 }
1458 if (!(region_start <= candidate)) {
1459 continue;
1460 }
1461 if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) {
1462 continue;
1463 }
1464 if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
1465 info.GetLastAddress())) {
1466 continue;
1467 }
1468 if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
1469 region_start + region_num_pages * PageSize - 1)) {
1470 continue;
1471 }
1472
1473 address = candidate;
1474 break;
1475 }
1476 // Fall back to finding the first free area with a random offset.
1477 if (address == 0) {
1478 // NOTE: Nintendo does not account for guard pages here.
1479 // This may theoretically cause an offset to be chosen that cannot be mapped.
1480 // We will account for guard pages.
1481 const size_t offset_pages = KSystemControl::GenerateRandomRange(
1482 0, region_num_pages - num_pages - guard_pages);
1483 address = m_memory_block_manager.FindFreeArea(
1484 region_start + offset_pages * PageSize, region_num_pages - offset_pages,
1485 num_pages, alignment, offset, guard_pages);
1486 }
1487 }
1488 // Find the first free area.
1489 if (address == 0) {
1490 address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
1491 alignment, offset, guard_pages);
1492 }
1493 }
1494
1495 return address;
1496}
1497
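// NOTE: With ASLR enabled, FindFreeArea() above probes up to 8 random, alignment-respecting
// candidates against QueryInfoImpl() before falling back to a linear scan that itself starts
// at a random page offset. A hedged worked example (illustrative numbers only): for a 16 MiB
// region of 4 KiB pages with num_pages = 4, guard_pages = 1, alignment = 0x1000, offset = 0:
//
//   region_num_pages = 16_MiB / PageSize;                               // 4096
//   max_units        = (region_num_pages - 4 - 1) * PageSize / 0x1000;  // 4091
//   random_offset    = GenerateRandomRange(0, max_units) * 0x1000;
//   candidate        = AlignDown(region_start + random_offset, 0x1000); // + offset (0 here)
//
// The candidate is rejected unless its containing block is Free and has room for num_pages
// plus the guard pages on both sides, entirely within the requested region.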
1498size_t KPageTableBase::GetSize(KMemoryState state) const {
1499 // Lock the table.
1500 KScopedLightLock lk(m_general_lock);
1501
1502 // Iterate, counting blocks with the desired state.
1503 size_t total_size = 0;
1504 for (KMemoryBlockManager::const_iterator it =
1505 m_memory_block_manager.FindIterator(m_address_space_start);
1506 it != m_memory_block_manager.end(); ++it) {
1507 // Get the memory info.
1508 const KMemoryInfo info = it->GetMemoryInfo();
1509 if (info.GetState() == state) {
1510 total_size += info.GetSize();
1511 }
1512 }
1513
1514 return total_size;
1515}
1516
1517size_t KPageTableBase::GetCodeSize() const {
1518 return this->GetSize(KMemoryState::Code);
1519}
1520
1521size_t KPageTableBase::GetCodeDataSize() const {
1522 return this->GetSize(KMemoryState::CodeData);
1523}
1524
1525size_t KPageTableBase::GetAliasCodeSize() const {
1526 return this->GetSize(KMemoryState::AliasCode);
1527}
1528
1529size_t KPageTableBase::GetAliasCodeDataSize() const {
1530 return this->GetSize(KMemoryState::AliasCodeData);
1531}
1532
1533Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
1534 size_t num_pages, KMemoryPermission perm) {
1535 ASSERT(this->IsLockedByCurrentThread());
1536
1537 // Create a page group to hold the pages we allocate.
1538 KPageGroup pg(m_kernel, m_block_info_manager);
1539
1540 // Allocate the pages.
1541 R_TRY(
1542 m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
1543
1544 // Ensure that the page group is closed when we're done working with it.
1545 SCOPE_EXIT({ pg.Close(); });
1546
1547 // Clear all pages.
1548 for (const auto& it : pg) {
1549 std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
1550 static_cast<u32>(m_heap_fill_value), it.GetSize());
1551 }
1552
1553 // Map the pages.
1554 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None};
1555 R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup,
1556 false));
1557}
1558
1559Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
1560 const KPageGroup& pg, const KPageProperties properties,
1561 bool reuse_ll) {
1562 ASSERT(this->IsLockedByCurrentThread());
1563
1564 // Note the current address, so that we can iterate.
1565 const KProcessAddress start_address = address;
1566 KProcessAddress cur_address = address;
1567
1568 // Ensure that we clean up on failure.
1569 ON_RESULT_FAILURE {
1570 ASSERT(!reuse_ll);
1571 if (cur_address != start_address) {
1572 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
1573 DisableMergeAttribute::None};
1574 R_ASSERT(this->Operate(page_list, start_address,
1575 (cur_address - start_address) / PageSize, 0, false,
1576 unmap_properties, OperationType::Unmap, true));
1577 }
1578 };
1579
1580 // Iterate, mapping all pages in the group.
1581 for (const auto& block : pg) {
1582 // Map and advance.
1583 const KPageProperties cur_properties =
1584 (cur_address == start_address)
1585 ? properties
1586 : KPageProperties{properties.perm, properties.io, properties.uncached,
1587 DisableMergeAttribute::None};
1588 R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true,
1589 cur_properties, OperationType::Map, reuse_ll));
1590 cur_address += block.GetSize();
1591 }
1592
1593 // We succeeded!
1594 R_SUCCEED();
1595}
1596
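// NOTE: MapPageGroupImpl() above maps the group block by block and, on failure, unmaps
// exactly the prefix that was already mapped ((cur_address - start_address) / PageSize
// pages). Only the very first block receives the caller-provided DisableMergeAttribute;
// every subsequent block is mapped with DisableMergeAttribute::None so that the blocks can
// be merged back into one contiguous mapping by the underlying page table.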
1597void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
1598 const KPageGroup& pg) {
1599 ASSERT(this->IsLockedByCurrentThread());
1600
1601 // Note the current address, so that we can iterate.
1602 const KProcessAddress start_address = address;
1603 const KProcessAddress last_address = start_address + size - 1;
1604 const KProcessAddress end_address = last_address + 1;
1605
1606 // Iterate over the memory.
1607 auto pg_it = pg.begin();
1608 ASSERT(pg_it != pg.end());
1609
1610 KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
1611 size_t pg_pages = pg_it->GetNumPages();
1612
1613 auto it = m_memory_block_manager.FindIterator(start_address);
1614 while (true) {
1615 // Check that the iterator is valid.
1616 ASSERT(it != m_memory_block_manager.end());
1617
1618 // Get the memory info.
1619 const KMemoryInfo info = it->GetMemoryInfo();
1620
1621 // Determine the range to map.
1622 KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address));
1623 const KProcessAddress map_end_address =
1624 std::min(info.GetEndAddress(), GetInteger(end_address));
1625 ASSERT(map_end_address != map_address);
1626
1627 // Determine if we should disable head merge.
1628 const bool disable_head_merge =
1629 info.GetAddress() >= GetInteger(start_address) &&
1630 True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
1631 const KPageProperties map_properties = {
1632 info.GetPermission(), false, false,
1633 disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
1634
1635 // While we have pages to map, map them.
1636 size_t map_pages = (map_end_address - map_address) / PageSize;
1637 while (map_pages > 0) {
1638 // Check if we're at the end of the physical block.
1639 if (pg_pages == 0) {
1640 // Ensure there are more pages to map.
1641 ASSERT(pg_it != pg.end());
1642
1643 // Advance our physical block.
1644 ++pg_it;
1645 pg_phys_addr = pg_it->GetAddress();
1646 pg_pages = pg_it->GetNumPages();
1647 }
1648
1649 // Map whatever we can.
1650 const size_t cur_pages = std::min(pg_pages, map_pages);
1651            R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true,
1652 map_properties, OperationType::Map, true));
1653
1654 // Advance.
1655 map_address += cur_pages * PageSize;
1656 map_pages -= cur_pages;
1657
1658 pg_phys_addr += cur_pages * PageSize;
1659 pg_pages -= cur_pages;
1660 }
1661
1662 // Check if we're done.
1663 if (last_address <= info.GetLastAddress()) {
1664 break;
1665 }
1666
1667 // Advance.
1668 ++it;
1669 }
1670
1671 // Check that we re-mapped precisely the page group.
1672 ASSERT((++pg_it) == pg.end());
1673}
1674
1675Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
1676 ASSERT(this->IsLockedByCurrentThread());
1677
1678 const size_t size = num_pages * PageSize;
1679
1680 // We're making a new group, not adding to an existing one.
1681 R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
1682
1683 auto& impl = this->GetImpl();
1684
1685 // Begin traversal.
1686 TraversalContext context;
1687 TraversalEntry next_entry;
1688 R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr),
1689 ResultInvalidCurrentMemory);
1690
1691 // Prepare tracking variables.
1692 KPhysicalAddress cur_addr = next_entry.phys_addr;
1693 size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
1694 size_t tot_size = cur_size;
1695
1696 // Iterate, adding to group as we go.
1697 while (tot_size < size) {
1698 R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)),
1699 ResultInvalidCurrentMemory);
1700
1701 if (next_entry.phys_addr != (cur_addr + cur_size)) {
1702 const size_t cur_pages = cur_size / PageSize;
1703
1704 R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
1705 R_TRY(pg.AddBlock(cur_addr, cur_pages));
1706
1707 cur_addr = next_entry.phys_addr;
1708 cur_size = next_entry.block_size;
1709 } else {
1710 cur_size += next_entry.block_size;
1711 }
1712
1713 tot_size += next_entry.block_size;
1714 }
1715
1716 // Ensure we add the right amount for the last block.
1717 if (tot_size > size) {
1718 cur_size -= (tot_size - size);
1719 }
1720
1721    // Add the last block.
1722 const size_t cur_pages = cur_size / PageSize;
1723 R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
1724 R_TRY(pg.AddBlock(cur_addr, cur_pages));
1725
1726 R_SUCCEED();
1727}
1728
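// NOTE: MakePageGroup() above converts a virtually contiguous range into a list of
// physically contiguous runs: traversal entries are merged while
// next_entry.phys_addr == cur_addr + cur_size, and a break in physical contiguity flushes
// the current run via pg.AddBlock(). As an illustrative example (hypothetical addresses),
// a five-page mapping backed by physical pages {P, P+1, P+2, Q, Q+1} yields two blocks:
//
//   pg = [ { address: P, num_pages: 3 }, { address: Q, num_pages: 2 } ]
//
// Only heap physical addresses are accepted; anything else fails with
// ResultInvalidCurrentMemory.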
1729bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr,
1730 size_t num_pages) {
1731 ASSERT(this->IsLockedByCurrentThread());
1732
1733 const size_t size = num_pages * PageSize;
1734
1735 // Empty groups are necessarily invalid.
1736 if (pg.empty()) {
1737 return false;
1738 }
1739
1740 auto& impl = this->GetImpl();
1741
1742 // We're going to validate that the group we'd expect is the group we see.
1743 auto cur_it = pg.begin();
1744 KPhysicalAddress cur_block_address = cur_it->GetAddress();
1745 size_t cur_block_pages = cur_it->GetNumPages();
1746
1747 auto UpdateCurrentIterator = [&]() {
1748 if (cur_block_pages == 0) {
1749 if ((++cur_it) == pg.end()) {
1750 return false;
1751 }
1752
1753 cur_block_address = cur_it->GetAddress();
1754 cur_block_pages = cur_it->GetNumPages();
1755 }
1756 return true;
1757 };
1758
1759 // Begin traversal.
1760 TraversalContext context;
1761 TraversalEntry next_entry;
1762 if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
1763 return false;
1764 }
1765
1766 // Prepare tracking variables.
1767 KPhysicalAddress cur_addr = next_entry.phys_addr;
1768 size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
1769 size_t tot_size = cur_size;
1770
1771 // Iterate, comparing expected to actual.
1772 while (tot_size < size) {
1773 if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
1774 return false;
1775 }
1776
1777 if (next_entry.phys_addr != (cur_addr + cur_size)) {
1778 const size_t cur_pages = cur_size / PageSize;
1779
1780 if (!IsHeapPhysicalAddress(cur_addr)) {
1781 return false;
1782 }
1783
1784 if (!UpdateCurrentIterator()) {
1785 return false;
1786 }
1787
1788 if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
1789 return false;
1790 }
1791
1792 cur_block_address += cur_size;
1793 cur_block_pages -= cur_pages;
1794 cur_addr = next_entry.phys_addr;
1795 cur_size = next_entry.block_size;
1796 } else {
1797 cur_size += next_entry.block_size;
1798 }
1799
1800 tot_size += next_entry.block_size;
1801 }
1802
1803 // Ensure we compare the right amount for the last block.
1804 if (tot_size > size) {
1805 cur_size -= (tot_size - size);
1806 }
1807
1808 if (!IsHeapPhysicalAddress(cur_addr)) {
1809 return false;
1810 }
1811
1812 if (!UpdateCurrentIterator()) {
1813 return false;
1814 }
1815
1816 return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
1817}
1818
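// NOTE: IsValidPageGroup() above replays the same traversal/coalescing walk as
// MakePageGroup(), but instead of building a group it compares each physically contiguous
// run against the next block of the provided group. UnmapMemory() and UnmapCodeMemory()
// use it to confirm that the pages currently mapped at the source still match the alias
// that is about to be torn down, so a mapping that changed in the meantime fails with
// ResultInvalidMemoryRegion instead of being unmapped incorrectly.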
1819Result KPageTableBase::GetContiguousMemoryRangeWithState(
1820 MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask,
1821 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
1822 KMemoryAttribute attr_mask, KMemoryAttribute attr) {
1823 ASSERT(this->IsLockedByCurrentThread());
1824
1825 auto& impl = this->GetImpl();
1826
1827 // Begin a traversal.
1828 TraversalContext context;
1829 TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
1830 R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address),
1831 ResultInvalidCurrentMemory);
1832
1833 // Traverse until we have enough size or we aren't contiguous any more.
1834 const KPhysicalAddress phys_address = cur_entry.phys_addr;
1835 size_t contig_size;
1836 for (contig_size =
1837 cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1));
1838 contig_size < size; contig_size += cur_entry.block_size) {
1839 if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
1840 break;
1841 }
1842 if (cur_entry.phys_addr != phys_address + contig_size) {
1843 break;
1844 }
1845 }
1846
1847 // Take the minimum size for our region.
1848 size = std::min(size, contig_size);
1849
1850 // Check that the memory is contiguous (modulo the reference count bit).
1851 const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted;
1852 const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(
1853 address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
1854 attr_mask, attr));
1855 if (!is_heap) {
1856 R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask,
1857 perm, attr_mask, attr));
1858 }
1859
1860 // The memory is contiguous, so set the output range.
1861 out->Set(phys_address, size, is_heap);
1862 R_SUCCEED();
1863}
1864
1865Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size,
1866 Svc::MemoryPermission svc_perm) {
1867 const size_t num_pages = size / PageSize;
1868
1869 // Lock the table.
1870 KScopedLightLock lk(m_general_lock);
1871
1872 // Verify we can change the memory permission.
1873 KMemoryState old_state;
1874 KMemoryPermission old_perm;
1875 size_t num_allocator_blocks;
1876 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
1877 std::addressof(num_allocator_blocks), addr, size,
1878 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
1879 KMemoryPermission::None, KMemoryPermission::None,
1880 KMemoryAttribute::All, KMemoryAttribute::None));
1881
1882 // Determine new perm.
1883 const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
1884 R_SUCCEED_IF(old_perm == new_perm);
1885
1886 // Create an update allocator.
1887 Result allocator_result;
1888 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1889 m_memory_block_slab_manager, num_allocator_blocks);
1890 R_TRY(allocator_result);
1891
1892 // We're going to perform an update, so create a helper.
1893 KScopedPageTableUpdater updater(this);
1894
1895 // Perform mapping operation.
1896 const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
1897 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
1898 OperationType::ChangePermissions, false));
1899
1900 // Update the blocks.
1901 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
1902 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1903 KMemoryBlockDisableMergeAttribute::None);
1904
1905 R_SUCCEED();
1906}
1907
1908Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
1909 Svc::MemoryPermission svc_perm) {
1910 const size_t num_pages = size / PageSize;
1911
1912 // Lock the table.
1913 KScopedLightLock lk(m_general_lock);
1914
1915 // Verify we can change the memory permission.
1916 KMemoryState old_state;
1917 KMemoryPermission old_perm;
1918 size_t num_allocator_blocks;
1919 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
1920 std::addressof(num_allocator_blocks), addr, size,
1921 KMemoryState::FlagCode, KMemoryState::FlagCode,
1922 KMemoryPermission::None, KMemoryPermission::None,
1923 KMemoryAttribute::All, KMemoryAttribute::None));
1924
1925 // Make a new page group for the region.
1926 KPageGroup pg(m_kernel, m_block_info_manager);
1927
1928 // Determine new perm/state.
1929 const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
1930 KMemoryState new_state = old_state;
1931 const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
1932 const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
1933 const bool was_x =
1934 (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
1935 ASSERT(!(is_w && is_x));
1936
1937 if (is_w) {
1938 switch (old_state) {
1939 case KMemoryState::Code:
1940 new_state = KMemoryState::CodeData;
1941 break;
1942 case KMemoryState::AliasCode:
1943 new_state = KMemoryState::AliasCodeData;
1944 break;
1945 default:
1946 UNREACHABLE();
1947 }
1948 }
1949
1950 // Create a page group, if we're setting execute permissions.
1951 if (is_x) {
1952 R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
1953 }
1954
1955 // Succeed if there's nothing to do.
1956 R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
1957
1958 // Create an update allocator.
1959 Result allocator_result;
1960 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1961 m_memory_block_slab_manager, num_allocator_blocks);
1962 R_TRY(allocator_result);
1963
1964 // We're going to perform an update, so create a helper.
1965 KScopedPageTableUpdater updater(this);
1966
1967 // Perform mapping operation.
1968 const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
1969 const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush
1970 : OperationType::ChangePermissions;
1971 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation,
1972 false));
1973
1974 // Update the blocks.
1975 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
1976 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1977 KMemoryBlockDisableMergeAttribute::None);
1978
1979 // Ensure cache coherency, if we're setting pages as executable.
1980 if (is_x) {
1981 for (const auto& block : pg) {
1982 StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
1983 }
1984 InvalidateEntireInstructionCache(m_system);
1985 }
1986
1987 R_SUCCEED();
1988}
1989
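// NOTE: SetProcessMemoryPermission() above enforces W^X (ASSERT(!(is_w && is_x))) and
// promotes the memory state when write access is granted:
//
//   Code      -> CodeData        (when UserWrite is requested)
//   AliasCode -> AliasCodeData   (when UserWrite is requested)
//
// When execute permission is granted, the data cache for the affected physical blocks is
// stored back and the entire instruction cache is invalidated, so code written through the
// previously writable mapping is guaranteed to be visible to instruction fetch.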
1990Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
1991 KMemoryAttribute attr) {
1992 const size_t num_pages = size / PageSize;
1993 ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask);
1994
1995 // Lock the table.
1996 KScopedLightLock lk(m_general_lock);
1997
1998 // Verify we can change the memory attribute.
1999 KMemoryState old_state;
2000 KMemoryPermission old_perm;
2001 KMemoryAttribute old_attr;
2002 size_t num_allocator_blocks;
2003 constexpr KMemoryAttribute AttributeTestMask =
2004 ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
2005 const KMemoryState state_test_mask =
2006 (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute
2007 : KMemoryState::None) |
2008 (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock
2009 : KMemoryState::None);
2010 R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
2011 std::addressof(old_attr), std::addressof(num_allocator_blocks),
2012 addr, size, state_test_mask, state_test_mask,
2013 KMemoryPermission::None, KMemoryPermission::None,
2014 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
2015
2016 // Create an update allocator.
2017 Result allocator_result;
2018 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2019 m_memory_block_slab_manager, num_allocator_blocks);
2020 R_TRY(allocator_result);
2021
2022 // We're going to perform an update, so create a helper.
2023 KScopedPageTableUpdater updater(this);
2024
2025 // If we need to, perform a change attribute operation.
2026 if (True(mask & KMemoryAttribute::Uncached)) {
2027 // Determine the new attribute.
2028 const KMemoryAttribute new_attr =
2029 static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
2030
2031 // Perform operation.
2032 const KPageProperties properties = {old_perm, false,
2033 True(new_attr & KMemoryAttribute::Uncached),
2034 DisableMergeAttribute::None};
2035 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
2036 OperationType::ChangePermissionsAndRefreshAndFlush, false));
2037 }
2038
2039 // Update the blocks.
2040 m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
2041
2042 R_SUCCEED();
2043}
2044
2045Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
2046 // Lock the physical memory mutex.
2047 KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
2048
2049 // Try to perform a reduction in heap, instead of an extension.
2050 KProcessAddress cur_address;
2051 size_t allocation_size;
2052 {
2053 // Lock the table.
2054 KScopedLightLock lk(m_general_lock);
2055
2056 // Validate that setting heap size is possible at all.
2057 R_UNLESS(!m_is_kernel, ResultOutOfMemory);
2058 R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
2059 ResultOutOfMemory);
2060 R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
2061
2062 if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
2063 // The size being requested is less than the current size, so we need to free the end of
2064 // the heap.
2065
2066 // Validate memory state.
2067 size_t num_allocator_blocks;
2068 R_TRY(this->CheckMemoryState(
2069 std::addressof(num_allocator_blocks), m_heap_region_start + size,
2070 (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All,
2071 KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
2072 KMemoryAttribute::All, KMemoryAttribute::None));
2073
2074 // Create an update allocator.
2075 Result allocator_result;
2076 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2077 m_memory_block_slab_manager,
2078 num_allocator_blocks);
2079 R_TRY(allocator_result);
2080
2081 // We're going to perform an update, so create a helper.
2082 KScopedPageTableUpdater updater(this);
2083
2084 // Unmap the end of the heap.
2085 const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
2086 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
2087 DisableMergeAttribute::None};
2088 R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0,
2089 false, unmap_properties, OperationType::Unmap, false));
2090
2091 // Release the memory from the resource limit.
2092 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
2093 num_pages * PageSize);
2094
2095 // Apply the memory block update.
2096 m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
2097 num_pages, KMemoryState::Free, KMemoryPermission::None,
2098 KMemoryAttribute::None,
2099 KMemoryBlockDisableMergeAttribute::None,
2100 size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
2101 : KMemoryBlockDisableMergeAttribute::None);
2102
2103 // Update the current heap end.
2104 m_current_heap_end = m_heap_region_start + size;
2105
2106 // Set the output.
2107 *out = m_heap_region_start;
2108 R_SUCCEED();
2109 } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
2110 // The size requested is exactly the current size.
2111 *out = m_heap_region_start;
2112 R_SUCCEED();
2113 } else {
2114 // We have to allocate memory. Determine how much to allocate and where while the table
2115 // is locked.
2116 cur_address = m_current_heap_end;
2117 allocation_size = size - (m_current_heap_end - m_heap_region_start);
2118 }
2119 }
2120
2121 // Reserve memory for the heap extension.
2122 KScopedResourceReservation memory_reservation(
2123 m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size);
2124 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
2125
2126 // Allocate pages for the heap extension.
2127 KPageGroup pg(m_kernel, m_block_info_manager);
2128 R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize,
2129 m_allocate_option));
2130
2131 // Close the opened pages when we're done with them.
2132    // If the mapping succeeds, each page will gain an extra reference; otherwise, they will be
2133    // freed automatically.
2134 SCOPE_EXIT({ pg.Close(); });
2135
2136 // Clear all the newly allocated pages.
2137 for (const auto& it : pg) {
2138 std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value,
2139 it.GetSize());
2140 }
2141
2142 // Map the pages.
2143 {
2144 // Lock the table.
2145 KScopedLightLock lk(m_general_lock);
2146
2147 // Ensure that the heap hasn't changed since we began executing.
2148 ASSERT(cur_address == m_current_heap_end);
2149
2150 // Check the memory state.
2151 size_t num_allocator_blocks;
2152 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
2153 allocation_size, KMemoryState::All, KMemoryState::Free,
2154 KMemoryPermission::None, KMemoryPermission::None,
2155 KMemoryAttribute::None, KMemoryAttribute::None));
2156
2157 // Create an update allocator.
2158 Result allocator_result;
2159 KMemoryBlockManagerUpdateAllocator allocator(
2160 std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
2161 R_TRY(allocator_result);
2162
2163 // We're going to perform an update, so create a helper.
2164 KScopedPageTableUpdater updater(this);
2165
2166 // Map the pages.
2167 const size_t num_pages = allocation_size / PageSize;
2168 const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
2169 (m_current_heap_end == m_heap_region_start)
2170 ? DisableMergeAttribute::DisableHead
2171 : DisableMergeAttribute::None};
2172 R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg,
2173 map_properties, OperationType::MapGroup, false));
2174
2175 // We succeeded, so commit our memory reservation.
2176 memory_reservation.Commit();
2177
2178 // Apply the memory block update.
2179 m_memory_block_manager.Update(
2180 std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
2181 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
2182 m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
2183 : KMemoryBlockDisableMergeAttribute::None,
2184 KMemoryBlockDisableMergeAttribute::None);
2185
2186 // Update the current heap end.
2187 m_current_heap_end = m_heap_region_start + size;
2188
2189 // Set the output.
2190 *out = m_heap_region_start;
2191 R_SUCCEED();
2192 }
2193}
2194
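// NOTE: SetHeapSize() above handles shrinking and the no-change case entirely under the
// table lock, but splits growth into two phases: the allocation size and base address are
// computed under the lock, the resource reservation and page allocation happen with the
// lock dropped, and the table is then re-locked to map the new pages. The
// ASSERT(cur_address == m_current_heap_end) documents the invariant that the heap end
// cannot move in between, because concurrent SetHeapSize() calls serialize on
// m_map_physical_memory_lock, taken at the top of this function.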
2195Result KPageTableBase::SetMaxHeapSize(size_t size) {
2196 // Lock the table.
2197 KScopedLightLock lk(m_general_lock);
2198
2199 // Only process page tables are allowed to set heap size.
2200 ASSERT(!this->IsKernel());
2201
2202 m_max_heap_size = size;
2203
2204 R_SUCCEED();
2205}
2206
2207Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
2208 KProcessAddress addr) const {
2209 // If the address is invalid, create a fake block.
2210 if (!this->Contains(addr, 1)) {
2211 *out_info = {
2212 .m_address = GetInteger(m_address_space_end),
2213 .m_size = 0 - GetInteger(m_address_space_end),
2214 .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
2215 .m_device_disable_merge_left_count = 0,
2216 .m_device_disable_merge_right_count = 0,
2217 .m_ipc_lock_count = 0,
2218 .m_device_use_count = 0,
2219 .m_ipc_disable_merge_count = 0,
2220 .m_permission = KMemoryPermission::None,
2221 .m_attribute = KMemoryAttribute::None,
2222 .m_original_permission = KMemoryPermission::None,
2223 .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
2224 };
2225 out_page_info->flags = 0;
2226
2227 R_SUCCEED();
2228 }
2229
2230 // Otherwise, lock the table and query.
2231 KScopedLightLock lk(m_general_lock);
2232 R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
2233}
2234
2235Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out,
2236 KProcessAddress address) const {
2237 // Lock the table.
2238 KScopedLightLock lk(m_general_lock);
2239
2240 // Align the address down to page size.
2241 address = Common::AlignDown(GetInteger(address), PageSize);
2242
2243 // Verify that we can query the address.
2244 KMemoryInfo info;
2245 Svc::PageInfo page_info;
2246 R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
2247
2248 // Check the memory state.
2249 R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical,
2250 KMemoryState::FlagCanQueryPhysical,
2251 KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead,
2252 KMemoryAttribute::None, KMemoryAttribute::None));
2253
2254 // Prepare to traverse.
2255 KPhysicalAddress phys_addr;
2256 size_t phys_size;
2257
2258 KProcessAddress virt_addr = info.GetAddress();
2259 KProcessAddress end_addr = info.GetEndAddress();
2260
2261 // Perform traversal.
2262 {
2263 // Begin traversal.
2264 TraversalContext context;
2265 TraversalEntry next_entry;
2266 bool traverse_valid =
2267 m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
2268 R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
2269
2270 // Set tracking variables.
2271 phys_addr = next_entry.phys_addr;
2272 phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
2273
2274 // Iterate.
2275 while (true) {
2276 // Continue the traversal.
2277 traverse_valid =
2278 m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context));
2279 if (!traverse_valid) {
2280 break;
2281 }
2282
2283 if (next_entry.phys_addr != (phys_addr + phys_size)) {
2284 // Check if we're done.
2285 if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
2286 break;
2287 }
2288
2289 // Advance.
2290 phys_addr = next_entry.phys_addr;
2291 virt_addr += next_entry.block_size;
2292 phys_size =
2293 next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
2294 } else {
2295 phys_size += next_entry.block_size;
2296 }
2297
2298 // Check if we're done.
2299 if (end_addr < virt_addr + phys_size) {
2300 break;
2301 }
2302 }
2303 ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);
2304
2305 // Ensure we use the right size.
2306 if (end_addr < virt_addr + phys_size) {
2307 phys_size = end_addr - virt_addr;
2308 }
2309 }
2310
2311 // Set the output.
2312 out->physical_address = GetInteger(phys_addr);
2313 out->virtual_address = GetInteger(virt_addr);
2314 out->size = phys_size;
2315 R_SUCCEED();
2316}
2317
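// NOTE: QueryPhysicalAddress() above reports the physically contiguous run that backs the
// queried address, clamped to the containing memory block. A hedged usage sketch (caller
// names are illustrative, not part of this file):
//
//   Svc::lp64::PhysicalMemoryInfo info{};
//   R_TRY(page_table.QueryPhysicalAddress(std::addressof(info), query_address));
//   // info.physical_address, info.virtual_address and info.size now describe the run
//   // containing query_address.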
2318Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list,
2319 KPhysicalAddress phys_addr, size_t size, KMemoryState state,
2320 KMemoryPermission perm) {
2321 // Check pre-conditions.
2322 ASSERT(this->IsLockedByCurrentThread());
2323 ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
2324 ASSERT(Common::IsAligned(size, PageSize));
2325 ASSERT(size > 0);
2326
2327 R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
2328 const size_t num_pages = size / PageSize;
2329 const KPhysicalAddress last = phys_addr + size - 1;
2330
2331 // Get region extents.
2332 const KProcessAddress region_start = m_kernel_map_region_start;
2333 const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
2334 const size_t region_num_pages = region_size / PageSize;
2335
2336 ASSERT(this->CanContain(region_start, region_size, state));
2337
2338 // Locate the memory region.
2339 const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
2340 R_UNLESS(region != nullptr, ResultInvalidAddress);
2341
2342 ASSERT(region->Contains(GetInteger(phys_addr)));
2343
2344 // Ensure that the region is mappable.
2345 const bool is_rw = perm == KMemoryPermission::UserReadWrite;
2346 while (true) {
2347 // Check that the region exists.
2348 R_UNLESS(region != nullptr, ResultInvalidAddress);
2349
2350 // Check the region attributes.
2351 R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
2352 R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
2353 ResultInvalidAddress);
2354 R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
2355
2356 // Check if we're done.
2357 if (GetInteger(last) <= region->GetLastAddress()) {
2358 break;
2359 }
2360
2361 // Advance.
2362 region = region->GetNext();
2363 };
2364
2365 // Select an address to map at.
2366 KProcessAddress addr = 0;
2367 {
2368 const size_t alignment = 4_KiB;
2369 const KPhysicalAddress aligned_phys =
2370 Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
2371 R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
2372
2373 const KPhysicalAddress last_aligned_paddr =
2374 Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
2375 R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
2376 ResultInvalidAddress);
2377
2378 addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
2379 this->GetNumGuardPages());
2380 R_UNLESS(addr != 0, ResultOutOfMemory);
2381 }
2382
2383 // Check that we can map IO here.
2384 ASSERT(this->CanContain(addr, size, state));
2385 R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
2386 KMemoryPermission::None, KMemoryPermission::None,
2387 KMemoryAttribute::None, KMemoryAttribute::None));
2388
2389 // Perform mapping operation.
2390 const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false,
2391 DisableMergeAttribute::DisableHead};
2392 R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map,
2393 false));
2394
2395 // Set the output address.
2396 *out = addr;
2397
2398 R_SUCCEED();
2399}
2400
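// NOTE: MapIoImpl() above vets the entire physical range against the kernel memory layout
// before choosing a virtual address: every covering KMemoryRegion must exist, must not be
// derived from DRAM, must not be flagged NoUserMap, and must not be user-read-only when a
// read/write mapping is requested. Only then is a free area selected from the kernel map
// region via FindFreeArea(), and the pages are mapped with the io page property set when
// the target state is KMemoryState::IoRegister.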
2401Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
2402 // Lock the table.
2403 KScopedLightLock lk(m_general_lock);
2404
2405 // Create an update allocator.
2406 Result allocator_result;
2407 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2408 m_memory_block_slab_manager);
2409 R_TRY(allocator_result);
2410
2411 // We're going to perform an update, so create a helper.
2412 KScopedPageTableUpdater updater(this);
2413
2414 // Map the io memory.
2415 KProcessAddress addr;
2416 R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size,
2417 KMemoryState::IoRegister, perm));
2418
2419 // Update the blocks.
2420 m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize,
2421 KMemoryState::IoRegister, perm, KMemoryAttribute::Locked,
2422 KMemoryBlockDisableMergeAttribute::Normal,
2423 KMemoryBlockDisableMergeAttribute::None);
2424
2425 // We successfully mapped the pages.
2426 R_SUCCEED();
2427}
2428
2429Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
2430 size_t size, Svc::MemoryMapping mapping,
2431 Svc::MemoryPermission svc_perm) {
2432 const size_t num_pages = size / PageSize;
2433
2434 // Lock the table.
2435 KScopedLightLock lk(m_general_lock);
2436
2437 // Validate the memory state.
2438 size_t num_allocator_blocks;
2439 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size,
2440 KMemoryState::All, KMemoryState::None, KMemoryPermission::None,
2441 KMemoryPermission::None, KMemoryAttribute::None,
2442 KMemoryAttribute::None));
2443
2444 // Create an update allocator.
2445 Result allocator_result;
2446 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2447 m_memory_block_slab_manager, num_allocator_blocks);
2448 R_TRY(allocator_result);
2449
2450 // We're going to perform an update, so create a helper.
2451 KScopedPageTableUpdater updater(this);
2452
2453 // Perform mapping operation.
2454 const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
2455 const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister,
2456 mapping == Svc::MemoryMapping::Uncached,
2457 DisableMergeAttribute::DisableHead};
2458 R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties,
2459 OperationType::Map, false));
2460
2461 // Update the blocks.
2462 const auto state =
2463 mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister;
2464 m_memory_block_manager.Update(
2465 std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked,
2466 KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
2467
2468 // We successfully mapped the pages.
2469 R_SUCCEED();
2470}
2471
2472Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
2473 size_t size, Svc::MemoryMapping mapping) {
2474 const size_t num_pages = size / PageSize;
2475
2476 // Lock the table.
2477 KScopedLightLock lk(m_general_lock);
2478
2479 // Validate the memory state.
2480 KMemoryState old_state;
2481 KMemoryPermission old_perm;
2482 KMemoryAttribute old_attr;
2483 size_t num_allocator_blocks;
2484 R_TRY(this->CheckMemoryState(
2485 std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
2486 std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
2487 mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister,
2488 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
2489 KMemoryAttribute::Locked));
2490
2491 // Validate that the region being unmapped corresponds to the physical range described.
2492 {
2493 // Get the impl.
2494 auto& impl = this->GetImpl();
2495
2496 // Begin traversal.
2497 TraversalContext context;
2498 TraversalEntry next_entry;
2499 ASSERT(
2500 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));
2501
2502 // Check that the physical region matches.
2503 R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion);
2504
2505 // Iterate.
2506 for (size_t checked_size =
2507 next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
2508 checked_size < size; checked_size += next_entry.block_size) {
2509 // Continue the traversal.
2510 ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
2511
2512 // Check that the physical region matches.
2513 R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion);
2514 }
2515 }
2516
2517 // Create an update allocator.
2518 Result allocator_result;
2519 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2520 m_memory_block_slab_manager, num_allocator_blocks);
2521 R_TRY(allocator_result);
2522
2523 // We're going to perform an update, so create a helper.
2524 KScopedPageTableUpdater updater(this);
2525
2526 // If the region being unmapped is Memory, synchronize.
2527 if (mapping == Svc::MemoryMapping::Memory) {
2528 // Change the region to be uncached.
2529 const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None};
2530 R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties,
2531 OperationType::ChangePermissionsAndRefresh, false));
2532
2533 // Temporarily unlock ourselves, so that other operations can occur while we flush the
2534 // region.
2535 m_general_lock.Unlock();
2536 SCOPE_EXIT({ m_general_lock.Lock(); });
2537
2538 // Flush the region.
2539 R_ASSERT(FlushDataCache(dst_address, size));
2540 }
2541
2542 // Perform the unmap.
2543 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
2544 DisableMergeAttribute::None};
2545 R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
2546 unmap_properties, OperationType::Unmap, false));
2547
2548 // Update the blocks.
2549 m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
2550 KMemoryState::Free, KMemoryPermission::None,
2551 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
2552 KMemoryBlockDisableMergeAttribute::Normal);
2553
2554 R_SUCCEED();
2555}
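// NOTE: In the physical-range verification above, the initial checked_size term
// (block_size - (phys_addr & (block_size - 1))) is the distance from phys_addr to the
// end of the first translation block; e.g. for a hypothetical block_size of 0x1000 and a
// phys_addr ending in 0x200, the first chunk covers 0xE00 bytes and every later chunk is
// a whole block, so the loop confirms the mapping is physically contiguous with
// phys_addr across the full size being unmapped.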
2556
2557Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
2558 ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
2559 ASSERT(Common::IsAligned(size, PageSize));
2560 ASSERT(size > 0);
2561 R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
2562 const size_t num_pages = size / PageSize;
2563 const KPhysicalAddress last = phys_addr + size - 1;
2564
2565 // Get region extents.
2566 const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static);
2567 const size_t region_size = this->GetRegionSize(KMemoryState::Static);
2568 const size_t region_num_pages = region_size / PageSize;
2569
2570 // Locate the memory region.
2571 const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
2572 R_UNLESS(region != nullptr, ResultInvalidAddress);
2573
2574 ASSERT(region->Contains(GetInteger(phys_addr)));
2575 R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress);
2576
2577 // Check the region attributes.
2578 const bool is_rw = perm == KMemoryPermission::UserReadWrite;
2579 R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
2580 R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
2581 R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
2582 ResultInvalidAddress);
2583
2584 // Lock the table.
2585 KScopedLightLock lk(m_general_lock);
2586
2587 // Select an address to map at.
2588 KProcessAddress addr = 0;
2589 {
2590 const size_t alignment = 4_KiB;
2591 const KPhysicalAddress aligned_phys =
2592 Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
2593 R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
2594
2595 const KPhysicalAddress last_aligned_paddr =
2596 Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
2597 R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
2598 ResultInvalidAddress);
2599
2600 addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
2601 this->GetNumGuardPages());
2602 R_UNLESS(addr != 0, ResultOutOfMemory);
2603 }
2604
2605 // Check that we can map static here.
2606 ASSERT(this->CanContain(addr, size, KMemoryState::Static));
2607 R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
2608 KMemoryPermission::None, KMemoryPermission::None,
2609 KMemoryAttribute::None, KMemoryAttribute::None));
2610
2611 // Create an update allocator.
2612 Result allocator_result;
2613 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2614 m_memory_block_slab_manager);
2615 R_TRY(allocator_result);
2616
2617 // We're going to perform an update, so create a helper.
2618 KScopedPageTableUpdater updater(this);
2619
2620 // Perform mapping operation.
2621 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2622 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
2623 OperationType::Map, false));
2624
2625 // Update the blocks.
2626 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static,
2627 perm, KMemoryAttribute::None,
2628 KMemoryBlockDisableMergeAttribute::Normal,
2629 KMemoryBlockDisableMergeAttribute::None);
2630
2631 // We successfully mapped the pages.
2632 R_SUCCEED();
2633}
2634
2635Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
2636 // Get the memory region.
2637 const KMemoryRegion* region =
2638 m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
2639 R_UNLESS(region != nullptr, ResultOutOfRange);
2640
2641 // Check that the region is valid.
2642 ASSERT(region->GetEndAddress() != 0);
2643
2644 // Map the region.
2645 R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)){
2646 R_CONVERT(ResultInvalidAddress, ResultOutOfRange)} R_END_TRY_CATCH;
2647
2648 R_SUCCEED();
2649}
2650
2651Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
2652 KPhysicalAddress phys_addr, bool is_pa_valid,
2653 KProcessAddress region_start, size_t region_num_pages,
2654 KMemoryState state, KMemoryPermission perm) {
2655 ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
2656
2657 // Ensure this is a valid map request.
2658 R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
2659 ResultInvalidCurrentMemory);
2660 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
2661
2662 // Lock the table.
2663 KScopedLightLock lk(m_general_lock);
2664
2665 // Find a random address to map at.
2666 KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
2667 0, this->GetNumGuardPages());
2668 R_UNLESS(addr != 0, ResultOutOfMemory);
2669 ASSERT(Common::IsAligned(GetInteger(addr), alignment));
2670 ASSERT(this->CanContain(addr, num_pages * PageSize, state));
2671 R_ASSERT(this->CheckMemoryState(
2672 addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
2673 KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
2674
2675 // Create an update allocator.
2676 Result allocator_result;
2677 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2678 m_memory_block_slab_manager);
2679 R_TRY(allocator_result);
2680
2681 // We're going to perform an update, so create a helper.
2682 KScopedPageTableUpdater updater(this);
2683
2684 // Perform mapping operation.
2685 if (is_pa_valid) {
2686 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2687 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
2688 OperationType::Map, false));
2689 } else {
2690 R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
2691 }
2692
2693 // Update the blocks.
2694 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
2695 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2696 KMemoryBlockDisableMergeAttribute::None);
2697
2698 // We successfully mapped the pages.
2699 *out_addr = addr;
2700 R_SUCCEED();
2701}
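// NOTE: The is_pa_valid branch above distinguishes the two flavors of this call: when a
// physical address is supplied, the existing pages are mapped directly via Operate;
// otherwise AllocateAndMapPagesImpl both allocates backing pages and maps them at the
// chosen address.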
2702
2703Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
2704 KMemoryPermission perm) {
2705 // Check that the map is in range.
2706 const size_t size = num_pages * PageSize;
2707 R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
2708
2709 // Lock the table.
2710 KScopedLightLock lk(m_general_lock);
2711
2712 // Check the memory state.
2713 size_t num_allocator_blocks;
2714 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
2715 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
2716 KMemoryPermission::None, KMemoryAttribute::None,
2717 KMemoryAttribute::None));
2718
2719 // Create an update allocator.
2720 Result allocator_result;
2721 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2722 m_memory_block_slab_manager, num_allocator_blocks);
2723 R_TRY(allocator_result);
2724
2725 // We're going to perform an update, so create a helper.
2726 KScopedPageTableUpdater updater(this);
2727
2728 // Map the pages.
2729 R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
2730
2731 // Update the blocks.
2732 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
2733 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2734 KMemoryBlockDisableMergeAttribute::None);
2735
2736 R_SUCCEED();
2737}
2738
2739Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
2740 // Check that the unmap is in range.
2741 const size_t size = num_pages * PageSize;
2742 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2743
2744 // Lock the table.
2745 KScopedLightLock lk(m_general_lock);
2746
2747 // Check the memory state.
2748 size_t num_allocator_blocks;
2749 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
2750 KMemoryState::All, state, KMemoryPermission::None,
2751 KMemoryPermission::None, KMemoryAttribute::All,
2752 KMemoryAttribute::None));
2753
2754 // Create an update allocator.
2755 Result allocator_result;
2756 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2757 m_memory_block_slab_manager, num_allocator_blocks);
2758 R_TRY(allocator_result);
2759
2760 // We're going to perform an update, so create a helper.
2761 KScopedPageTableUpdater updater(this);
2762
2763 // Perform the unmap.
2764 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
2765 DisableMergeAttribute::None};
2766 R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
2767 OperationType::Unmap, false));
2768
2769 // Update the blocks.
2770 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
2771 KMemoryPermission::None, KMemoryAttribute::None,
2772 KMemoryBlockDisableMergeAttribute::None,
2773 KMemoryBlockDisableMergeAttribute::Normal);
2774
2775 R_SUCCEED();
2776}
2777
2778Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
2779 KProcessAddress region_start, size_t region_num_pages,
2780 KMemoryState state, KMemoryPermission perm) {
2781 ASSERT(!this->IsLockedByCurrentThread());
2782
2783 // Ensure this is a valid map request.
2784 const size_t num_pages = pg.GetNumPages();
2785 R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
2786 ResultInvalidCurrentMemory);
2787 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
2788
2789 // Lock the table.
2790 KScopedLightLock lk(m_general_lock);
2791
2792 // Find a random address to map at.
2793 KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
2794 0, this->GetNumGuardPages());
2795 R_UNLESS(addr != 0, ResultOutOfMemory);
2796 ASSERT(this->CanContain(addr, num_pages * PageSize, state));
2797 R_ASSERT(this->CheckMemoryState(
2798 addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
2799 KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
2800
2801 // Create an update allocator.
2802 Result allocator_result;
2803 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2804 m_memory_block_slab_manager);
2805 R_TRY(allocator_result);
2806
2807 // We're going to perform an update, so create a helper.
2808 KScopedPageTableUpdater updater(this);
2809
2810 // Perform mapping operation.
2811 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2812 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
2813
2814 // Update the blocks.
2815 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
2816 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2817 KMemoryBlockDisableMergeAttribute::None);
2818
2819 // We successfully mapped the pages.
2820 *out_addr = addr;
2821 R_SUCCEED();
2822}
2823
2824Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
2825 KMemoryPermission perm) {
2826 ASSERT(!this->IsLockedByCurrentThread());
2827
2828 // Ensure this is a valid map request.
2829 const size_t num_pages = pg.GetNumPages();
2830 const size_t size = num_pages * PageSize;
2831 R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
2832
2833 // Lock the table.
2834 KScopedLightLock lk(m_general_lock);
2835
2836 // Check if state allows us to map.
2837 size_t num_allocator_blocks;
2838 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
2839 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
2840 KMemoryPermission::None, KMemoryAttribute::None,
2841 KMemoryAttribute::None));
2842
2843 // Create an update allocator.
2844 Result allocator_result;
2845 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2846 m_memory_block_slab_manager, num_allocator_blocks);
2847 R_TRY(allocator_result);
2848
2849 // We're going to perform an update, so create a helper.
2850 KScopedPageTableUpdater updater(this);
2851
2852 // Perform mapping operation.
2853 const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
2854 R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
2855
2856 // Update the blocks.
2857 m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
2858 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
2859 KMemoryBlockDisableMergeAttribute::None);
2860
2861 // We successfully mapped the pages.
2862 R_SUCCEED();
2863}
2864
2865Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
2866 KMemoryState state) {
2867 ASSERT(!this->IsLockedByCurrentThread());
2868
2869 // Ensure this is a valid unmap request.
2870 const size_t num_pages = pg.GetNumPages();
2871 const size_t size = num_pages * PageSize;
2872 R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
2873
2874 // Lock the table.
2875 KScopedLightLock lk(m_general_lock);
2876
2877 // Check if state allows us to unmap.
2878 size_t num_allocator_blocks;
2879 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
2880 KMemoryState::All, state, KMemoryPermission::None,
2881 KMemoryPermission::None, KMemoryAttribute::All,
2882 KMemoryAttribute::None));
2883
2884 // Check that the page group is valid.
2885 R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
2886
2887 // Create an update allocator.
2888 Result allocator_result;
2889 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
2890 m_memory_block_slab_manager, num_allocator_blocks);
2891 R_TRY(allocator_result);
2892
2893 // We're going to perform an update, so create a helper.
2894 KScopedPageTableUpdater updater(this);
2895
2896 // Perform unmapping operation.
2897 const KPageProperties properties = {KMemoryPermission::None, false, false,
2898 DisableMergeAttribute::None};
2899 R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties,
2900 OperationType::Unmap, false));
2901
2902 // Update the blocks.
2903 m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
2904 KMemoryPermission::None, KMemoryAttribute::None,
2905 KMemoryBlockDisableMergeAttribute::None,
2906 KMemoryBlockDisableMergeAttribute::Normal);
2907
2908 R_SUCCEED();
2909}
2910
2911Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address,
2912 size_t num_pages, KMemoryState state_mask,
2913 KMemoryState state, KMemoryPermission perm_mask,
2914 KMemoryPermission perm, KMemoryAttribute attr_mask,
2915 KMemoryAttribute attr) {
2916 // Ensure that the page group isn't null.
2917 ASSERT(out != nullptr);
2918
2919 // Make sure that the region we're mapping is valid for the table.
2920 const size_t size = num_pages * PageSize;
2921 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2922
2923 // Lock the table.
2924 KScopedLightLock lk(m_general_lock);
2925
2926 // Check if state allows us to create the group.
2927 R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
2928 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
2929 attr_mask, attr));
2930
2931 // Create a new page group for the region.
2932 R_TRY(this->MakePageGroup(*out, address, num_pages));
2933
2934 // Open a new reference to the pages in the group.
2935 out->Open();
2936
2937 R_SUCCEED();
2938}
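// NOTE: FlagReferenceCounted is OR'd into both the state mask and the required state
// above, so a page group can only be built over reference-counted memory; the Open()
// call then takes an additional reference on every page in the group before returning
// it to the caller.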
2939
2940Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
2941 // Check that the region is in range.
2942 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
2943
2944 // Lock the table.
2945 KScopedLightLock lk(m_general_lock);
2946
2947 // Check the memory state.
2948 R_TRY(this->CheckMemoryStateContiguous(
2949 address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
2950 KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
2951 KMemoryAttribute::Uncached, KMemoryAttribute::None));
2952
2953 // Get the impl.
2954 auto& impl = this->GetImpl();
2955
2956 // Begin traversal.
2957 TraversalContext context;
2958 TraversalEntry next_entry;
2959 bool traverse_valid =
2960 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
2961 R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
2962
2963 // Prepare tracking variables.
2964 KPhysicalAddress cur_addr = next_entry.phys_addr;
2965 size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
2966 size_t tot_size = cur_size;
2967
2968 // Iterate.
2969 while (tot_size < size) {
2970 // Continue the traversal.
2971 traverse_valid =
2972 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
2973 R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
2974
2975 if (next_entry.phys_addr != (cur_addr + cur_size)) {
2976 // Check that the pages are linearly mapped.
2977 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
2978
2979 // Invalidate the block.
2980 if (cur_size > 0) {
2981 // NOTE: Nintendo does not check the result of invalidation.
2982 InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
2983 }
2984
2985 // Advance.
2986 cur_addr = next_entry.phys_addr;
2987 cur_size = next_entry.block_size;
2988 } else {
2989 cur_size += next_entry.block_size;
2990 }
2991
2992 tot_size += next_entry.block_size;
2993 }
2994
2995 // Ensure we use the right size for the last block.
2996 if (tot_size > size) {
2997 cur_size -= (tot_size - size);
2998 }
2999
3000 // Check that the last block is linearly mapped.
3001 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3002
3003 // Invalidate the last block.
3004 if (cur_size > 0) {
3005 // NOTE: Nintendo does not check the result of invalidation.
3006 InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
3007 }
3008
3009 R_SUCCEED();
3010}
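// NOTE: The traversal loop above follows a pattern reused throughout this file:
// cur_addr/cur_size track the current physically contiguous run, tot_size tracks how
// much of the virtual range has been visited, contiguous blocks are merged
// (cur_size += block_size), a discontinuity processes the accumulated run (here, a cache
// invalidate; elsewhere, a copy), and the final "tot_size > size" adjustment trims the
// last run so no bytes past the requested size are touched.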
3011
3012Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
3013 // Check pre-condition: this is being called on the current process.
3014 ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable()));
3015
3016 // Check that the region is in range.
3017 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
3018
3019 // Lock the table.
3020 KScopedLightLock lk(m_general_lock);
3021
3022 // Check the memory state.
3023 R_TRY(this->CheckMemoryStateContiguous(
3024 address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
3025 KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
3026 KMemoryAttribute::Uncached, KMemoryAttribute::None));
3027
3028 // Invalidate the data cache.
3029 R_RETURN(InvalidateDataCache(address, size));
3030}
3031
3032Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
3033 size_t size) {
3034 // Lightly validate the region is in range.
3035 R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
3036
3037 // Lock the table.
3038 KScopedLightLock lk(m_general_lock);
3039
3040 // Require that the memory either be user readable or debuggable.
3041 const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(
3042 src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead,
3043 KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None));
3044 if (!can_read) {
3045 const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
3046 src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
3047 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
3048 KMemoryAttribute::None));
3049 R_UNLESS(can_debug, ResultInvalidCurrentMemory);
3050 }
3051
3052 // Get the impl.
3053 auto& impl = this->GetImpl();
3054 auto& dst_memory = GetCurrentMemory(m_system.Kernel());
3055
3056 // Begin traversal.
3057 TraversalContext context;
3058 TraversalEntry next_entry;
3059 bool traverse_valid =
3060 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address);
3061 R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
3062
3063 // Prepare tracking variables.
3064 KPhysicalAddress cur_addr = next_entry.phys_addr;
3065 size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
3066 size_t tot_size = cur_size;
3067
3068 auto PerformCopy = [&]() -> Result {
3069 // Ensure the address is linear mapped.
3070 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3071
3072 // Copy as much aligned data as we can.
3073 if (cur_size >= sizeof(u32)) {
3074 const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
3075 const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
3076 FlushDataCache(copy_src, copy_size);
3077 R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer);
3078
3079 dst_address += copy_size;
3080 cur_addr += copy_size;
3081 cur_size -= copy_size;
3082 }
3083
3084 // Copy remaining data.
3085 if (cur_size > 0) {
3086 const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
3087 FlushDataCache(copy_src, cur_size);
3088 R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer);
3089 }
3090
3091 R_SUCCEED();
3092 };
3093
3094 // Iterate.
3095 while (tot_size < size) {
3096 // Continue the traversal.
3097 traverse_valid =
3098 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
3099 ASSERT(traverse_valid);
3100
3101 if (next_entry.phys_addr != (cur_addr + cur_size)) {
3102 // Perform copy.
3103 R_TRY(PerformCopy());
3104
3105 // Advance.
3106 dst_address += cur_size;
3107
3108 cur_addr = next_entry.phys_addr;
3109 cur_size = next_entry.block_size;
3110 } else {
3111 cur_size += next_entry.block_size;
3112 }
3113
3114 tot_size += next_entry.block_size;
3115 }
3116
3117 // Ensure we use the right size for the last block.
3118 if (tot_size > size) {
3119 cur_size -= (tot_size - size);
3120 }
3121
3122 // Perform copy for the last block.
3123 R_TRY(PerformCopy());
3124
3125 R_SUCCEED();
3126}
3127
3128Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
3129 size_t size) {
3130 // Lightly validate the region is in range.
3131 R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
3132
3133 // Lock the table.
3134 KScopedLightLock lk(m_general_lock);
3135
3136 // Require that the memory either be user writable or debuggable.
3137     const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(
3138 dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite,
3139 KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
3140     if (!can_write) {
3141 const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
3142 dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
3143 KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
3144 KMemoryAttribute::None));
3145 R_UNLESS(can_debug, ResultInvalidCurrentMemory);
3146 }
3147
3148 // Get the impl.
3149 auto& impl = this->GetImpl();
3150 auto& src_memory = GetCurrentMemory(m_system.Kernel());
3151
3152 // Begin traversal.
3153 TraversalContext context;
3154 TraversalEntry next_entry;
3155 bool traverse_valid =
3156 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address);
3157 R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
3158
3159 // Prepare tracking variables.
3160 KPhysicalAddress cur_addr = next_entry.phys_addr;
3161 size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
3162 size_t tot_size = cur_size;
3163
3164 auto PerformCopy = [&]() -> Result {
3165 // Ensure the address is linear mapped.
3166 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3167
3168 // Copy as much aligned data as we can.
3169 if (cur_size >= sizeof(u32)) {
3170 const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
3171 void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
3172 R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size),
3173 ResultInvalidCurrentMemory);
3174
3175 StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size);
3176
3177 src_address += copy_size;
3178 cur_addr += copy_size;
3179 cur_size -= copy_size;
3180 }
3181
3182 // Copy remaining data.
3183 if (cur_size > 0) {
3184 void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
3185 R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size),
3186 ResultInvalidCurrentMemory);
3187
3188 StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
3189 }
3190
3191 R_SUCCEED();
3192 };
3193
3194 // Iterate.
3195 while (tot_size < size) {
3196 // Continue the traversal.
3197 traverse_valid =
3198 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
3199 ASSERT(traverse_valid);
3200
3201 if (next_entry.phys_addr != (cur_addr + cur_size)) {
3202 // Perform copy.
3203 R_TRY(PerformCopy());
3204
3205 // Advance.
3206 src_address += cur_size;
3207
3208 cur_addr = next_entry.phys_addr;
3209 cur_size = next_entry.block_size;
3210 } else {
3211 cur_size += next_entry.block_size;
3212 }
3213
3214 tot_size += next_entry.block_size;
3215 }
3216
3217 // Ensure we use the right size for the last block.
3218 if (tot_size > size) {
3219 cur_size -= (tot_size - size);
3220 }
3221
3222 // Perform copy for the last block.
3223 R_TRY(PerformCopy());
3224
3225 // Invalidate the entire instruction cache, as this svc allows modifying executable pages.
3226 InvalidateEntireInstructionCache(m_system);
3227
3228 R_SUCCEED();
3229}
3230
3231Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr,
3232 size_t size, KMemoryState state) {
3233 // Check pre-conditions.
3234 ASSERT(this->IsLockedByCurrentThread());
3235
3236 // Determine the mapping extents.
3237 const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
3238 const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
3239 const size_t map_size = map_end - map_start;
3240
3241 // Get the memory reference to write into.
3242 auto& dst_memory = GetCurrentMemory(m_kernel);
3243
3244 // We're going to perform an update, so create a helper.
3245 KScopedPageTableUpdater updater(this);
3246
3247 // Temporarily map the io memory.
3248 KProcessAddress io_addr;
3249 R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
3250 state, KMemoryPermission::UserRead));
3251
3252 // Ensure we unmap the io memory when we're done with it.
3253 const KPageProperties unmap_properties =
3254 KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
3255 SCOPE_EXIT({
3256 R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
3257 unmap_properties, OperationType::Unmap, true));
3258 });
3259
3260 // Read the memory.
3261 const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
3262 dst_memory.CopyBlock(dst_addr, read_addr, size);
3263
3264 R_SUCCEED();
3265}
3266
3267Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr,
3268 size_t size, KMemoryState state) {
3269 // Check pre-conditions.
3270 ASSERT(this->IsLockedByCurrentThread());
3271
3272 // Determine the mapping extents.
3273 const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
3274 const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
3275 const size_t map_size = map_end - map_start;
3276
3277 // Get the memory reference to read from.
3278 auto& src_memory = GetCurrentMemory(m_kernel);
3279
3280 // We're going to perform an update, so create a helper.
3281 KScopedPageTableUpdater updater(this);
3282
3283 // Temporarily map the io memory.
3284 KProcessAddress io_addr;
3285 R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
3286 state, KMemoryPermission::UserReadWrite));
3287
3288 // Ensure we unmap the io memory when we're done with it.
3289 const KPageProperties unmap_properties =
3290 KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
3291 SCOPE_EXIT({
3292 R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
3293 unmap_properties, OperationType::Unmap, true));
3294 });
3295
3296 // Write the memory.
3297 const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
3298 R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer);
3299
3300 R_SUCCEED();
3301}
3302
3303Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
3304 size_t size, KMemoryState state) {
3305 // Lightly validate the range before doing anything else.
3306 R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
3307
3308 // We need to lock both this table, and the current process's table, so set up some aliases.
3309 KPageTableBase& src_page_table = *this;
3310 KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
3311
3312 // Acquire the table locks.
3313 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
3314
3315 // Check that the desired range is readable io memory.
3316 R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state,
3317 KMemoryPermission::UserRead, KMemoryPermission::UserRead,
3318 KMemoryAttribute::None, KMemoryAttribute::None));
3319
3320 // Read the memory.
3321 KProcessAddress dst = dst_address;
3322 const KProcessAddress last_address = src_address + size - 1;
3323 while (src_address <= last_address) {
3324 // Get the current physical address.
3325 KPhysicalAddress phys_addr;
3326 ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address));
3327
3328 // Determine the current read size.
3329 const size_t cur_size =
3330 std::min<size_t>(last_address - src_address + 1,
3331 Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) -
3332 GetInteger(src_address));
3333
3334 // Read.
3335 R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
3336
3337 // Advance.
3338 src_address += cur_size;
3339 dst += cur_size;
3340 }
3341
3342 R_SUCCEED();
3343}
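// NOTE: cur_size above is min(bytes remaining, bytes to the next page boundary), so each
// iteration stays within a single page; e.g. for a hypothetical src_address of 0x1800
// with PageSize 0x1000, AlignDown(0x1800 + 0x1000, 0x1000) - 0x1800 = 0x800 bytes are
// read before advancing to the page at 0x2000.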
3344
3345Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
3346 size_t size, KMemoryState state) {
3347 // Lightly validate the range before doing anything else.
3348 R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
3349
3350 // We need to lock both this table, and the current process's table, so set up some aliases.
3351 KPageTableBase& src_page_table = *this;
3352 KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
3353
3354 // Acquire the table locks.
3355 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
3356
3357 // Check that the desired range is writable io memory.
3358 R_TRY(this->CheckMemoryStateContiguous(
3359 dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite,
3360 KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
3361
3362     // Write the memory.
3363 KProcessAddress src = src_address;
3364 const KProcessAddress last_address = dst_address + size - 1;
3365 while (dst_address <= last_address) {
3366 // Get the current physical address.
3367 KPhysicalAddress phys_addr;
3368 ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address));
3369
3370         // Determine the current write size.
3371 const size_t cur_size =
3372 std::min<size_t>(last_address - dst_address + 1,
3373 Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) -
3374 GetInteger(dst_address));
3375
3376         // Write.
3377 R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
3378
3379 // Advance.
3380 dst_address += cur_size;
3381 src += cur_size;
3382 }
3383
3384 R_SUCCEED();
3385}
3386
3387Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
3388 size_t size, KMemoryPermission perm,
3389 bool is_aligned, bool check_heap) {
3390 // Lightly validate the range before doing anything else.
3391 const size_t num_pages = size / PageSize;
3392 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
3393
3394 // Lock the table.
3395 KScopedLightLock lk(m_general_lock);
3396
3397 // Check the memory state.
3398 const KMemoryState test_state =
3399 (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
3400 (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
3401 size_t num_allocator_blocks;
3402 KMemoryState old_state;
3403 R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
3404 std::addressof(num_allocator_blocks), address, size, test_state,
3405 test_state, perm, perm,
3406 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
3407 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
3408
3409 // Create an update allocator.
3410 Result allocator_result;
3411 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
3412 m_memory_block_slab_manager, num_allocator_blocks);
3413 R_TRY(allocator_result);
3414
3415 // Update the memory blocks.
3416 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
3417 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
3418
3419 // Set whether the locked memory was io.
3420 *out_is_io =
3421 static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
3422
3423 R_SUCCEED();
3424}
3425
3426Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
3427 bool check_heap) {
3428 // Lightly validate the range before doing anything else.
3429 const size_t num_pages = size / PageSize;
3430 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
3431
3432 // Lock the table.
3433 KScopedLightLock lk(m_general_lock);
3434
3435 // Check the memory state.
3436 const KMemoryState test_state =
3437 KMemoryState::FlagCanDeviceMap |
3438 (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
3439 size_t num_allocator_blocks;
3440 R_TRY(this->CheckMemoryStateContiguous(
3441 std::addressof(num_allocator_blocks), address, size, test_state, test_state,
3442 KMemoryPermission::None, KMemoryPermission::None,
3443 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
3444
3445 // Create an update allocator.
3446 Result allocator_result;
3447 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
3448 m_memory_block_slab_manager, num_allocator_blocks);
3449 R_TRY(allocator_result);
3450
3451 // Update the memory blocks.
3452 const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
3453 m_enable_device_address_space_merge
3454 ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
3455 : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
3456 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
3457 KMemoryPermission::None);
3458
3459 R_SUCCEED();
3460}
3461
3462Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
3463 // Lightly validate the range before doing anything else.
3464 const size_t num_pages = size / PageSize;
3465 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
3466
3467 // Lock the table.
3468 KScopedLightLock lk(m_general_lock);
3469
3470 // Check the memory state.
3471 size_t num_allocator_blocks;
3472 R_TRY(this->CheckMemoryStateContiguous(
3473 std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
3474 KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
3475 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
3476
3477 // Create an update allocator.
3478 Result allocator_result;
3479 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
3480 m_memory_block_slab_manager, num_allocator_blocks);
3481 R_TRY(allocator_result);
3482
3483 // Update the memory blocks.
3484 m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
3485 &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
3486
3487 R_SUCCEED();
3488}
3489
3490Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
3491 // Lightly validate the range before doing anything else.
3492 const size_t num_pages = size / PageSize;
3493 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
3494
3495 // Lock the table.
3496 KScopedLightLock lk(m_general_lock);
3497
3498 // Check memory state.
3499 size_t allocator_num_blocks = 0;
3500 R_TRY(this->CheckMemoryStateContiguous(
3501 std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap,
3502 KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
3503 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
3504
3505 // Create an update allocator for the region.
3506 Result allocator_result;
3507 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
3508 m_memory_block_slab_manager, allocator_num_blocks);
3509 R_TRY(allocator_result);
3510
3511 // Update the memory blocks.
3512 m_memory_block_manager.UpdateLock(
3513 std::addressof(allocator), address, num_pages,
3514 m_enable_device_address_space_merge
3515 ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare
3516 : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight,
3517 KMemoryPermission::None);
3518
3519 R_SUCCEED();
3520}
3521
3522Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
3523 KProcessAddress address, size_t size,
3524 KMemoryPermission perm,
3525 bool is_aligned) {
3526 // Lock the table.
3527 KScopedLightLock lk(m_general_lock);
3528
3529 // Get the range.
3530 const KMemoryState test_state =
3531 (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
3532 R_TRY(this->GetContiguousMemoryRangeWithState(
3533 out, address, size, test_state, test_state, perm, perm,
3534 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None));
3535
3536 // We got the range, so open it.
3537 out->Open();
3538
3539 R_SUCCEED();
3540}
3541
3542Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out,
3543 KProcessAddress address,
3544 size_t size) {
3545 // Lock the table.
3546 KScopedLightLock lk(m_general_lock);
3547
3548 // Get the range.
3549 R_TRY(this->GetContiguousMemoryRangeWithState(
3550 out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap,
3551 KMemoryPermission::None, KMemoryPermission::None,
3552 KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
3553
3554 // We got the range, so open it.
3555 out->Open();
3556
3557 R_SUCCEED();
3558}
3559
3560Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
3561 size_t size) {
3562 R_RETURN(this->LockMemoryAndOpen(
3563 nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
3564 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
3565 KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
3566 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
3567 KMemoryPermission::KernelReadWrite),
3568 KMemoryAttribute::Locked));
3569}
3570
3571Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
3572 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
3573 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
3574 KMemoryPermission::None, KMemoryAttribute::All,
3575 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
3576 KMemoryAttribute::Locked, nullptr));
3577}
3578
3579Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
3580 KMemoryPermission perm) {
3581 R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
3582 KMemoryState::FlagCanTransfer, KMemoryPermission::All,
3583 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
3584 KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
3585}
3586
3587Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size,
3588 const KPageGroup& pg) {
3589 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
3590 KMemoryState::FlagCanTransfer, KMemoryPermission::None,
3591 KMemoryPermission::None, KMemoryAttribute::All,
3592 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
3593 KMemoryAttribute::Locked, std::addressof(pg)));
3594}
3595
3596Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
3597 R_RETURN(this->LockMemoryAndOpen(
3598 out, nullptr, address, size, KMemoryState::FlagCanCodeMemory,
3599 KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
3600 KMemoryAttribute::All, KMemoryAttribute::None,
3601 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
3602 KMemoryPermission::KernelReadWrite),
3603 KMemoryAttribute::Locked));
3604}
3605
3606Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size,
3607 const KPageGroup& pg) {
3608 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory,
3609 KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
3610 KMemoryPermission::None, KMemoryAttribute::All,
3611 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
3612 KMemoryAttribute::Locked, std::addressof(pg)));
3613}
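// NOTE: The Lock*/Unlock* wrappers above all delegate to LockMemoryAndOpen/UnlockMemory:
// locking sets the Locked attribute and switches the pages to
// (NotMapped | KernelReadWrite) (or, for transfer memory, to the caller-supplied
// permission), and the matching unlock restores UserReadWrite and clears Locked.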
3614
3615Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out,
3616 KProcessAddress address,
3617 size_t size) {
3618 // Lock the table.
3619 KScopedLightLock lk(m_general_lock);
3620
3621 // Get the range.
3622 R_TRY(this->GetContiguousMemoryRangeWithState(
3623 out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
3624 KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached,
3625 KMemoryAttribute::None));
3626
3627 // We got the range, so open it.
3628 out->Open();
3629
3630 R_SUCCEED();
3631}
3632
3633Result KPageTableBase::CopyMemoryFromLinearToUser(
3634 KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
3635 KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
3636 KMemoryAttribute src_attr) {
3637 // Lightly validate the range before doing anything else.
3638 R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
3639
3640 // Get the destination memory reference.
3641 auto& dst_memory = GetCurrentMemory(m_kernel);
3642
3643 // Copy the memory.
3644 {
3645 // Lock the table.
3646 KScopedLightLock lk(m_general_lock);
3647
3648 // Check memory state.
3649 R_TRY(this->CheckMemoryStateContiguous(
3650 src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
3651 src_attr_mask | KMemoryAttribute::Uncached, src_attr));
3652
3653 auto& impl = this->GetImpl();
3654
3655 // Begin traversal.
3656 TraversalContext context;
3657 TraversalEntry next_entry;
3658 bool traverse_valid =
3659 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
3660 ASSERT(traverse_valid);
3661
3662 // Prepare tracking variables.
3663 KPhysicalAddress cur_addr = next_entry.phys_addr;
3664 size_t cur_size =
3665 next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
3666 size_t tot_size = cur_size;
3667
3668 auto PerformCopy = [&]() -> Result {
3669 // Ensure the address is linear mapped.
3670 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3671
3672 // Copy as much aligned data as we can.
3673 if (cur_size >= sizeof(u32)) {
3674 const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
3675 R_UNLESS(dst_memory.WriteBlock(dst_addr,
3676 GetLinearMappedVirtualPointer(m_kernel, cur_addr),
3677 copy_size),
3678 ResultInvalidCurrentMemory);
3679
3680 dst_addr += copy_size;
3681 cur_addr += copy_size;
3682 cur_size -= copy_size;
3683 }
3684
3685 // Copy remaining data.
3686 if (cur_size > 0) {
3687 R_UNLESS(dst_memory.WriteBlock(
3688 dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
3689 ResultInvalidCurrentMemory);
3690 }
3691
3692 R_SUCCEED();
3693 };
3694
3695 // Iterate.
3696 while (tot_size < size) {
3697 // Continue the traversal.
3698 traverse_valid =
3699 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
3700 ASSERT(traverse_valid);
3701
3702 if (next_entry.phys_addr != (cur_addr + cur_size)) {
3703 // Perform copy.
3704 R_TRY(PerformCopy());
3705
3706 // Advance.
3707 dst_addr += cur_size;
3708
3709 cur_addr = next_entry.phys_addr;
3710 cur_size = next_entry.block_size;
3711 } else {
3712 cur_size += next_entry.block_size;
3713 }
3714
3715 tot_size += next_entry.block_size;
3716 }
3717
3718 // Ensure we use the right size for the last block.
3719 if (tot_size > size) {
3720 cur_size -= (tot_size - size);
3721 }
3722
3723 // Perform copy for the last block.
3724 R_TRY(PerformCopy());
3725 }
3726
3727 R_SUCCEED();
3728}
3729
3730Result KPageTableBase::CopyMemoryFromLinearToKernel(
3731 void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
3732 KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
3733 KMemoryAttribute src_attr) {
3734 // Lightly validate the range before doing anything else.
3735 R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
3736
3737 // Copy the memory.
3738 {
3739 // Lock the table.
3740 KScopedLightLock lk(m_general_lock);
3741
3742 // Check memory state.
3743 R_TRY(this->CheckMemoryStateContiguous(
3744 src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
3745 src_attr_mask | KMemoryAttribute::Uncached, src_attr));
3746
3747 auto& impl = this->GetImpl();
3748
3749 // Begin traversal.
3750 TraversalContext context;
3751 TraversalEntry next_entry;
3752 bool traverse_valid =
3753 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
3754 ASSERT(traverse_valid);
3755
3756 // Prepare tracking variables.
3757 KPhysicalAddress cur_addr = next_entry.phys_addr;
3758 size_t cur_size =
3759 next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
3760 size_t tot_size = cur_size;
3761
3762 auto PerformCopy = [&]() -> Result {
3763 // Ensure the address is linear mapped.
3764 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3765
3766 // Copy the data.
3767 std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
3768
3769 R_SUCCEED();
3770 };
3771
3772 // Iterate.
3773 while (tot_size < size) {
3774 // Continue the traversal.
3775 traverse_valid =
3776 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
3777 ASSERT(traverse_valid);
3778
3779 if (next_entry.phys_addr != (cur_addr + cur_size)) {
3780 // Perform copy.
3781 R_TRY(PerformCopy());
3782
3783 // Advance.
3784 buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
3785
3786 cur_addr = next_entry.phys_addr;
3787 cur_size = next_entry.block_size;
3788 } else {
3789 cur_size += next_entry.block_size;
3790 }
3791
3792 tot_size += next_entry.block_size;
3793 }
3794
3795 // Ensure we use the right size for the last block.
3796 if (tot_size > size) {
3797 cur_size -= (tot_size - size);
3798 }
3799
3800 // Perform copy for the last block.
3801 R_TRY(PerformCopy());
3802 }
3803
3804 R_SUCCEED();
3805}
3806
3807Result KPageTableBase::CopyMemoryFromUserToLinear(
3808 KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state,
3809 KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
3810 KProcessAddress src_addr) {
3811 // Lightly validate the range before doing anything else.
3812 R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
3813
3814 // Get the source memory reference.
3815 auto& src_memory = GetCurrentMemory(m_kernel);
3816
3817 // Copy the memory.
3818 {
3819 // Lock the table.
3820 KScopedLightLock lk(m_general_lock);
3821
3822 // Check memory state.
3823 R_TRY(this->CheckMemoryStateContiguous(
3824 dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
3825 dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
3826
3827 auto& impl = this->GetImpl();
3828
3829 // Begin traversal.
3830 TraversalContext context;
3831 TraversalEntry next_entry;
3832 bool traverse_valid =
3833 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
3834 ASSERT(traverse_valid);
3835
3836 // Prepare tracking variables.
3837 KPhysicalAddress cur_addr = next_entry.phys_addr;
3838 size_t cur_size =
3839 next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
3840 size_t tot_size = cur_size;
3841
3842 auto PerformCopy = [&]() -> Result {
3843 // Ensure the address is linear mapped.
3844 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3845
3846 // Copy as much aligned data as we can.
3847 if (cur_size >= sizeof(u32)) {
3848 const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
3849 R_UNLESS(src_memory.ReadBlock(src_addr,
3850 GetLinearMappedVirtualPointer(m_kernel, cur_addr),
3851 copy_size),
3852 ResultInvalidCurrentMemory);
3853 src_addr += copy_size;
3854 cur_addr += copy_size;
3855 cur_size -= copy_size;
3856 }
3857
3858 // Copy remaining data.
3859 if (cur_size > 0) {
3860 R_UNLESS(src_memory.ReadBlock(
3861 src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
3862 ResultInvalidCurrentMemory);
3863 }
3864
3865 R_SUCCEED();
3866 };
3867
3868 // Iterate.
3869 while (tot_size < size) {
3870 // Continue the traversal.
3871 traverse_valid =
3872 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
3873 ASSERT(traverse_valid);
3874
3875 if (next_entry.phys_addr != (cur_addr + cur_size)) {
3876 // Perform copy.
3877 R_TRY(PerformCopy());
3878
3879 // Advance.
3880 src_addr += cur_size;
3881
3882 cur_addr = next_entry.phys_addr;
3883 cur_size = next_entry.block_size;
3884 } else {
3885 cur_size += next_entry.block_size;
3886 }
3887
3888 tot_size += next_entry.block_size;
3889 }
3890
3891 // Ensure we use the right size for the last block.
3892 if (tot_size > size) {
3893 cur_size -= (tot_size - size);
3894 }
3895
3896 // Perform copy for the last block.
3897 R_TRY(PerformCopy());
3898 }
3899
3900 R_SUCCEED();
3901}
3902
3903Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
3904 KMemoryState dst_state_mask,
3905 KMemoryState dst_state,
3906 KMemoryPermission dst_test_perm,
3907 KMemoryAttribute dst_attr_mask,
3908 KMemoryAttribute dst_attr, void* buffer) {
3909 // Lightly validate the range before doing anything else.
3910 R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
3911
3912 // Copy the memory.
3913 {
3914 // Lock the table.
3915 KScopedLightLock lk(m_general_lock);
3916
3917 // Check memory state.
3918 R_TRY(this->CheckMemoryStateContiguous(
3919 dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
3920 dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
3921
3922 auto& impl = this->GetImpl();
3923
3924 // Begin traversal.
3925 TraversalContext context;
3926 TraversalEntry next_entry;
3927 bool traverse_valid =
3928 impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
3929 ASSERT(traverse_valid);
3930
3931 // Prepare tracking variables.
3932 KPhysicalAddress cur_addr = next_entry.phys_addr;
3933 size_t cur_size =
3934 next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
3935 size_t tot_size = cur_size;
3936
3937 auto PerformCopy = [&]() -> Result {
3938 // Ensure the address is linear mapped.
3939 R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
3940
3941 // Copy the data.
3942 std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size);
3943
3944 R_SUCCEED();
3945 };
3946
3947 // Iterate.
3948 while (tot_size < size) {
3949 // Continue the traversal.
3950 traverse_valid =
3951 impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
3952 ASSERT(traverse_valid);
3953
3954 if (next_entry.phys_addr != (cur_addr + cur_size)) {
3955 // Perform copy.
3956 R_TRY(PerformCopy());
3957
3958 // Advance.
3959 buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
3960
3961 cur_addr = next_entry.phys_addr;
3962 cur_size = next_entry.block_size;
3963 } else {
3964 cur_size += next_entry.block_size;
3965 }
3966
3967 tot_size += next_entry.block_size;
3968 }
3969
3970 // Ensure we use the right size for the last block.
3971 if (tot_size > size) {
3972 cur_size -= (tot_size - size);
3973 }
3974
3975 // Perform copy for the last block.
3976 R_TRY(PerformCopy());
3977 }
3978
3979 R_SUCCEED();
3980}
3981
3982Result KPageTableBase::CopyMemoryFromHeapToHeap(
3983 KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
3984 KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
3985 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
3986 KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
3987 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
3988 // For convenience, alias this.
3989 KPageTableBase& src_page_table = *this;
3990
3991 // Lightly validate the ranges before doing anything else.
3992 R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
3993 R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
3994
3995 // Copy the memory.
3996 {
3997 // Acquire the table locks.
3998 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
3999
4000 // Check memory state.
4001 R_TRY(src_page_table.CheckMemoryStateContiguous(
4002 src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
4003 src_attr_mask | KMemoryAttribute::Uncached, src_attr));
4004 R_TRY(dst_page_table.CheckMemoryStateContiguous(
4005 dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
4006 dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
4007
4008 // Get implementations.
4009 auto& src_impl = src_page_table.GetImpl();
4010 auto& dst_impl = dst_page_table.GetImpl();
4011
4012 // Prepare for traversal.
4013 TraversalContext src_context;
4014 TraversalContext dst_context;
4015 TraversalEntry src_next_entry;
4016 TraversalEntry dst_next_entry;
4017 bool traverse_valid;
4018
4019 // Begin traversal.
4020 traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
4021 std::addressof(src_context), src_addr);
4022 ASSERT(traverse_valid);
4023 traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
4024 std::addressof(dst_context), dst_addr);
4025 ASSERT(traverse_valid);
4026
4027 // Prepare tracking variables.
4028 KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
4029 KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
4030 size_t cur_src_size = src_next_entry.block_size -
4031 (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
4032 size_t cur_dst_size = dst_next_entry.block_size -
4033 (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
4034
4035 // Adjust the initial block sizes.
4036 src_next_entry.block_size = cur_src_size;
4037 dst_next_entry.block_size = cur_dst_size;
4038
4039 // Before we get any crazier, succeed if there's nothing to do.
4040 R_SUCCEED_IF(size == 0);
4041
4042 // We're going to manage dual traversal via an offset against the total size.
4043 KPhysicalAddress cur_src_addr = cur_src_block_addr;
4044 KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
4045 size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
4046
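        // NOTE: Each iteration copies at most cur_min_size bytes (the smaller of the bytes left in
        // the current source and destination runs, capped by the bytes remaining overall). When a
        // run is exhausted its traversal advances; if every exhausted run turns out to be
        // physically contiguous with its successor, the copy is deferred (skip_copy) and the run
        // simply grows. Illustrative values only: with 0x3000 bytes left in the source run and
        // 0x1000 left in the destination run, cur_min_size is 0x1000, and only the destination
        // traversal needs to advance afterwards.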
4047 // Iterate.
4048 size_t ofs = 0;
4049 while (ofs < size) {
4050 // Determine how much we can copy this iteration.
4051 const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
4052
4053 // If we need to advance the traversals, do so.
4054 bool updated_src = false, updated_dst = false, skip_copy = false;
4055 if (ofs + cur_copy_size != size) {
4056 if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
4057 // Continue the src traversal.
4058 traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
4059 std::addressof(src_context));
4060 ASSERT(traverse_valid);
4061
4062 // Update source.
4063 updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
4064 }
4065
4066 if (cur_dst_addr + cur_min_size ==
4067 dst_next_entry.phys_addr + dst_next_entry.block_size) {
4068 // Continue the dst traversal.
4069 traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
4070 std::addressof(dst_context));
4071 ASSERT(traverse_valid);
4072
4073 // Update destination.
4074 updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
4075 }
4076
4077 // If we didn't update either of source/destination, skip the copy this iteration.
4078 if (!updated_src && !updated_dst) {
4079 skip_copy = true;
4080
4081 // Update the source block address.
4082 cur_src_block_addr = src_next_entry.phys_addr;
4083 }
4084 }
4085
4086 // Do the copy, unless we're skipping it.
4087 if (!skip_copy) {
4088 // We need both ends of the copy to be heap blocks.
4089 R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
4090 R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
4091
4092 // Copy the data.
4093 std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
4094 GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
4095
4096 // Update.
4097 cur_src_block_addr = src_next_entry.phys_addr;
4098 cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
4099 cur_dst_block_addr = dst_next_entry.phys_addr;
4100 cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
4101
4102 // Advance offset.
4103 ofs += cur_copy_size;
4104 }
4105
4106 // Update min size.
4107 cur_src_size = src_next_entry.block_size;
4108 cur_dst_size = dst_next_entry.block_size;
4109 cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
4110 cur_dst_block_addr - cur_dst_addr + cur_dst_size);
4111 }
4112 }
4113
4114 R_SUCCEED();
4115}
4116
4117Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(
4118 KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
4119 KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
4120 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
4121 KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
4122 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
4123 // For convenience, alias this.
4124 KPageTableBase& src_page_table = *this;
4125
4126 // Lightly validate the ranges before doing anything else.
4127 R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
4128 R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
4129
4130 // Copy the memory.
4131 {
4132 // Acquire the table locks.
4133 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
4134
4135 // Check memory state for source.
4136 R_TRY(src_page_table.CheckMemoryStateContiguous(
4137 src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
4138 src_attr_mask | KMemoryAttribute::Uncached, src_attr));
4139
4140 // Destination state is intentionally unchecked.
4141
4142 // Get implementations.
4143 auto& src_impl = src_page_table.GetImpl();
4144 auto& dst_impl = dst_page_table.GetImpl();
4145
4146 // Prepare for traversal.
4147 TraversalContext src_context;
4148 TraversalContext dst_context;
4149 TraversalEntry src_next_entry;
4150 TraversalEntry dst_next_entry;
4151 bool traverse_valid;
4152
4153 // Begin traversal.
4154 traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
4155 std::addressof(src_context), src_addr);
4156 ASSERT(traverse_valid);
4157 traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
4158 std::addressof(dst_context), dst_addr);
4159 ASSERT(traverse_valid);
4160
4161 // Prepare tracking variables.
4162 KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
4163 KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
4164 size_t cur_src_size = src_next_entry.block_size -
4165 (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
4166 size_t cur_dst_size = dst_next_entry.block_size -
4167 (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
4168
4169 // Adjust the initial block sizes.
4170 src_next_entry.block_size = cur_src_size;
4171 dst_next_entry.block_size = cur_dst_size;
4172
4173 // Before we get any crazier, succeed if there's nothing to do.
4174 R_SUCCEED_IF(size == 0);
4175
4176 // We're going to manage dual traversal via an offset against the total size.
4177 KPhysicalAddress cur_src_addr = cur_src_block_addr;
4178 KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
4179 size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
4180
4181 // Iterate.
4182 size_t ofs = 0;
4183 while (ofs < size) {
4184 // Determine how much we can copy this iteration.
4185 const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
4186
4187 // If we need to advance the traversals, do so.
4188 bool updated_src = false, updated_dst = false, skip_copy = false;
4189 if (ofs + cur_copy_size != size) {
4190 if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
4191 // Continue the src traversal.
4192 traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
4193 std::addressof(src_context));
4194 ASSERT(traverse_valid);
4195
4196 // Update source.
4197 updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
4198 }
4199
4200 if (cur_dst_addr + cur_min_size ==
4201 dst_next_entry.phys_addr + dst_next_entry.block_size) {
4202 // Continue the dst traversal.
4203 traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
4204 std::addressof(dst_context));
4205 ASSERT(traverse_valid);
4206
4207 // Update destination.
4208 updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
4209 }
4210
4211 // If we didn't update either of source/destination, skip the copy this iteration.
4212 if (!updated_src && !updated_dst) {
4213 skip_copy = true;
4214
4215 // Update the source block address.
4216 cur_src_block_addr = src_next_entry.phys_addr;
4217 }
4218 }
4219
4220 // Do the copy, unless we're skipping it.
4221 if (!skip_copy) {
4222 // We need both ends of the copy to be heap blocks.
4223 R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
4224 R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
4225
4226 // Copy the data.
4227 std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
4228 GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
4229
4230 // Update.
4231 cur_src_block_addr = src_next_entry.phys_addr;
4232 cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
4233 cur_dst_block_addr = dst_next_entry.phys_addr;
4234 cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
4235
4236 // Advance offset.
4237 ofs += cur_copy_size;
4238 }
4239
4240 // Update min size.
4241 cur_src_size = src_next_entry.block_size;
4242 cur_dst_size = dst_next_entry.block_size;
4243 cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
4244 cur_dst_block_addr - cur_dst_addr + cur_dst_size);
4245 }
4246 }
4247
4248 R_SUCCEED();
4249}
4250
4251Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
4252 KProcessAddress address, size_t size,
4253 KMemoryPermission test_perm, KMemoryState dst_state) {
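    // Overview: walk the client's blocks covering [address, address + size). Every block must pass
    // the state/permission/attribute checks for dst_state, and blocks overlapping the page-aligned
    // interior are reprotected to src_perm (KernelReadWrite | NotMapped for read-write transfers,
    // i.e. no user access; UserRead otherwise). blocks_needed counts how many block splits the
    // caller's subsequent UpdateLock may require.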
4254 // Validate pre-conditions.
4255 ASSERT(this->IsLockedByCurrentThread());
4256 ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
4257 test_perm == KMemoryPermission::UserRead);
4258
4259 // Check that the address is in range.
4260 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
4261
4262 // Get the source permission.
4263 const auto src_perm = static_cast<KMemoryPermission>(
4264 (test_perm == KMemoryPermission::UserReadWrite)
4265 ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
4266 : KMemoryPermission::UserRead);
4267
4268 // Get aligned extents.
4269 const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
4270 const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
4271 const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
4272 const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
4273
4274 const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
4275 const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
4276
4277 // Get the test state and attribute mask.
4278 KMemoryState test_state;
4279 KMemoryAttribute test_attr_mask;
4280 switch (dst_state) {
4281 case KMemoryState::Ipc:
4282 test_state = KMemoryState::FlagCanUseIpc;
4283 test_attr_mask =
4284 KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
4285 break;
4286 case KMemoryState::NonSecureIpc:
4287 test_state = KMemoryState::FlagCanUseNonSecureIpc;
4288 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
4289 break;
4290 case KMemoryState::NonDeviceIpc:
4291 test_state = KMemoryState::FlagCanUseNonDeviceIpc;
4292 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
4293 break;
4294 default:
4295 R_THROW(ResultInvalidCombination);
4296 }
4297
4298 // Ensure that on failure, we roll back appropriately.
4299 size_t mapped_size = 0;
4300 ON_RESULT_FAILURE {
4301 if (mapped_size > 0) {
4302 this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
4303 src_perm);
4304 }
4305 };
4306
4307 size_t blocks_needed = 0;
4308
4309 // Iterate, mapping as needed.
4310 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
4311 while (true) {
4312 const KMemoryInfo info = it->GetMemoryInfo();
4313
4314 // Validate the current block.
4315 R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
4316 test_attr_mask, KMemoryAttribute::None));
4317
4318 if (mapping_src_start < mapping_src_end &&
4319 GetInteger(mapping_src_start) < info.GetEndAddress() &&
4320 info.GetAddress() < GetInteger(mapping_src_end)) {
4321 const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
4322 ? info.GetAddress()
4323 : GetInteger(mapping_src_start);
4324 const auto cur_end = mapping_src_last >= info.GetLastAddress()
4325 ? info.GetEndAddress()
4326 : GetInteger(mapping_src_end);
4327 const size_t cur_size = cur_end - cur_start;
4328
4329 if (info.GetAddress() < GetInteger(mapping_src_start)) {
4330 ++blocks_needed;
4331 }
4332 if (mapping_src_last < info.GetLastAddress()) {
4333 ++blocks_needed;
4334 }
4335
4336 // Set the permissions on the block, if we need to.
4337 if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
4338 const DisableMergeAttribute head_body_attr =
4339 (GetInteger(mapping_src_start) >= info.GetAddress())
4340 ? DisableMergeAttribute::DisableHeadAndBody
4341 : DisableMergeAttribute::None;
4342 const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end))
4343 ? DisableMergeAttribute::DisableTail
4344 : DisableMergeAttribute::None;
4345 const KPageProperties properties = {
4346 src_perm, false, false,
4347 static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
4348 R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties,
4349 OperationType::ChangePermissions, false));
4350 }
4351
4352 // Note that we mapped this part.
4353 mapped_size += cur_size;
4354 }
4355
4356 // If the block is at the end, we're done.
4357 if (aligned_src_last <= info.GetLastAddress()) {
4358 break;
4359 }
4360
4361 // Advance.
4362 ++it;
4363 ASSERT(it != m_memory_block_manager.end());
4364 }
4365
4366 if (out_blocks_needed != nullptr) {
4367 ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
4368 *out_blocks_needed = blocks_needed;
4369 }
4370
4371 R_SUCCEED();
4372}
4373
4374Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
4375 KProcessAddress src_addr, KMemoryPermission test_perm,
4376 KMemoryState dst_state, KPageTableBase& src_page_table,
4377 bool send) {
4378 ASSERT(this->IsLockedByCurrentThread());
4379 ASSERT(src_page_table.IsLockedByCurrentThread());
4380
4381 // Check that we can theoretically map.
4382 const KProcessAddress region_start = m_alias_region_start;
4383 const size_t region_size = m_alias_region_end - m_alias_region_start;
4384 R_UNLESS(size < region_size, ResultOutOfAddressSpace);
4385
4386 // Get aligned source extents.
4387 const KProcessAddress src_start = src_addr;
4388 const KProcessAddress src_end = src_addr + size;
4389 const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
4390 const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
4391 const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
4392 const KProcessAddress mapping_src_end =
4393 Common::AlignDown(GetInteger(src_start) + size, PageSize);
4394 const size_t aligned_src_size = aligned_src_end - aligned_src_start;
4395 const size_t mapping_src_size =
4396 (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
4397
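    // The page-aligned middle of the client buffer will be aliased directly into this table. Any
    // unaligned head/tail instead lands on freshly allocated "partial" pages: when sending, the
    // bytes that belong to the message are copied in and the rest of the page is set to the IPC
    // fill value; when not sending, the whole partial page is simply filled.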
4398 // Select a random address to map at.
4399 KProcessAddress dst_addr = 0;
4400 {
4401 const size_t alignment = 4_KiB;
4402 const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
4403
4404 dst_addr =
4405 this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
4406 alignment, offset, this->GetNumGuardPages());
4407 R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
4408 }
4409
4410 // Check that we can perform the operation we're about to perform.
4411 ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
4412
4413 // Create an update allocator.
4414 Result allocator_result;
4415 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
4416 m_memory_block_slab_manager);
4417 R_TRY(allocator_result);
4418
4419 // We're going to perform an update, so create a helper.
4420 KScopedPageTableUpdater updater(this);
4421
4422 // Reserve space for any partial pages we allocate.
4423 const size_t unmapped_size = aligned_src_size - mapping_src_size;
4424 KScopedResourceReservation memory_reservation(
4425 m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size);
4426 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
4427
4428 // Ensure that we manage page references correctly.
4429 KPhysicalAddress start_partial_page = 0;
4430 KPhysicalAddress end_partial_page = 0;
4431 KProcessAddress cur_mapped_addr = dst_addr;
4432
4433    // If the partial pages are mapped, an extra reference will have been opened. Otherwise,
4434    // they'll be freed on scope exit.
4435 SCOPE_EXIT({
4436 if (start_partial_page != 0) {
4437 m_kernel.MemoryManager().Close(start_partial_page, 1);
4438 }
4439 if (end_partial_page != 0) {
4440 m_kernel.MemoryManager().Close(end_partial_page, 1);
4441 }
4442 });
4443
4444 ON_RESULT_FAILURE {
4445 if (cur_mapped_addr != dst_addr) {
4446 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
4447 DisableMergeAttribute::None};
4448 R_ASSERT(this->Operate(updater.GetPageList(), dst_addr,
4449 (cur_mapped_addr - dst_addr) / PageSize, 0, false,
4450 unmap_properties, OperationType::Unmap, true));
4451 }
4452 };
4453
4454 // Allocate the start page as needed.
4455 if (aligned_src_start < mapping_src_start) {
4456 start_partial_page =
4457 m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
4458 R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
4459 }
4460
4461 // Allocate the end page as needed.
4462 if (mapping_src_end < aligned_src_end &&
4463 (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
4464 end_partial_page =
4465 m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
4466 R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
4467 }
4468
4469 // Get the implementation.
4470 auto& src_impl = src_page_table.GetImpl();
4471
4472 // Get the fill value for partial pages.
4473 const auto fill_val = m_ipc_fill_value;
4474
4475 // Begin traversal.
4476 TraversalContext context;
4477 TraversalEntry next_entry;
4478 bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry),
4479 std::addressof(context), aligned_src_start);
4480 ASSERT(traverse_valid);
4481
4482 // Prepare tracking variables.
4483 KPhysicalAddress cur_block_addr = next_entry.phys_addr;
4484 size_t cur_block_size =
4485 next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
4486 size_t tot_block_size = cur_block_size;
4487
4488 // Map the start page, if we have one.
4489 if (start_partial_page != 0) {
4490 // Ensure the page holds correct data.
4491 u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page);
4492 if (send) {
4493 const size_t partial_offset = src_start - aligned_src_start;
4494 size_t copy_size, clear_size;
4495 if (src_end < mapping_src_start) {
4496 copy_size = size;
4497 clear_size = mapping_src_start - src_end;
4498 } else {
4499 copy_size = mapping_src_start - src_start;
4500 clear_size = 0;
4501 }
4502
4503 std::memset(start_partial_virt, fill_val, partial_offset);
4504 std::memcpy(start_partial_virt + partial_offset,
4505 GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset,
4506 copy_size);
4507 if (clear_size > 0) {
4508 std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size);
4509 }
4510 } else {
4511 std::memset(start_partial_virt, fill_val, PageSize);
4512 }
4513
4514 // Map the page.
4515 const KPageProperties start_map_properties = {test_perm, false, false,
4516 DisableMergeAttribute::DisableHead};
4517 R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true,
4518 start_map_properties, OperationType::Map, false));
4519
4520 // Update tracking extents.
4521 cur_mapped_addr += PageSize;
4522 cur_block_addr += PageSize;
4523 cur_block_size -= PageSize;
4524
4525 // If the block's size was one page, we may need to continue traversal.
4526 if (cur_block_size == 0 && aligned_src_size > PageSize) {
4527 traverse_valid =
4528 src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
4529 ASSERT(traverse_valid);
4530
4531 cur_block_addr = next_entry.phys_addr;
4532 cur_block_size = next_entry.block_size;
4533 tot_block_size += next_entry.block_size;
4534 }
4535 }
4536
4537 // Map the remaining pages.
4538 while (aligned_src_start + tot_block_size < mapping_src_end) {
4539 // Continue the traversal.
4540 traverse_valid =
4541 src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
4542 ASSERT(traverse_valid);
4543
4544 // Process the block.
4545 if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
4546 // Map the block we've been processing so far.
4547 const KPageProperties map_properties = {test_perm, false, false,
4548 (cur_mapped_addr == dst_addr)
4549 ? DisableMergeAttribute::DisableHead
4550 : DisableMergeAttribute::None};
4551 R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize,
4552 cur_block_addr, true, map_properties, OperationType::Map, false));
4553
4554 // Update tracking extents.
4555 cur_mapped_addr += cur_block_size;
4556 cur_block_addr = next_entry.phys_addr;
4557 cur_block_size = next_entry.block_size;
4558 } else {
4559 cur_block_size += next_entry.block_size;
4560 }
4561 tot_block_size += next_entry.block_size;
4562 }
4563
4564 // Handle the last direct-mapped page.
4565 if (const KProcessAddress mapped_block_end =
4566 aligned_src_start + tot_block_size - cur_block_size;
4567 mapped_block_end < mapping_src_end) {
4568 const size_t last_block_size = mapping_src_end - mapped_block_end;
4569
4570 // Map the last block.
4571 const KPageProperties map_properties = {test_perm, false, false,
4572 (cur_mapped_addr == dst_addr)
4573 ? DisableMergeAttribute::DisableHead
4574 : DisableMergeAttribute::None};
4575 R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize,
4576 cur_block_addr, true, map_properties, OperationType::Map, false));
4577
4578 // Update tracking extents.
4579 cur_mapped_addr += last_block_size;
4580 cur_block_addr += last_block_size;
4581 if (mapped_block_end + cur_block_size < aligned_src_end &&
4582 cur_block_size == last_block_size) {
4583 traverse_valid =
4584 src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
4585 ASSERT(traverse_valid);
4586
4587 cur_block_addr = next_entry.phys_addr;
4588 }
4589 }
4590
4591 // Map the end page, if we have one.
4592 if (end_partial_page != 0) {
4593 // Ensure the page holds correct data.
4594 u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page);
4595 if (send) {
4596 const size_t copy_size = src_end - mapping_src_end;
4597 std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr),
4598 copy_size);
4599 std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size);
4600 } else {
4601 std::memset(end_partial_virt, fill_val, PageSize);
4602 }
4603
4604 // Map the page.
4605 const KPageProperties map_properties = {test_perm, false, false,
4606 (cur_mapped_addr == dst_addr)
4607 ? DisableMergeAttribute::DisableHead
4608 : DisableMergeAttribute::None};
4609 R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true,
4610 map_properties, OperationType::Map, false));
4611 }
4612
4613    // Update memory blocks to reflect our changes.
4614 m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
4615 dst_state, test_perm, KMemoryAttribute::None,
4616 KMemoryBlockDisableMergeAttribute::Normal,
4617 KMemoryBlockDisableMergeAttribute::None);
4618
4619 // Set the output address.
4620 *out_addr = dst_addr + (src_start - aligned_src_start);
4621
4622 // We succeeded.
4623 memory_reservation.Commit();
4624 R_SUCCEED();
4625}
4626
4627Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size,
4628 KProcessAddress src_addr, KPageTableBase& src_page_table,
4629 KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
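    // Ordering: client setup runs first and reprotects the source pages; server setup then maps
    // the region into this table's alias region. The source pages are only IPC-locked once both
    // steps succeed, and a failure after client setup rolls the client permissions back via
    // CleanupForIpcClientOnServerSetupFailure.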
4630 // For convenience, alias this.
4631 KPageTableBase& dst_page_table = *this;
4632
4633 // Acquire the table locks.
4634 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
4635
4636 // We're going to perform an update, so create a helper.
4637 KScopedPageTableUpdater updater(std::addressof(src_page_table));
4638
4639 // Perform client setup.
4640 size_t num_allocator_blocks;
4641 R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
4642 std::addressof(num_allocator_blocks), src_addr, size,
4643 test_perm, dst_state));
4644
4645 // Create an update allocator.
4646 Result allocator_result;
4647 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
4648 src_page_table.m_memory_block_slab_manager,
4649 num_allocator_blocks);
4650 R_TRY(allocator_result);
4651
4652 // Get the mapped extents.
4653 const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
4654 const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
4655 const size_t src_map_size = src_map_end - src_map_start;
4656
4657 // Ensure that we clean up appropriately if we fail after this.
4658 const auto src_perm = static_cast<KMemoryPermission>(
4659 (test_perm == KMemoryPermission::UserReadWrite)
4660 ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
4661 : KMemoryPermission::UserRead);
4662 ON_RESULT_FAILURE {
4663 if (src_map_end > src_map_start) {
4664 src_page_table.CleanupForIpcClientOnServerSetupFailure(
4665 updater.GetPageList(), src_map_start, src_map_size, src_perm);
4666 }
4667 };
4668
4669 // Perform server setup.
4670 R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
4671 src_page_table, send));
4672
4673 // If anything was mapped, ipc-lock the pages.
4674 if (src_map_start < src_map_end) {
4675 // Get the source permission.
4676 src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
4677 (src_map_end - src_map_start) / PageSize,
4678 &KMemoryBlock::LockForIpc, src_perm);
4679 }
4680
4681 R_SUCCEED();
4682}
4683
4684Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size,
4685 KMemoryState dst_state) {
4686 // Validate the address.
4687 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
4688
4689 // Lock the table.
4690 KScopedLightLock lk(m_general_lock);
4691
4692 // Validate the memory state.
4693 size_t num_allocator_blocks;
4694 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
4695 KMemoryState::All, dst_state, KMemoryPermission::UserRead,
4696 KMemoryPermission::UserRead, KMemoryAttribute::All,
4697 KMemoryAttribute::None));
4698
4699 // Create an update allocator.
4700 Result allocator_result;
4701 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
4702 m_memory_block_slab_manager, num_allocator_blocks);
4703 R_TRY(allocator_result);
4704
4705 // We're going to perform an update, so create a helper.
4706 KScopedPageTableUpdater updater(this);
4707
4708 // Get aligned extents.
4709 const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
4710 const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
4711 const size_t aligned_size = aligned_end - aligned_start;
4712 const size_t aligned_num_pages = aligned_size / PageSize;
4713
4714 // Unmap the pages.
4715 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
4716 DisableMergeAttribute::None};
4717 R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false,
4718 unmap_properties, OperationType::Unmap, false));
4719
4720 // Update memory blocks.
4721 m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
4722 KMemoryState::None, KMemoryPermission::None,
4723 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
4724 KMemoryBlockDisableMergeAttribute::Normal);
4725
4726 // Release from the resource limit as relevant.
4727 const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
4728 const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
4729 const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
4730 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
4731 aligned_size - mapping_size);
4732
4733 R_SUCCEED();
4734}
4735
4736Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size,
4737 KMemoryState dst_state) {
4738 // Validate the address.
4739 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
4740
4741 // Get aligned source extents.
4742 const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
4743 const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
4744 const KProcessAddress mapping_last = mapping_end - 1;
4745 const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
4746
4747 // If nothing was mapped, we're actually done immediately.
4748 R_SUCCEED_IF(mapping_size == 0);
4749
4750 // Get the test state and attribute mask.
4751 KMemoryState test_state;
4752 KMemoryAttribute test_attr_mask;
4753 switch (dst_state) {
4754 case KMemoryState::Ipc:
4755 test_state = KMemoryState::FlagCanUseIpc;
4756 test_attr_mask =
4757 KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
4758 break;
4759 case KMemoryState::NonSecureIpc:
4760 test_state = KMemoryState::FlagCanUseNonSecureIpc;
4761 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
4762 break;
4763 case KMemoryState::NonDeviceIpc:
4764 test_state = KMemoryState::FlagCanUseNonDeviceIpc;
4765 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
4766 break;
4767 default:
4768 R_THROW(ResultInvalidCombination);
4769 }
4770
4771 // Lock the table.
4772 // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
4773 // convention elsewhere in KPageTableBase.
4774 KScopedLightLock lk(m_general_lock);
4775
4776 // We're going to perform an update, so create a helper.
4777 KScopedPageTableUpdater updater(this);
4778
4779 // Ensure that on failure, we roll back appropriately.
4780 size_t mapped_size = 0;
4781 ON_RESULT_FAILURE {
4782 if (mapped_size > 0) {
4783 // Determine where the mapping ends.
4784 const auto mapped_end = GetInteger(mapping_start) + mapped_size;
4785 const auto mapped_last = mapped_end - 1;
4786
4787 // Get current and next iterators.
4788 KMemoryBlockManager::const_iterator start_it =
4789 m_memory_block_manager.FindIterator(mapping_start);
4790 KMemoryBlockManager::const_iterator next_it = start_it;
4791 ++next_it;
4792
4793 // Get the current block info.
4794 KMemoryInfo cur_info = start_it->GetMemoryInfo();
4795
4796 // Create tracking variables.
4797 KProcessAddress cur_address = cur_info.GetAddress();
4798 size_t cur_size = cur_info.GetSize();
4799 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
4800 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
4801 bool first = cur_info.GetIpcDisableMergeCount() == 1 &&
4802 False(cur_info.GetDisableMergeAttribute() &
4803 KMemoryBlockDisableMergeAttribute::Locked);
4804
4805 while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
4806 // Check that we have a next block.
4807 ASSERT(next_it != m_memory_block_manager.end());
4808
4809 // Get the next info.
4810 const KMemoryInfo next_info = next_it->GetMemoryInfo();
4811
4812 // Check if we can consolidate the next block's permission set with the current one.
4813 const bool next_perm_eq =
4814 next_info.GetPermission() == next_info.GetOriginalPermission();
4815 const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
4816 if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
4817 cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
4818 // We can consolidate the reprotection for the current and next block into a
4819 // single call.
4820 cur_size += next_info.GetSize();
4821 } else {
4822 // We have to operate on the current block.
4823 if ((cur_needs_set_perm || first) && !cur_perm_eq) {
4824 const KPageProperties properties = {
4825 cur_info.GetPermission(), false, false,
4826 first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
4827 : DisableMergeAttribute::None};
4828 R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
4829 cur_size / PageSize, 0, false, properties,
4830 OperationType::ChangePermissions, true));
4831 }
4832
4833 // Advance.
4834 cur_address = next_info.GetAddress();
4835 cur_size = next_info.GetSize();
4836 first = false;
4837 }
4838
4839 // Advance.
4840 cur_info = next_info;
4841 cur_perm_eq = next_perm_eq;
4842 cur_needs_set_perm = next_needs_set_perm;
4843 ++next_it;
4844 }
4845
4846 // Process the last block.
4847 if ((first || cur_needs_set_perm) && !cur_perm_eq) {
4848 const KPageProperties properties = {
4849 cur_info.GetPermission(), false, false,
4850 first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
4851 : DisableMergeAttribute::None};
4852 R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
4853 false, properties, OperationType::ChangePermissions, true));
4854 }
4855 }
4856 };
4857
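    // Adjacent blocks whose permission restoration would be identical are consolidated below, so
    // each ChangePermissions operation covers the largest possible range; "first" selects the
    // EnableHeadAndBody merge attribute for the leading block when this was the last IPC mapping
    // keeping merges disabled there.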
4858 // Iterate, reprotecting as needed.
4859 {
4860 // Get current and next iterators.
4861 KMemoryBlockManager::const_iterator start_it =
4862 m_memory_block_manager.FindIterator(mapping_start);
4863 KMemoryBlockManager::const_iterator next_it = start_it;
4864 ++next_it;
4865
4866 // Validate the current block.
4867 KMemoryInfo cur_info = start_it->GetMemoryInfo();
4868 R_ASSERT(this->CheckMemoryState(
4869 cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
4870 test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
4871
4872 // Create tracking variables.
4873 KProcessAddress cur_address = cur_info.GetAddress();
4874 size_t cur_size = cur_info.GetSize();
4875 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
4876 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
4877 bool first =
4878 cur_info.GetIpcDisableMergeCount() == 1 &&
4879 False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked);
4880
4881 while ((cur_address + cur_size - 1) < mapping_last) {
4882 // Check that we have a next block.
4883 ASSERT(next_it != m_memory_block_manager.end());
4884
4885 // Get the next info.
4886 const KMemoryInfo next_info = next_it->GetMemoryInfo();
4887
4888 // Validate the next block.
4889 R_ASSERT(this->CheckMemoryState(
4890 next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
4891 test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
4892
4893 // Check if we can consolidate the next block's permission set with the current one.
4894 const bool next_perm_eq =
4895 next_info.GetPermission() == next_info.GetOriginalPermission();
4896 const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
4897 if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
4898 cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
4899 // We can consolidate the reprotection for the current and next block into a single
4900 // call.
4901 cur_size += next_info.GetSize();
4902 } else {
4903 // We have to operate on the current block.
4904 if ((cur_needs_set_perm || first) && !cur_perm_eq) {
4905 const KPageProperties properties = {
4906 cur_needs_set_perm ? cur_info.GetOriginalPermission()
4907 : cur_info.GetPermission(),
4908 false, false,
4909 first ? DisableMergeAttribute::EnableHeadAndBody
4910 : DisableMergeAttribute::None};
4911 R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
4912 false, properties, OperationType::ChangePermissions,
4913 false));
4914 }
4915
4916 // Mark that we mapped the block.
4917 mapped_size += cur_size;
4918
4919 // Advance.
4920 cur_address = next_info.GetAddress();
4921 cur_size = next_info.GetSize();
4922 first = false;
4923 }
4924
4925 // Advance.
4926 cur_info = next_info;
4927 cur_perm_eq = next_perm_eq;
4928 cur_needs_set_perm = next_needs_set_perm;
4929 ++next_it;
4930 }
4931
4932 // Process the last block.
4933 const auto lock_count =
4934 cur_info.GetIpcLockCount() +
4935 (next_it != m_memory_block_manager.end()
4936 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
4937 : 0);
4938 if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
4939 const DisableMergeAttribute head_body_attr =
4940 first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None;
4941 const DisableMergeAttribute tail_attr =
4942 lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None;
4943 const KPageProperties properties = {
4944 cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(),
4945 false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
4946 R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false,
4947 properties, OperationType::ChangePermissions, false));
4948 }
4949 }
4950
4951 // Create an update allocator.
4952 // NOTE: Guaranteed zero blocks needed here.
4953 Result allocator_result;
4954 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
4955 m_memory_block_slab_manager, 0);
4956 R_TRY(allocator_result);
4957
4958 // Unlock the pages.
4959 m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
4960 mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
4961 KMemoryPermission::None);
4962
4963 R_SUCCEED();
4964}
4965
4966void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list,
4967 KProcessAddress address, size_t size,
4968 KMemoryPermission prot_perm) {
4969 ASSERT(this->IsLockedByCurrentThread());
4970 ASSERT(Common::IsAligned(GetInteger(address), PageSize));
4971 ASSERT(Common::IsAligned(size, PageSize));
4972
4973 // Get the mapped extents.
4974 const KProcessAddress src_map_start = address;
4975 const KProcessAddress src_map_end = address + size;
4976 const KProcessAddress src_map_last = src_map_end - 1;
4977
4978 // This function is only invoked when there's something to do.
4979 ASSERT(src_map_end > src_map_start);
4980
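    // Walk every block overlapping the protected range and re-apply the block's recorded
    // permission wherever the IPC reprotection had diverged (checking the original permission for
    // blocks that are still IPC-locked), re-enabling head/tail merge attributes as appropriate.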
4981 // Iterate over blocks, fixing permissions.
4982 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
4983 while (true) {
4984 const KMemoryInfo info = it->GetMemoryInfo();
4985
4986 const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
4987 ? info.GetAddress()
4988 : GetInteger(src_map_start);
4989 const auto cur_end =
4990 src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
4991
4992 // If we can, fix the protections on the block.
4993 if ((info.GetIpcLockCount() == 0 &&
4994 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
4995 (info.GetIpcLockCount() != 0 &&
4996 (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
4997 // Check if we actually need to fix the protections on the block.
4998 if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
4999 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
5000 const bool start_nc = (info.GetAddress() == GetInteger(src_map_start))
5001 ? (False(info.GetDisableMergeAttribute() &
5002 (KMemoryBlockDisableMergeAttribute::Locked |
5003 KMemoryBlockDisableMergeAttribute::IpcLeft)))
5004 : info.GetAddress() <= GetInteger(src_map_start);
5005
5006 const DisableMergeAttribute head_body_attr =
5007 start_nc ? DisableMergeAttribute::EnableHeadAndBody
5008 : DisableMergeAttribute::None;
5009 DisableMergeAttribute tail_attr;
5010 if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
5011 auto next_it = it;
5012 ++next_it;
5013
5014 const auto lock_count =
5015 info.GetIpcLockCount() +
5016 (next_it != m_memory_block_manager.end()
5017 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
5018 : 0);
5019 tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail
5020 : DisableMergeAttribute::None;
5021 } else {
5022 tail_attr = DisableMergeAttribute::None;
5023 }
5024
5025 const KPageProperties properties = {
5026 info.GetPermission(), false, false,
5027 static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
5028 R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0,
5029 false, properties, OperationType::ChangePermissions, true));
5030 }
5031 }
5032
5033 // If we're past the end of the region, we're done.
5034 if (src_map_last <= info.GetLastAddress()) {
5035 break;
5036 }
5037
5038 // Advance.
5039 ++it;
5040 ASSERT(it != m_memory_block_manager.end());
5041 }
5042}
5043
5044Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
5045 // Lock the physical memory lock.
5046 KScopedLightLock phys_lk(m_map_physical_memory_lock);
5047
5048 // Calculate the last address for convenience.
5049 const KProcessAddress last_address = address + size - 1;
5050
5051 // Define iteration variables.
5052 KProcessAddress cur_address;
5053 size_t mapped_size;
5054
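    // Strategy: the first pass (under the general lock) measures how much of the range is already
    // mapped. The lock is then dropped while backing pages are allocated, and a second pass
    // re-checks the range under the lock; if another thread mapped or unmapped memory in the
    // meantime, the fresh allocation is released and the whole procedure restarts.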
5055 // The entire mapping process can be retried.
5056 while (true) {
5057 // Check if the memory is already mapped.
5058 {
5059 // Lock the table.
5060 KScopedLightLock lk(m_general_lock);
5061
5062 // Iterate over the memory.
5063 cur_address = address;
5064 mapped_size = 0;
5065
5066 auto it = m_memory_block_manager.FindIterator(cur_address);
5067 while (true) {
5068 // Check that the iterator is valid.
5069 ASSERT(it != m_memory_block_manager.end());
5070
5071 // Get the memory info.
5072 const KMemoryInfo info = it->GetMemoryInfo();
5073
5074 // Check if we're done.
5075 if (last_address <= info.GetLastAddress()) {
5076 if (info.GetState() != KMemoryState::Free) {
5077 mapped_size += (last_address + 1 - cur_address);
5078 }
5079 break;
5080 }
5081
5082 // Track the memory if it's mapped.
5083 if (info.GetState() != KMemoryState::Free) {
5084 mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
5085 }
5086
5087 // Advance.
5088 cur_address = info.GetEndAddress();
5089 ++it;
5090 }
5091
5092 // If the size mapped is the size requested, we've nothing to do.
5093 R_SUCCEED_IF(size == mapped_size);
5094 }
5095
5096 // Allocate and map the memory.
5097 {
5098 // Reserve the memory from the process resource limit.
5099 KScopedResourceReservation memory_reservation(
5100 m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size);
5101 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
5102
5103 // Allocate pages for the new memory.
5104 KPageGroup pg(m_kernel, m_block_info_manager);
5105 R_TRY(m_kernel.MemoryManager().AllocateForProcess(
5106 std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option,
5107 GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
5108
5109 // If we fail in the next bit (or retry), we need to cleanup the pages.
5110 auto pg_guard = SCOPE_GUARD({
5111 pg.OpenFirst();
5112 pg.Close();
5113 });
5114
5115 // Map the memory.
5116 {
5117 // Lock the table.
5118 KScopedLightLock lk(m_general_lock);
5119
5120 size_t num_allocator_blocks = 0;
5121
5122 // Verify that nobody has mapped memory since we first checked.
5123 {
5124 // Iterate over the memory.
5125 size_t checked_mapped_size = 0;
5126 cur_address = address;
5127
5128 auto it = m_memory_block_manager.FindIterator(cur_address);
5129 while (true) {
5130 // Check that the iterator is valid.
5131 ASSERT(it != m_memory_block_manager.end());
5132
5133 // Get the memory info.
5134 const KMemoryInfo info = it->GetMemoryInfo();
5135
5136 const bool is_free = info.GetState() == KMemoryState::Free;
5137 if (is_free) {
5138 if (info.GetAddress() < GetInteger(address)) {
5139 ++num_allocator_blocks;
5140 }
5141 if (last_address < info.GetLastAddress()) {
5142 ++num_allocator_blocks;
5143 }
5144 }
5145
5146 // Check if we're done.
5147 if (last_address <= info.GetLastAddress()) {
5148 if (!is_free) {
5149 checked_mapped_size += (last_address + 1 - cur_address);
5150 }
5151 break;
5152 }
5153
5154 // Track the memory if it's mapped.
5155 if (!is_free) {
5156 checked_mapped_size +=
5157 KProcessAddress(info.GetEndAddress()) - cur_address;
5158 }
5159
5160 // Advance.
5161 cur_address = info.GetEndAddress();
5162 ++it;
5163 }
5164
5165 // If the size now isn't what it was before, somebody mapped or unmapped
5166 // concurrently. If this happened, retry.
5167 if (mapped_size != checked_mapped_size) {
5168 continue;
5169 }
5170 }
5171
5172 // Create an update allocator.
5173 ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
5174 Result allocator_result;
5175 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
5176 m_memory_block_slab_manager,
5177 num_allocator_blocks);
5178 R_TRY(allocator_result);
5179
5180 // We're going to perform an update, so create a helper.
5181 KScopedPageTableUpdater updater(this);
5182
5183 // Prepare to iterate over the memory.
5184 auto pg_it = pg.begin();
5185 KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
5186 size_t pg_pages = pg_it->GetNumPages();
5187
5188 // Reset the current tracking address, and make sure we clean up on failure.
5189 pg_guard.Cancel();
5190 cur_address = address;
5191 ON_RESULT_FAILURE {
5192 if (cur_address > address) {
5193 const KProcessAddress last_unmap_address = cur_address - 1;
5194
5195 // Iterate, unmapping the pages.
5196 cur_address = address;
5197
5198 auto it = m_memory_block_manager.FindIterator(cur_address);
5199 while (true) {
5200 // Check that the iterator is valid.
5201 ASSERT(it != m_memory_block_manager.end());
5202
5203 // Get the memory info.
5204 const KMemoryInfo info = it->GetMemoryInfo();
5205
5206 // If the memory state is free, we mapped it and need to unmap it.
5207 if (info.GetState() == KMemoryState::Free) {
5208 // Determine the range to unmap.
5209 const KPageProperties unmap_properties = {
5210 KMemoryPermission::None, false, false,
5211 DisableMergeAttribute::None};
5212 const size_t cur_pages =
5213 std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
5214 last_unmap_address + 1 - cur_address) /
5215 PageSize;
5216
5217 // Unmap.
5218 R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
5219 cur_pages, 0, false, unmap_properties,
5220 OperationType::Unmap, true));
5221 }
5222
5223 // Check if we're done.
5224 if (last_unmap_address <= info.GetLastAddress()) {
5225 break;
5226 }
5227
5228 // Advance.
5229 cur_address = info.GetEndAddress();
5230 ++it;
5231 }
5232 }
5233
5234 // Release any remaining unmapped memory.
5235 m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
5236 m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages);
5237 for (++pg_it; pg_it != pg.end(); ++pg_it) {
5238 m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(),
5239 pg_it->GetNumPages());
5240 m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
5241 }
5242 };
5243
5244 auto it = m_memory_block_manager.FindIterator(cur_address);
5245 while (true) {
5246 // Check that the iterator is valid.
5247 ASSERT(it != m_memory_block_manager.end());
5248
5249 // Get the memory info.
5250 const KMemoryInfo info = it->GetMemoryInfo();
5251
5252 // If it's unmapped, we need to map it.
5253 if (info.GetState() == KMemoryState::Free) {
5254 // Determine the range to map.
5255 const KPageProperties map_properties = {
5256 KMemoryPermission::UserReadWrite, false, false,
5257 cur_address == this->GetAliasRegionStart()
5258 ? DisableMergeAttribute::DisableHead
5259 : DisableMergeAttribute::None};
5260 size_t map_pages =
5261 std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
5262 last_address + 1 - cur_address) /
5263 PageSize;
5264
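                    // The free range being mapped may straddle several physical runs in the
                    // allocated page group, so a temporary sub-group (cur_pg) covering exactly
                    // map_pages pages is assembled below, consuming pages from the end of the
                    // current physical run first.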
5265 // While we have pages to map, map them.
5266 {
5267 // Create a page group for the current mapping range.
5268 KPageGroup cur_pg(m_kernel, m_block_info_manager);
5269 {
5270 ON_RESULT_FAILURE_2 {
5271 cur_pg.OpenFirst();
5272 cur_pg.Close();
5273 };
5274
5275 size_t remain_pages = map_pages;
5276 while (remain_pages > 0) {
5277 // Check if we're at the end of the physical block.
5278 if (pg_pages == 0) {
5279 // Ensure there are more pages to map.
5280 ASSERT(pg_it != pg.end());
5281
5282 // Advance our physical block.
5283 ++pg_it;
5284 pg_phys_addr = pg_it->GetAddress();
5285 pg_pages = pg_it->GetNumPages();
5286 }
5287
5288 // Add whatever we can to the current block.
5289 const size_t cur_pages = std::min(pg_pages, remain_pages);
5290 R_TRY(cur_pg.AddBlock(pg_phys_addr +
5291 ((pg_pages - cur_pages) * PageSize),
5292 cur_pages));
5293
5294 // Advance.
5295 remain_pages -= cur_pages;
5296 pg_pages -= cur_pages;
5297 }
5298 }
5299
5300                    // Map the pages.
5301 R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
5302 cur_pg, map_properties,
5303 OperationType::MapFirstGroup, false));
5304 }
5305 }
5306
5307 // Check if we're done.
5308 if (last_address <= info.GetLastAddress()) {
5309 break;
5310 }
5311
5312 // Advance.
5313 cur_address = info.GetEndAddress();
5314 ++it;
5315 }
5316
5317 // We succeeded, so commit the memory reservation.
5318 memory_reservation.Commit();
5319
5320 // Increase our tracked mapped size.
5321 m_mapped_physical_memory_size += (size - mapped_size);
5322
5323 // Update the relevant memory blocks.
5324 m_memory_block_manager.UpdateIfMatch(
5325 std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
5326 KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
5327 KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
5328 address == this->GetAliasRegionStart()
5329 ? KMemoryBlockDisableMergeAttribute::Normal
5330 : KMemoryBlockDisableMergeAttribute::None,
5331 KMemoryBlockDisableMergeAttribute::None);
5332
5333 R_SUCCEED();
5334 }
5335 }
5336 }
5337}
5338
5339Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
5340 // Lock the physical memory lock.
5341 KScopedLightLock phys_lk(m_map_physical_memory_lock);
5342
5343 // Lock the table.
5344 KScopedLightLock lk(m_general_lock);
5345
5346 // Calculate the last address for convenience.
5347 const KProcessAddress last_address = address + size - 1;
5348
5349 // Define iteration variables.
5350 KProcessAddress map_start_address = 0;
5351 KProcessAddress map_last_address = 0;
5352
5353 KProcessAddress cur_address;
5354 size_t mapped_size;
5355 size_t num_allocator_blocks = 0;
5356
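    // First pass: every block in the range must be either Normal (with no attributes set) or Free;
    // the Normal portions are tallied into mapped_size. The Normal ranges are then unmapped, the
    // physical-memory resource charge is released, and the blocks are reset to Free.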
5357 // Check if the memory is mapped.
5358 {
5359 // Iterate over the memory.
5360 cur_address = address;
5361 mapped_size = 0;
5362
5363 auto it = m_memory_block_manager.FindIterator(cur_address);
5364 while (true) {
5365 // Check that the iterator is valid.
5366 ASSERT(it != m_memory_block_manager.end());
5367
5368 // Get the memory info.
5369 const KMemoryInfo info = it->GetMemoryInfo();
5370
5371 // Verify the memory's state.
5372 const bool is_normal = info.GetState() == KMemoryState::Normal &&
5373 info.GetAttribute() == KMemoryAttribute::None;
5374 const bool is_free = info.GetState() == KMemoryState::Free;
5375 R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
5376
5377 if (is_normal) {
5378 R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
5379
5380 if (map_start_address == 0) {
5381 map_start_address = cur_address;
5382 }
5383 map_last_address =
5384 (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
5385
5386 if (info.GetAddress() < GetInteger(address)) {
5387 ++num_allocator_blocks;
5388 }
5389 if (last_address < info.GetLastAddress()) {
5390 ++num_allocator_blocks;
5391 }
5392
5393 mapped_size += (map_last_address + 1 - cur_address);
5394 }
5395
5396 // Check if we're done.
5397 if (last_address <= info.GetLastAddress()) {
5398 break;
5399 }
5400
5401 // Advance.
5402 cur_address = info.GetEndAddress();
5403 ++it;
5404 }
5405
5406 // If there's nothing mapped, we've nothing to do.
5407 R_SUCCEED_IF(mapped_size == 0);
5408 }
5409
5410 // Create an update allocator.
5411 ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
5412 Result allocator_result;
5413 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
5414 m_memory_block_slab_manager, num_allocator_blocks);
5415 R_TRY(allocator_result);
5416
5417 // We're going to perform an update, so create a helper.
5418 KScopedPageTableUpdater updater(this);
5419
5420 // Separate the mapping.
5421 const KPageProperties sep_properties = {KMemoryPermission::None, false, false,
5422 DisableMergeAttribute::None};
5423 R_TRY(this->Operate(updater.GetPageList(), map_start_address,
5424 (map_last_address + 1 - map_start_address) / PageSize, 0, false,
5425 sep_properties, OperationType::Separate, false));
5426
5427 // Reset the current tracking address, and make sure we clean up on failure.
5428 cur_address = address;
5429
5430 // Iterate over the memory, unmapping as we go.
5431 auto it = m_memory_block_manager.FindIterator(cur_address);
5432
5433 const auto clear_merge_attr =
5434 (it->GetState() == KMemoryState::Normal &&
5435 it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
5436 ? KMemoryBlockDisableMergeAttribute::Normal
5437 : KMemoryBlockDisableMergeAttribute::None;
5438
5439 while (true) {
5440 // Check that the iterator is valid.
5441 ASSERT(it != m_memory_block_manager.end());
5442
5443 // Get the memory info.
5444 const KMemoryInfo info = it->GetMemoryInfo();
5445
5446 // If the memory state is normal, we need to unmap it.
5447 if (info.GetState() == KMemoryState::Normal) {
5448 // Determine the range to unmap.
5449 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
5450 DisableMergeAttribute::None};
5451 const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
5452 last_address + 1 - cur_address) /
5453 PageSize;
5454
5455 // Unmap.
5456 R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
5457 unmap_properties, OperationType::Unmap, false));
5458 }
5459
5460 // Check if we're done.
5461 if (last_address <= info.GetLastAddress()) {
5462 break;
5463 }
5464
5465 // Advance.
5466 cur_address = info.GetEndAddress();
5467 ++it;
5468 }
5469
5470 // Release the memory resource.
5471 m_mapped_physical_memory_size -= mapped_size;
5472 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size);
5473
5474 // Update memory blocks.
5475 m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
5476 KMemoryState::Free, KMemoryPermission::None,
5477 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
5478 clear_merge_attr);
5479
5480 // We succeeded.
5481 R_SUCCEED();
5482}
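// Illustrative standalone sketch (hypothetical types, not part of this file): the
// num_allocator_blocks bookkeeping above reserves one extra block description each time
// a boundary of the query range falls strictly inside an existing block, because that
// block will have to be split during the update.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
    std::uint64_t first; // first address covered by the block
    std::uint64_t last;  // last address covered by the block (inclusive)
};

// Count how many blocks the range [address, address + size) will split.
std::size_t CountSplitBlocks(const std::vector<Block>& blocks, std::uint64_t address,
                             std::uint64_t size) {
    const std::uint64_t last_address = address + size - 1;
    std::size_t count = 0;
    for (const auto& b : blocks) {
        if (b.last < address || last_address < b.first) {
            continue; // no overlap with the query range
        }
        if (b.first < address) {
            ++count; // the block begins before the range: it is split at the head
        }
        if (last_address < b.last) {
            ++count; // the block ends after the range: it is split at the tail
        }
    }
    return count;
}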
5483
5484Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
5485 UNIMPLEMENTED();
5486 R_THROW(ResultNotImplemented);
5487}
5488
5489Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
5490 UNIMPLEMENTED();
5491 R_THROW(ResultNotImplemented);
5492}
5493
5494Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size,
5495 KPageTableBase& src_page_table,
5496 KProcessAddress src_address) {
5497 // We need to lock both this table, and the current process's table, so set up an alias.
5498 KPageTableBase& dst_page_table = *this;
5499
5500 // Acquire the table locks.
5501 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
5502
5503 // Check that the memory is mapped in the destination process.
5504 size_t num_allocator_blocks;
5505 R_TRY(dst_page_table.CheckMemoryState(
5506 std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
5507 KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
5508 KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None));
5509
5510 // Check that the memory is mapped in the source process.
5511 R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess,
5512 KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
5513 KMemoryPermission::None, KMemoryAttribute::All,
5514 KMemoryAttribute::None));
5515
5516 // Validate that the memory ranges are compatible.
5517 {
5518 // Define a helper type.
5519 struct ContiguousRangeInfo {
5520 public:
5521 KPageTableBase& m_pt;
5522 TraversalContext m_context;
5523 TraversalEntry m_entry;
5524 KPhysicalAddress m_phys_addr;
5525 size_t m_cur_size;
5526 size_t m_remaining_size;
5527
5528 public:
5529 ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size)
5530 : m_pt(pt), m_remaining_size(size) {
5531 // Begin a traversal.
5532 ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry),
5533 std::addressof(m_context), address));
5534
5535 // Setup tracking fields.
5536 m_phys_addr = m_entry.phys_addr;
5537 m_cur_size = std::min<size_t>(
5538 m_remaining_size,
5539 m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
5540
5541 // Consume the whole contiguous block.
5542 this->DetermineContiguousBlockExtents();
5543 }
5544
5545 void ContinueTraversal() {
5546 // Update our remaining size.
5547 m_remaining_size = m_remaining_size - m_cur_size;
5548
5549 // Update our tracking fields.
5550 if (m_remaining_size > 0) {
5551 m_phys_addr = m_entry.phys_addr;
5552 m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);
5553
5554 // Consume the whole contiguous block.
5555 this->DetermineContiguousBlockExtents();
5556 }
5557 }
5558
5559 private:
5560 void DetermineContiguousBlockExtents() {
5561 // Continue traversing until we're not contiguous, or we have enough.
5562 while (m_cur_size < m_remaining_size) {
5563 ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry),
5564 std::addressof(m_context)));
5565
5566 // If we're not contiguous, we're done.
5567 if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
5568 break;
5569 }
5570
5571 // Update our current size.
5572 m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
5573 }
5574 }
5575 };
5576
5577 // Create ranges for both tables.
5578 ContiguousRangeInfo src_range(src_page_table, src_address, size);
5579 ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
5580
5581 // Validate the ranges.
5582 while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
5583 R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion);
5584 R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion);
5585
5586 src_range.ContinueTraversal();
5587 dst_range.ContinueTraversal();
5588 }
5589 }
5590
5591 // We no longer need to hold our lock on the source page table.
5592 lk.TryUnlockHalf(src_page_table.m_general_lock);
5593
5594 // Create an update allocator.
5595 Result allocator_result;
5596 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
5597 m_memory_block_slab_manager, num_allocator_blocks);
5598 R_TRY(allocator_result);
5599
5600 // We're going to perform an update, so create a helper.
5601 KScopedPageTableUpdater updater(this);
5602
5603 // Unmap the memory.
5604 const size_t num_pages = size / PageSize;
5605 const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
5606 DisableMergeAttribute::None};
5607 R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties,
5608 OperationType::Unmap, false));
5609
5610 // Apply the memory block update.
5611 m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
5612 KMemoryState::Free, KMemoryPermission::None,
5613 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
5614 KMemoryBlockDisableMergeAttribute::Normal);
5615
5616 R_SUCCEED();
5617}
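// Illustrative standalone sketch (not part of this file): ContiguousRangeInfo above
// reduces each mapping to maximal contiguous physical runs and compares the two sides
// in lockstep. Assuming both sequences already hold maximal runs, the check is a plain
// element-wise comparison of base and length.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Extent {
    std::uint64_t phys; // physical base of a contiguous run
    std::uint64_t size; // length of the run in bytes
};

// Two mappings alias the same physical memory only if every run matches exactly.
bool RangesAliasSameMemory(const std::vector<Extent>& a, const std::vector<Extent>& b) {
    if (a.size() != b.size()) {
        return false;
    }
    for (std::size_t i = 0; i < a.size(); ++i) {
        if (a[i].phys != b[i].phys || a[i].size != b[i].size) {
            return false;
        }
    }
    return true;
}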
5618
5619Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
5620 size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid,
5621 const KPageProperties properties, OperationType operation,
5622 bool reuse_ll) {
5623 ASSERT(this->IsLockedByCurrentThread());
5624 ASSERT(num_pages > 0);
5625 ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
5626 ASSERT(this->ContainsPages(virt_addr, num_pages));
5627
5628 // As we don't allocate page entries in guest memory, we don't need to allocate them from
5629 // or free them to the page list, and so it goes unused (along with page properties).
5630
5631 switch (operation) {
5632 case OperationType::Unmap: {
5633 // Ensure that any pages we track are closed on exit.
5634 KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
5635 SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
5636
5637 // Make a page group representing the region to unmap.
5638 this->MakePageGroup(pages_to_close, virt_addr, num_pages);
5639
5640 // Unmap.
5641 m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
5642
5643 R_SUCCEED();
5644 }
5645 case OperationType::Map: {
5646 ASSERT(virt_addr != 0);
5647 ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
5648 m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr);
5649
5650 // Open references to pages, if we should.
5651 if (this->IsHeapPhysicalAddress(phys_addr)) {
5652 m_kernel.MemoryManager().Open(phys_addr, num_pages);
5653 }
5654
5655 R_SUCCEED();
5656 }
5657 case OperationType::Separate: {
5658 // TODO: Unimplemented.
5659 R_SUCCEED();
5660 }
5661 case OperationType::ChangePermissions:
5662 case OperationType::ChangePermissionsAndRefresh:
5663 case OperationType::ChangePermissionsAndRefreshAndFlush:
5664 R_SUCCEED();
5665 default:
5666 UNREACHABLE();
5667 }
5668}
5669
5670Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
5671 size_t num_pages, const KPageGroup& page_group,
5672 const KPageProperties properties, OperationType operation,
5673 bool reuse_ll) {
5674 ASSERT(this->IsLockedByCurrentThread());
5675 ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
5676 ASSERT(num_pages > 0);
5677 ASSERT(num_pages == page_group.GetNumPages());
5678
5679 // As we don't allocate page entries in guest memory, we don't need to allocate them from
5680 // the page list, and so it goes unused (along with page properties).
5681
5682 switch (operation) {
5683 case OperationType::MapGroup:
5684 case OperationType::MapFirstGroup: {
5685 // We want to maintain a new reference to every page in the group.
5686 KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
5687
5688 for (const auto& node : page_group) {
5689 const size_t size{node.GetNumPages() * PageSize};
5690
5691 // Map the pages.
5692 m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress());
5693
5694 virt_addr += size;
5695 }
5696
5697 // We succeeded! We want to persist the reference to the pages.
5698 spg.CancelClose();
5699
5700 R_SUCCEED();
5701 }
5702 default:
5703 UNREACHABLE();
5704 }
5705}
5706
5707void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) {
5708 while (page_list->Peek()) {
5709 [[maybe_unused]] auto page = page_list->Pop();
5710
5711 // TODO: Free page entries once they are allocated in guest memory.
5712 // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
5713 // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
5714 // this->GetPageTableManager().Free(page);
5715 }
5716}
5717
5718} // namespace Kernel
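In the MapGroup/MapFirstGroup path of Operate above, KScopedPageGroup opens a reference to every page in the group up front and only keeps those references once spg.CancelClose() is reached; any early error return drops them again. A generic, standalone sketch of that cancel-on-success guard (not the yuzu implementation) is:

#include <functional>
#include <utility>

class ScopedRelease {
public:
    explicit ScopedRelease(std::function<void()> release) : m_release(std::move(release)) {}

    ~ScopedRelease() {
        if (m_release) {
            m_release(); // still armed on an error path: undo the references taken up front
        }
    }

    void Cancel() {
        m_release = nullptr; // success path: keep the references alive
    }

private:
    std::function<void()> m_release;
};

Usage mirrors the code above: take the references, construct the guard, perform the mapping, and call Cancel() only once the mapping has succeeded.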
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
new file mode 100644
index 000000000..ee2c41e67
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -0,0 +1,759 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <memory>
7
8#include "common/common_funcs.h"
9#include "common/page_table.h"
10#include "core/core.h"
11#include "core/hle/kernel/k_dynamic_resource_manager.h"
12#include "core/hle/kernel/k_light_lock.h"
13#include "core/hle/kernel/k_memory_block.h"
14#include "core/hle/kernel/k_memory_block_manager.h"
15#include "core/hle/kernel/k_memory_layout.h"
16#include "core/hle/kernel/k_memory_manager.h"
17#include "core/hle/kernel/k_typed_address.h"
18#include "core/hle/kernel/kernel.h"
19#include "core/hle/result.h"
20#include "core/memory.h"
21
22namespace Kernel {
23
24enum class DisableMergeAttribute : u8 {
25 None = (0U << 0),
26
27 DisableHead = (1U << 0),
28 DisableHeadAndBody = (1U << 1),
29 EnableHeadAndBody = (1U << 2),
30 DisableTail = (1U << 3),
31 EnableTail = (1U << 4),
32 EnableAndMergeHeadBodyTail = (1U << 5),
33
34 EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
35 DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
36};
37DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
38
39struct KPageProperties {
40 KMemoryPermission perm;
41 bool io;
42 bool uncached;
43 DisableMergeAttribute disable_merge_attributes;
44};
45static_assert(std::is_trivial_v<KPageProperties>);
46static_assert(sizeof(KPageProperties) == sizeof(u32));
47
48class KResourceLimit;
49class KSystemResource;
50
51class KPageTableBase {
52 YUZU_NON_COPYABLE(KPageTableBase);
53 YUZU_NON_MOVEABLE(KPageTableBase);
54
55public:
56 using TraversalEntry = Common::PageTable::TraversalEntry;
57 using TraversalContext = Common::PageTable::TraversalContext;
58
59 class MemoryRange {
60 private:
61 KernelCore& m_kernel;
62 KPhysicalAddress m_address;
63 size_t m_size;
64 bool m_heap;
65
66 public:
67 explicit MemoryRange(KernelCore& kernel)
68 : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
69
70 void Set(KPhysicalAddress address, size_t size, bool heap) {
71 m_address = address;
72 m_size = size;
73 m_heap = heap;
74 }
75
76 KPhysicalAddress GetAddress() const {
77 return m_address;
78 }
79 size_t GetSize() const {
80 return m_size;
81 }
82 bool IsHeap() const {
83 return m_heap;
84 }
85
86 void Open();
87 void Close();
88 };
89
90protected:
91 enum MemoryFillValue : u8 {
92 MemoryFillValue_Zero = 0,
93 MemoryFillValue_Stack = 'X',
94 MemoryFillValue_Ipc = 'Y',
95 MemoryFillValue_Heap = 'Z',
96 };
97
98 enum class OperationType {
99 Map = 0,
100 MapGroup = 1,
101 MapFirstGroup = 2,
102 Unmap = 3,
103 ChangePermissions = 4,
104 ChangePermissionsAndRefresh = 5,
105 ChangePermissionsAndRefreshAndFlush = 6,
106 Separate = 7,
107 };
108
109 static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
110 static constexpr size_t RegionAlignment = 2_MiB;
111 static_assert(RegionAlignment == KernelAslrAlignment);
112
113 struct PageLinkedList {
114 private:
115 struct Node {
116 Node* m_next;
117 std::array<u8, PageSize - sizeof(Node*)> m_buffer;
118 };
119 static_assert(std::is_trivial_v<Node>);
120
121 private:
122 Node* m_root{};
123
124 public:
125 constexpr PageLinkedList() : m_root(nullptr) {}
126
127 void Push(Node* n) {
128 ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
129 n->m_next = m_root;
130 m_root = n;
131 }
132
133 Node* Peek() const {
134 return m_root;
135 }
136
137 Node* Pop() {
138 Node* const r = m_root;
139
140 m_root = r->m_next;
141 r->m_next = nullptr;
142
143 return r;
144 }
145 };
146 static_assert(std::is_trivially_destructible_v<PageLinkedList>);
147
148 static constexpr auto DefaultMemoryIgnoreAttr =
149 KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
150
151 static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
152 switch (static_cast<Svc::CreateProcessFlag>(as_type &
153 Svc::CreateProcessFlag::AddressSpaceMask)) {
154 case Svc::CreateProcessFlag::AddressSpace64Bit:
155 return 39;
156 case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
157 return 36;
158 case Svc::CreateProcessFlag::AddressSpace32Bit:
159 case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
160 return 32;
161 default:
162 UNREACHABLE();
163 }
164 }
165
166private:
167 class KScopedPageTableUpdater {
168 private:
169 KPageTableBase* m_pt;
170 PageLinkedList m_ll;
171
172 public:
173 explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
174 explicit KScopedPageTableUpdater(KPageTableBase& pt)
175 : KScopedPageTableUpdater(std::addressof(pt)) {}
176 ~KScopedPageTableUpdater() {
177 m_pt->FinalizeUpdate(this->GetPageList());
178 }
179
180 PageLinkedList* GetPageList() {
181 return std::addressof(m_ll);
182 }
183 };
184
185private:
186 KernelCore& m_kernel;
187 Core::System& m_system;
188 KProcessAddress m_address_space_start{};
189 KProcessAddress m_address_space_end{};
190 KProcessAddress m_heap_region_start{};
191 KProcessAddress m_heap_region_end{};
192 KProcessAddress m_current_heap_end{};
193 KProcessAddress m_alias_region_start{};
194 KProcessAddress m_alias_region_end{};
195 KProcessAddress m_stack_region_start{};
196 KProcessAddress m_stack_region_end{};
197 KProcessAddress m_kernel_map_region_start{};
198 KProcessAddress m_kernel_map_region_end{};
199 KProcessAddress m_alias_code_region_start{};
200 KProcessAddress m_alias_code_region_end{};
201 KProcessAddress m_code_region_start{};
202 KProcessAddress m_code_region_end{};
203 size_t m_max_heap_size{};
204 size_t m_mapped_physical_memory_size{};
205 size_t m_mapped_unsafe_physical_memory{};
206 size_t m_mapped_insecure_memory{};
207 size_t m_mapped_ipc_server_memory{};
208 mutable KLightLock m_general_lock;
209 mutable KLightLock m_map_physical_memory_lock;
210 KLightLock m_device_map_lock;
211 std::unique_ptr<Common::PageTable> m_impl{};
212 Core::Memory::Memory* m_memory{};
213 KMemoryBlockManager m_memory_block_manager{};
214 u32 m_allocate_option{};
215 u32 m_address_space_width{};
216 bool m_is_kernel{};
217 bool m_enable_aslr{};
218 bool m_enable_device_address_space_merge{};
219 KMemoryBlockSlabManager* m_memory_block_slab_manager{};
220 KBlockInfoManager* m_block_info_manager{};
221 KResourceLimit* m_resource_limit{};
222 const KMemoryRegion* m_cached_physical_linear_region{};
223 const KMemoryRegion* m_cached_physical_heap_region{};
224 MemoryFillValue m_heap_fill_value{};
225 MemoryFillValue m_ipc_fill_value{};
226 MemoryFillValue m_stack_fill_value{};
227
228public:
229 explicit KPageTableBase(KernelCore& kernel);
230 ~KPageTableBase();
231
232 Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
233 Core::Memory::Memory& memory);
234 Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
235 bool enable_device_address_space_merge, bool from_back,
236 KMemoryManager::Pool pool, KProcessAddress code_address,
237 size_t code_size, KSystemResource* system_resource,
238 KResourceLimit* resource_limit, Core::Memory::Memory& memory);
239
240 void Finalize();
241
242 bool IsKernel() const {
243 return m_is_kernel;
244 }
245 bool IsAslrEnabled() const {
246 return m_enable_aslr;
247 }
248
249 bool Contains(KProcessAddress addr) const {
250 return m_address_space_start <= addr && addr <= m_address_space_end - 1;
251 }
252
253 bool Contains(KProcessAddress addr, size_t size) const {
254 return m_address_space_start <= addr && addr < addr + size &&
255 addr + size - 1 <= m_address_space_end - 1;
256 }
257
258 bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
259 return this->Contains(addr, size) && m_alias_region_start <= addr &&
260 addr + size - 1 <= m_alias_region_end - 1;
261 }
262
263 bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
264 return this->Contains(addr, size) && m_heap_region_start <= addr &&
265 addr + size - 1 <= m_heap_region_end - 1;
266 }
267
268 bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
269 // Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the
270 // alias code region.
271 return this->CanContain(addr, size, Svc::MemoryState::AliasCode);
272 }
273
274 KScopedLightLock AcquireDeviceMapLock() {
275 return KScopedLightLock(m_device_map_lock);
276 }
277
278 KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
279 size_t GetRegionSize(Svc::MemoryState state) const;
280 bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
281
282 KProcessAddress GetRegionAddress(KMemoryState state) const {
283 return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
284 }
285 size_t GetRegionSize(KMemoryState state) const {
286 return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
287 }
288 bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
289 return this->CanContain(addr, size,
290 static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
291 }
292
293public:
294 Core::Memory::Memory& GetMemory() {
295 return *m_memory;
296 }
297
298 Core::Memory::Memory& GetMemory() const {
299 return *m_memory;
300 }
301
302 Common::PageTable& GetImpl() {
303 return *m_impl;
304 }
305
306 Common::PageTable& GetImpl() const {
307 return *m_impl;
308 }
309
310 size_t GetNumGuardPages() const {
311 return this->IsKernel() ? 1 : 4;
312 }
313
314protected:
315 // NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions
316 // in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived
317 // class, and this avoids unnecessary virtual function calls.
318 Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
319 KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties,
320 OperationType operation, bool reuse_ll);
321 Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
322 const KPageGroup& page_group, const KPageProperties properties,
323 OperationType operation, bool reuse_ll);
324 void FinalizeUpdate(PageLinkedList* page_list);
325
326 bool IsLockedByCurrentThread() const {
327 return m_general_lock.IsLockedByCurrentThread();
328 }
329
330 bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
331 ASSERT(this->IsLockedByCurrentThread());
332
333 return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
334 m_cached_physical_linear_region, phys_addr);
335 }
336
337 bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
338 ASSERT(this->IsLockedByCurrentThread());
339
340 return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
341 m_cached_physical_linear_region, phys_addr, size);
342 }
343
344 bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
345 ASSERT(this->IsLockedByCurrentThread());
346
347 return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
348 phys_addr);
349 }
350
351 bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
352 ASSERT(this->IsLockedByCurrentThread());
353
354 return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
355 phys_addr, size);
356 }
357
358 bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
359 ASSERT(!this->IsLockedByCurrentThread());
360
361 return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
362 phys_addr);
363 }
364
365 bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
366 return (m_address_space_start <= addr) &&
367 (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
368 (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
369 }
370
371private:
372 KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
373 size_t num_pages, size_t alignment, size_t offset,
374 size_t guard_pages) const;
375
376 Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
377 KMemoryState state_mask, KMemoryState state,
378 KMemoryPermission perm_mask, KMemoryPermission perm,
379 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
380 Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
381 KMemoryState state, KMemoryPermission perm_mask,
382 KMemoryPermission perm, KMemoryAttribute attr_mask,
383 KMemoryAttribute attr) const {
384 R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
385 perm, attr_mask, attr));
386 }
387
388 Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
389 KMemoryPermission perm_mask, KMemoryPermission perm,
390 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
391 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
392 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
393 KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
394 KMemoryState state_mask, KMemoryState state,
395 KMemoryPermission perm_mask, KMemoryPermission perm,
396 KMemoryAttribute attr_mask, KMemoryAttribute attr,
397 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
398 Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
399 KMemoryAttribute* out_attr, size_t* out_blocks_needed,
400 KProcessAddress addr, size_t size, KMemoryState state_mask,
401 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
402 KMemoryAttribute attr_mask, KMemoryAttribute attr,
403 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
404 Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
405 KMemoryState state_mask, KMemoryState state,
406 KMemoryPermission perm_mask, KMemoryPermission perm,
407 KMemoryAttribute attr_mask, KMemoryAttribute attr,
408 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
409 R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
410 state_mask, state, perm_mask, perm, attr_mask, attr,
411 ignore_attr));
412 }
413 Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
414 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
415 KMemoryAttribute attr_mask, KMemoryAttribute attr,
416 KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
417 R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
418 attr_mask, attr, ignore_attr));
419 }
420
421 Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr,
422 size_t size, KMemoryState state_mask, KMemoryState state,
423 KMemoryPermission perm_mask, KMemoryPermission perm,
424 KMemoryAttribute attr_mask, KMemoryAttribute attr,
425 KMemoryPermission new_perm, KMemoryAttribute lock_attr);
426 Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
427 KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
428 KMemoryAttribute attr_mask, KMemoryAttribute attr,
429 KMemoryPermission new_perm, KMemoryAttribute lock_attr,
430 const KPageGroup* pg);
431
432 Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
433 KProcessAddress address) const;
434
435 Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
436 Svc::MemoryState state) const;
437
438 Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
439 size_t num_pages, KMemoryPermission perm);
440 Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
441 const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
442
443 void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
444 const KPageGroup& pg);
445
446 Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
447 bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
448
449 Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size,
450 KMemoryState state_mask, KMemoryState state,
451 KMemoryPermission perm_mask, KMemoryPermission perm,
452 KMemoryAttribute attr_mask, KMemoryAttribute attr);
453
454 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
455 KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
456 size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
457
458 Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr,
459 size_t size, KMemoryState state, KMemoryPermission perm);
460 Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size,
461 KMemoryState state);
462 Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size,
463 KMemoryState state);
464
465 Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
466 KProcessAddress address, size_t size, KMemoryPermission test_perm,
467 KMemoryState dst_state);
468 Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
469 KMemoryPermission test_perm, KMemoryState dst_state,
470 KPageTableBase& src_page_table, bool send);
471 void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
472 size_t size, KMemoryPermission prot_perm);
473
474 size_t GetSize(KMemoryState state) const;
475
476 bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
477 // Validate pre-conditions.
478 ASSERT(this->IsLockedByCurrentThread());
479
480 return this->GetImpl().GetPhysicalAddress(out, virt_addr);
481 }
482
483public:
484 bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const {
485 // Validate pre-conditions.
486 ASSERT(!this->IsLockedByCurrentThread());
487
488 // Acquire exclusive access to the table while doing address translation.
489 KScopedLightLock lk(m_general_lock);
490
491 return this->GetPhysicalAddressLocked(out, virt_addr);
492 }
493
494 KBlockInfoManager* GetBlockInfoManager() const {
495 return m_block_info_manager;
496 }
497
498 Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
499 Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
500 Svc::MemoryPermission perm);
501 Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
502 KMemoryAttribute attr);
503 Result SetHeapSize(KProcessAddress* out, size_t size);
504 Result SetMaxHeapSize(size_t size);
505 Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
506 KProcessAddress addr) const;
507 Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const;
508 Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
509 R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static));
510 }
511 Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
512 R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io));
513 }
514 Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
515 Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
516 Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
517 Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
518 Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
519 Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
520 Svc::MemoryMapping mapping, Svc::MemoryPermission perm);
521 Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
522 Svc::MemoryMapping mapping);
523 Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
524 Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
525 Result MapInsecureMemory(KProcessAddress address, size_t size);
526 Result UnmapInsecureMemory(KProcessAddress address, size_t size);
527
528 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
529 KPhysicalAddress phys_addr, KProcessAddress region_start,
530 size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
531 R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
532 region_num_pages, state, perm));
533 }
534
535 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
536 KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
537 R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
538 this->GetRegionAddress(state),
539 this->GetRegionSize(state) / PageSize, state, perm));
540 }
541
542 Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
543 KMemoryPermission perm) {
544 R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
545 this->GetRegionAddress(state),
546 this->GetRegionSize(state) / PageSize, state, perm));
547 }
548
549 Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
550 KMemoryPermission perm);
551 Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
552
553 Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
554 KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
555 KMemoryPermission perm);
556 Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
557 KMemoryPermission perm);
558 Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
559
560 Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
561 KMemoryState state_mask, KMemoryState state,
562 KMemoryPermission perm_mask, KMemoryPermission perm,
563 KMemoryAttribute attr_mask, KMemoryAttribute attr);
564
565 Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
566 Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
567
568 Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
569 Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
570 KMemoryState state);
571
572 Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
573 Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
574 KMemoryState state);
575
576 Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
577 KMemoryPermission perm, bool is_aligned, bool check_heap);
578 Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
579
580 Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
581 Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
582
583 Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
584 KProcessAddress address, size_t size,
585 KMemoryPermission perm, bool is_aligned);
586 Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address,
587 size_t size);
588
589 Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
590 Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
591
592 Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
593 KMemoryPermission perm);
594 Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
595 Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size);
596 Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
597
598 Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address,
599 size_t size);
600
601 Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
602 KProcessAddress src_addr, KMemoryState src_state_mask,
603 KMemoryState src_state, KMemoryPermission src_test_perm,
604 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
605 Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr,
606 KMemoryState src_state_mask, KMemoryState src_state,
607 KMemoryPermission src_test_perm,
608 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
609 Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
610 KMemoryState dst_state_mask, KMemoryState dst_state,
611 KMemoryPermission dst_test_perm,
612 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
613 KProcessAddress src_addr);
614 Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
615 KMemoryState dst_state_mask, KMemoryState dst_state,
616 KMemoryPermission dst_test_perm,
617 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
618 void* buffer);
619 Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr,
620 size_t size, KMemoryState dst_state_mask,
621 KMemoryState dst_state, KMemoryPermission dst_test_perm,
622 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
623 KProcessAddress src_addr, KMemoryState src_state_mask,
624 KMemoryState src_state, KMemoryPermission src_test_perm,
625 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
626 Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
627 KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
628 KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
629 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
630 KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
631 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
632
633 Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
634 KPageTableBase& src_page_table, KMemoryPermission test_perm,
635 KMemoryState dst_state, bool send);
636 Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
637 Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
638
639 Result MapPhysicalMemory(KProcessAddress address, size_t size);
640 Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
641
642 Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
643 Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
644
645 Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt,
646 KProcessAddress src_address);
647
648public:
649 KProcessAddress GetAddressSpaceStart() const {
650 return m_address_space_start;
651 }
652 KProcessAddress GetHeapRegionStart() const {
653 return m_heap_region_start;
654 }
655 KProcessAddress GetAliasRegionStart() const {
656 return m_alias_region_start;
657 }
658 KProcessAddress GetStackRegionStart() const {
659 return m_stack_region_start;
660 }
661 KProcessAddress GetKernelMapRegionStart() const {
662 return m_kernel_map_region_start;
663 }
664 KProcessAddress GetCodeRegionStart() const {
665 return m_code_region_start;
666 }
667 KProcessAddress GetAliasCodeRegionStart() const {
668 return m_alias_code_region_start;
669 }
670
671 size_t GetAddressSpaceSize() const {
672 return m_address_space_end - m_address_space_start;
673 }
674 size_t GetHeapRegionSize() const {
675 return m_heap_region_end - m_heap_region_start;
676 }
677 size_t GetAliasRegionSize() const {
678 return m_alias_region_end - m_alias_region_start;
679 }
680 size_t GetStackRegionSize() const {
681 return m_stack_region_end - m_stack_region_start;
682 }
683 size_t GetKernelMapRegionSize() const {
684 return m_kernel_map_region_end - m_kernel_map_region_start;
685 }
686 size_t GetCodeRegionSize() const {
687 return m_code_region_end - m_code_region_start;
688 }
689 size_t GetAliasCodeRegionSize() const {
690 return m_alias_code_region_end - m_alias_code_region_start;
691 }
692
693 size_t GetNormalMemorySize() const {
694 // Lock the table.
695 KScopedLightLock lk(m_general_lock);
696
697 return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
698 }
699
700 size_t GetCodeSize() const;
701 size_t GetCodeDataSize() const;
702 size_t GetAliasCodeSize() const;
703 size_t GetAliasCodeDataSize() const;
704
705 u32 GetAllocateOption() const {
706 return m_allocate_option;
707 }
708
709 u32 GetAddressSpaceWidth() const {
710 return m_address_space_width;
711 }
712
713public:
714 // Linear mapped
715 static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
716 return kernel.System().DeviceMemory().GetPointer<u8>(addr);
717 }
718
719 static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel,
720 KVirtualAddress addr) {
721 return kernel.MemoryLayout().GetLinearPhysicalAddress(addr);
722 }
723
724 static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel,
725 KPhysicalAddress addr) {
726 return kernel.MemoryLayout().GetLinearVirtualAddress(addr);
727 }
728
729 // Heap
730 static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
731 return kernel.System().DeviceMemory().GetPointer<u8>(addr);
732 }
733
734 static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) {
735 return GetLinearMappedPhysicalAddress(kernel, addr);
736 }
737
738 static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) {
739 return GetLinearMappedVirtualAddress(kernel, addr);
740 }
741
742 // Member heap
743 u8* GetHeapVirtualPointer(KPhysicalAddress addr) {
744 return GetHeapVirtualPointer(m_kernel, addr);
745 }
746
747 KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
748 return GetHeapPhysicalAddress(m_kernel, addr);
749 }
750
751 KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
752 return GetHeapVirtualAddress(m_kernel, addr);
753 }
754
755 // TODO: GetPageTableVirtualAddress
756 // TODO: GetPageTablePhysicalAddress
757};
758
759} // namespace Kernel
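For scale, the widths returned by GetAddressSpaceWidth above translate directly into the span of the process address space, 1 << width bytes. A small standalone illustration (not part of the header):

#include <cstdint>
#include <cstdio>

int main() {
    // 39 bits -> 512 GiB, 36 bits -> 64 GiB, 32 bits -> 4 GiB.
    for (const unsigned width : {39u, 36u, 32u}) {
        const std::uint64_t bytes = std::uint64_t{1} << width;
        std::printf("%u-bit address space: %llu GiB\n", width,
                    static_cast<unsigned long long>(bytes >> 30));
    }
    return 0;
}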
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 1f4b0755d..3cfb414e5 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
298 const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); 298 const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
299 const bool enable_das_merge = 299 const bool enable_das_merge =
300 False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); 300 False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
301 R_TRY(m_page_table.InitializeForProcess( 301 R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
302 as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, 302 params.code_address, params.code_num_pages * PageSize,
303 params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory())); 303 m_system_resource, res_limit, this->GetMemory()));
304 } 304 }
305 ON_RESULT_FAILURE_2 { 305 ON_RESULT_FAILURE_2 {
306 m_page_table.Finalize(); 306 m_page_table.Finalize();
@@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
391 const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); 391 const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
392 const bool enable_das_merge = 392 const bool enable_das_merge =
393 False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); 393 False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
394 R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, 394 R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
395 !enable_aslr, pool, params.code_address, code_size, 395 params.code_address, code_size, m_system_resource, res_limit,
396 m_system_resource, res_limit, this->GetMemory())); 396 this->GetMemory()));
397 } 397 }
398 ON_RESULT_FAILURE_2 { 398 ON_RESULT_FAILURE_2 {
399 m_page_table.Finalize(); 399 m_page_table.Finalize();
@@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_
1122void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} 1122void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
1123 1123
1124KProcess::KProcess(KernelCore& kernel) 1124KProcess::KProcess(KernelCore& kernel)
1125 : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()}, 1125 : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
1126 m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()}, 1126 m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
1127 m_address_arbiter{kernel.System()}, m_handle_table{kernel} {} 1127 m_handle_table{kernel} {}
1128KProcess::~KProcess() = default; 1128KProcess::~KProcess() = default;
1129 1129
1130Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, 1130Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index f9f755afa..8339465fd 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -5,13 +5,14 @@
5 5
6#include <map> 6#include <map>
7 7
8#include "core/file_sys/program_metadata.h"
8#include "core/hle/kernel/code_set.h" 9#include "core/hle/kernel/code_set.h"
9#include "core/hle/kernel/k_address_arbiter.h" 10#include "core/hle/kernel/k_address_arbiter.h"
10#include "core/hle/kernel/k_capabilities.h" 11#include "core/hle/kernel/k_capabilities.h"
11#include "core/hle/kernel/k_condition_variable.h" 12#include "core/hle/kernel/k_condition_variable.h"
12#include "core/hle/kernel/k_handle_table.h" 13#include "core/hle/kernel/k_handle_table.h"
13#include "core/hle/kernel/k_page_table.h"
14#include "core/hle/kernel/k_page_table_manager.h" 14#include "core/hle/kernel/k_page_table_manager.h"
15#include "core/hle/kernel/k_process_page_table.h"
15#include "core/hle/kernel/k_system_resource.h" 16#include "core/hle/kernel/k_system_resource.h"
16#include "core/hle/kernel/k_thread.h" 17#include "core/hle/kernel/k_thread.h"
17#include "core/hle/kernel/k_thread_local_page.h" 18#include "core/hle/kernel/k_thread_local_page.h"
@@ -65,7 +66,7 @@ private:
65 using TLPIterator = TLPTree::iterator; 66 using TLPIterator = TLPTree::iterator;
66 67
67private: 68private:
68 KPageTable m_page_table; 69 KProcessPageTable m_page_table;
69 std::atomic<size_t> m_used_kernel_memory_size{}; 70 std::atomic<size_t> m_used_kernel_memory_size{};
70 TLPTree m_fully_used_tlp_tree{}; 71 TLPTree m_fully_used_tlp_tree{};
71 TLPTree m_partially_used_tlp_tree{}; 72 TLPTree m_partially_used_tlp_tree{};
@@ -254,9 +255,8 @@ public:
254 return m_is_hbl; 255 return m_is_hbl;
255 } 256 }
256 257
257 Kernel::KMemoryManager::Direction GetAllocateOption() const { 258 u32 GetAllocateOption() const {
258 // TODO: property of the KPageTableBase 259 return m_page_table.GetAllocateOption();
259 return KMemoryManager::Direction::FromFront;
260 } 260 }
261 261
262 ThreadList& GetThreadList() { 262 ThreadList& GetThreadList() {
@@ -295,10 +295,10 @@ public:
295 return m_list_lock; 295 return m_list_lock;
296 } 296 }
297 297
298 KPageTable& GetPageTable() { 298 KProcessPageTable& GetPageTable() {
299 return m_page_table; 299 return m_page_table;
300 } 300 }
301 const KPageTable& GetPageTable() const { 301 const KProcessPageTable& GetPageTable() const {
302 return m_page_table; 302 return m_page_table;
303 } 303 }
304 304
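The KProcessPageTable type introduced below is a thin forwarding wrapper: it owns the concrete page table by value and re-exposes only the operations a process is expected to perform, so KProcess call sites no longer reach into KPageTable directly. Its general shape, sketched with hypothetical placeholder types:

#include <cstddef>

struct Result {
    int code{};
};

class Table {
public:
    Result Map(std::size_t /*num_pages*/) {
        return Result{}; // stand-in for the real mapping work
    }
};

class ProcessTable {
public:
    Result Map(std::size_t num_pages) {
        return m_table.Map(num_pages); // forward without adding behavior of its own
    }

private:
    Table m_table; // owned by value, never exposed to callers
};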
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h
new file mode 100644
index 000000000..b7ae5abd0
--- /dev/null
+++ b/src/core/hle/kernel/k_process_page_table.h
@@ -0,0 +1,480 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/hle/kernel/k_page_table.h"
7#include "core/hle/kernel/k_scoped_lock.h"
8#include "core/hle/kernel/svc_types.h"
9
10namespace Core {
11class ARM_Interface;
12}
13
14namespace Kernel {
15
16class KProcessPageTable {
17private:
18 KPageTable m_page_table;
19
20public:
21 KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {}
22
23 Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge,
24 bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address,
25 size_t code_size, KSystemResource* system_resource,
26 KResourceLimit* resource_limit, Core::Memory::Memory& memory) {
27 R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
28 from_back, pool, code_address, code_size,
29 system_resource, resource_limit, memory));
30 }
31
32 void Finalize() {
33 m_page_table.Finalize();
34 }
35
36 Core::Memory::Memory& GetMemory() {
37 return m_page_table.GetMemory();
38 }
39
40 Core::Memory::Memory& GetMemory() const {
41 return m_page_table.GetMemory();
42 }
43
44 Common::PageTable& GetImpl() {
45 return m_page_table.GetImpl();
46 }
47
48 Common::PageTable& GetImpl() const {
49 return m_page_table.GetImpl();
50 }
51
52 size_t GetNumGuardPages() const {
53 return m_page_table.GetNumGuardPages();
54 }
55
56 KScopedLightLock AcquireDeviceMapLock() {
57 return m_page_table.AcquireDeviceMapLock();
58 }
59
60 Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) {
61 R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
62 }
63
64 Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
65 Svc::MemoryPermission perm) {
66 R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
67 }
68
69 Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
70 KMemoryAttribute attr) {
71 R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
72 }
73
74 Result SetHeapSize(KProcessAddress* out, size_t size) {
75 R_RETURN(m_page_table.SetHeapSize(out, size));
76 }
77
78 Result SetMaxHeapSize(size_t size) {
79 R_RETURN(m_page_table.SetMaxHeapSize(size));
80 }
81
82 Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
83 KProcessAddress addr) const {
84 R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
85 }
86
87 Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) {
88 R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
89 }
90
91 Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
92 R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
93 }
94
95 Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
96 R_RETURN(m_page_table.QueryIoMapping(out, address, size));
97 }
98
99 Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
100 R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
101 }
102
103 Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
104 R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
105 }
106
107 Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
108 R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
109 }
110
111 Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
112 R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
113 }
114
115 Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
116 R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
117 }
118
119 Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
120 Svc::MemoryMapping mapping, Svc::MemoryPermission perm) {
121 R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
122 }
123
124 Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
125 Svc::MemoryMapping mapping) {
126 R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
127 }
128
129 Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
130 R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
131 }
132
133 Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
134 R_RETURN(m_page_table.MapRegion(region_type, perm));
135 }
136
137 Result MapInsecureMemory(KProcessAddress address, size_t size) {
138 R_RETURN(m_page_table.MapInsecureMemory(address, size));
139 }
140
141 Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
142 R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
143 }
144
145 Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
146 KMemoryPermission perm) {
147 R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
148 }
149
150 Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) {
151 R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
152 }
153
154 Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
155 KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
156 R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
157 }
158
159 Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
160 KMemoryPermission perm) {
161 R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
162 }
163
164 Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
165 KMemoryPermission perm) {
166 R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
167 }
168
169 Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
170 R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
171 }
172
173 Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
174 KMemoryState state_mask, KMemoryState state,
175 KMemoryPermission perm_mask, KMemoryPermission perm,
176 KMemoryAttribute attr_mask, KMemoryAttribute attr) {
177 R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state,
178 perm_mask, perm, attr_mask, attr));
179 }
180
181 Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
182 R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
183 }
184
185 Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
186 R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size));
187 }
188
189 Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
190 KMemoryState state) {
191 R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state));
192 }
193
194 Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
195 R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size));
196 }
197
198 Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
199 KMemoryState state) {
200 R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state));
201 }
202
203 Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
204 KMemoryPermission perm, bool is_aligned, bool check_heap) {
205 R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm,
206 is_aligned, check_heap));
207 }
208
209 Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
210 R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
211 }
212
213 Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
214 R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
215 }
216
217 Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
218 R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
219 }
220
221 Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
222 KProcessAddress address, size_t size,
223 KMemoryPermission perm, bool is_aligned) {
224 R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm,
225 is_aligned));
226 }
227
228 Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
229 KProcessAddress address, size_t size) {
230 R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
231 }
232
233 Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) {
234 R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
235 }
236
237 Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
238 R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
239 }
240
241 Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
242 KMemoryPermission perm) {
243 R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
244 }
245
246 Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
247 R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
248 }
249
250 Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
251 R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
252 }
253
254 Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
255 R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
256 }
257
258 Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out,
259 KProcessAddress address, size_t size) {
260 R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
261 }
262
263 Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
264 KProcessAddress src_addr, KMemoryState src_state_mask,
265 KMemoryState src_state, KMemoryPermission src_test_perm,
266 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
267 R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask,
268 src_state, src_test_perm, src_attr_mask,
269 src_attr));
270 }
271
272 Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr,
273 KMemoryState src_state_mask, KMemoryState src_state,
274 KMemoryPermission src_test_perm,
275 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
276 R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask,
277 src_state, src_test_perm, src_attr_mask,
278 src_attr));
279 }
280
281 Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
282 KMemoryState dst_state_mask, KMemoryState dst_state,
283 KMemoryPermission dst_test_perm,
284 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
285 KProcessAddress src_addr) {
286 R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state,
287 dst_test_perm, dst_attr_mask, dst_attr,
288 src_addr));
289 }
290
291 Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
292 KMemoryState dst_state_mask, KMemoryState dst_state,
293 KMemoryPermission dst_test_perm,
294 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
295 void* src_addr) {
296 R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask,
297 dst_state, dst_test_perm, dst_attr_mask,
298 dst_attr, src_addr));
299 }
300
301 Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr,
302 size_t size, KMemoryState dst_state_mask,
303 KMemoryState dst_state, KMemoryPermission dst_test_perm,
304 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
305 KProcessAddress src_addr, KMemoryState src_state_mask,
306 KMemoryState src_state, KMemoryPermission src_test_perm,
307 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
308 R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(
309 dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
310 dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
311 src_attr_mask, src_attr));
312 }
313
314 Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
315 KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size,
316 KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
317 KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
318 KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
319 KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
320 R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
321 dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
322 dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
323 src_attr_mask, src_attr));
324 }
325
326 Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
327 KProcessPageTable& src_page_table, KMemoryPermission test_perm,
328 KMemoryState dst_state, bool send) {
329 R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table,
330 test_perm, dst_state, send));
331 }
332
333 Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
334 R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
335 }
336
337 Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
338 R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
339 }
340
341 Result MapPhysicalMemory(KProcessAddress address, size_t size) {
342 R_RETURN(m_page_table.MapPhysicalMemory(address, size));
343 }
344
345 Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
346 R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
347 }
348
349 Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
350 R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
351 }
352
353 Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
354 R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
355 }
356
357 Result UnmapProcessMemory(KProcessAddress dst_address, size_t size,
358 KProcessPageTable& src_page_table, KProcessAddress src_address) {
359 R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table,
360 src_address));
361 }
362
363 bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) {
364 return m_page_table.GetPhysicalAddress(out, address);
365 }
366
367 bool Contains(KProcessAddress addr, size_t size) const {
368 return m_page_table.Contains(addr, size);
369 }
370
371 bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
372 return m_page_table.IsInAliasRegion(addr, size);
373 }
374 bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
375 return m_page_table.IsInHeapRegion(addr, size);
376 }
377 bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
378 return m_page_table.IsInUnsafeAliasRegion(addr, size);
379 }
380
381 bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
382 return m_page_table.CanContain(addr, size, state);
383 }
384
385 KProcessAddress GetAddressSpaceStart() const {
386 return m_page_table.GetAddressSpaceStart();
387 }
388 KProcessAddress GetHeapRegionStart() const {
389 return m_page_table.GetHeapRegionStart();
390 }
391 KProcessAddress GetAliasRegionStart() const {
392 return m_page_table.GetAliasRegionStart();
393 }
394 KProcessAddress GetStackRegionStart() const {
395 return m_page_table.GetStackRegionStart();
396 }
397 KProcessAddress GetKernelMapRegionStart() const {
398 return m_page_table.GetKernelMapRegionStart();
399 }
400 KProcessAddress GetCodeRegionStart() const {
401 return m_page_table.GetCodeRegionStart();
402 }
403 KProcessAddress GetAliasCodeRegionStart() const {
404 return m_page_table.GetAliasCodeRegionStart();
405 }
406
407 size_t GetAddressSpaceSize() const {
408 return m_page_table.GetAddressSpaceSize();
409 }
410 size_t GetHeapRegionSize() const {
411 return m_page_table.GetHeapRegionSize();
412 }
413 size_t GetAliasRegionSize() const {
414 return m_page_table.GetAliasRegionSize();
415 }
416 size_t GetStackRegionSize() const {
417 return m_page_table.GetStackRegionSize();
418 }
419 size_t GetKernelMapRegionSize() const {
420 return m_page_table.GetKernelMapRegionSize();
421 }
422 size_t GetCodeRegionSize() const {
423 return m_page_table.GetCodeRegionSize();
424 }
425 size_t GetAliasCodeRegionSize() const {
426 return m_page_table.GetAliasCodeRegionSize();
427 }
428
429 size_t GetNormalMemorySize() const {
430 return m_page_table.GetNormalMemorySize();
431 }
432
433 size_t GetCodeSize() const {
434 return m_page_table.GetCodeSize();
435 }
436 size_t GetCodeDataSize() const {
437 return m_page_table.GetCodeDataSize();
438 }
439
440 size_t GetAliasCodeSize() const {
441 return m_page_table.GetAliasCodeSize();
442 }
443 size_t GetAliasCodeDataSize() const {
444 return m_page_table.GetAliasCodeDataSize();
445 }
446
447 u32 GetAllocateOption() const {
448 return m_page_table.GetAllocateOption();
449 }
450
451 u32 GetAddressSpaceWidth() const {
452 return m_page_table.GetAddressSpaceWidth();
453 }
454
455 KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) {
456 return m_page_table.GetHeapPhysicalAddress(address);
457 }
458
459 u8* GetHeapVirtualPointer(KPhysicalAddress address) {
460 return m_page_table.GetHeapVirtualPointer(address);
461 }
462
463 KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) {
464 return m_page_table.GetHeapVirtualAddress(address);
465 }
466
467 KBlockInfoManager* GetBlockInfoManager() {
468 return m_page_table.GetBlockInfoManager();
469 }
470
471 KPageTable& GetBasePageTable() {
472 return m_page_table;
473 }
474
475 const KPageTable& GetBasePageTable() const {
476 return m_page_table;
477 }
478};
479
480} // namespace Kernel
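The KProcessPageTable facade above forwards each of the calls shown to its m_page_table member; the main caller-visible change is the out-parameter style. A minimal sketch of that calling convention (the process pointer and virtual address are placeholders; the pattern mirrors the KThreadLocalPage::Finalize hunk further down):

    KPhysicalAddress phys_addr{};
    if (process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), virt_addr)) {
        // phys_addr is only meaningful when GetPhysicalAddress returns true.
        R_TRY(process->GetPageTable().UnmapPages(virt_addr, 1, KMemoryState::ThreadLocal));
    }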
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index c64ceb530..3ea653163 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) {
383 if (event != nullptr) { 383 if (event != nullptr) {
384 // // Get the client process/page table. 384 // // Get the client process/page table.
385 // KProcess *client_process = client_thread->GetOwnerProcess(); 385 // KProcess *client_process = client_thread->GetOwnerProcess();
386 // KPageTable *client_page_table = std::addressof(client_process->PageTable()); 386 // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable());
387 387
388 // // If we need to, reply with an async error. 388 // // If we need to, reply with an async error.
389 // if (R_FAILED(client_result)) { 389 // if (R_FAILED(client_result)) {
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index 07e92aa80..b51941faf 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l
40 40
41 // Get resource pointer. 41 // Get resource pointer.
42 KPhysicalAddress resource_paddr = 42 KPhysicalAddress resource_paddr =
43 KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address); 43 KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address);
44 auto* resource = 44 auto* resource =
45 m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr); 45 m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
46 46
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 2c45b4232..a632d1634 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
37 37
38Result KThreadLocalPage::Finalize() { 38Result KThreadLocalPage::Finalize() {
39 // Get the physical address of the page. 39 // Get the physical address of the page.
40 const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr); 40 KPhysicalAddress phys_addr{};
41 ASSERT(phys_addr); 41 ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr));
42 42
43 // Unmap the page. 43 // Unmap the page.
44 R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); 44 R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
deleted file mode 100644
index 773319ad8..000000000
--- a/src/core/hle/kernel/process_capability.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include <bit>
5
6#include "common/bit_util.h"
7#include "common/logging/log.h"
8#include "core/hle/kernel/k_handle_table.h"
9#include "core/hle/kernel/k_page_table.h"
10#include "core/hle/kernel/process_capability.h"
11#include "core/hle/kernel/svc_results.h"
12
13namespace Kernel {
14namespace {
15
16// clang-format off
17
18// Shift offsets for kernel capability types.
19enum : u32 {
20 CapabilityOffset_PriorityAndCoreNum = 3,
21 CapabilityOffset_Syscall = 4,
22 CapabilityOffset_MapPhysical = 6,
23 CapabilityOffset_MapIO = 7,
24 CapabilityOffset_MapRegion = 10,
25 CapabilityOffset_Interrupt = 11,
26 CapabilityOffset_ProgramType = 13,
27 CapabilityOffset_KernelVersion = 14,
28 CapabilityOffset_HandleTableSize = 15,
29 CapabilityOffset_Debug = 16,
30};
31
32// Combined mask of all parameters that may be initialized only once.
33constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
34 (1U << CapabilityOffset_ProgramType) |
35 (1U << CapabilityOffset_KernelVersion) |
36 (1U << CapabilityOffset_HandleTableSize) |
37 (1U << CapabilityOffset_Debug);
38
39// Packed kernel version indicating 10.4.0
40constexpr u32 PackedKernelVersion = 0x520000;
41
42// Indicates possible types of capabilities that can be specified.
43enum class CapabilityType : u32 {
44 Unset = 0U,
45 PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
46 Syscall = (1U << CapabilityOffset_Syscall) - 1,
47 MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1,
48 MapIO = (1U << CapabilityOffset_MapIO) - 1,
49 MapRegion = (1U << CapabilityOffset_MapRegion) - 1,
50 Interrupt = (1U << CapabilityOffset_Interrupt) - 1,
51 ProgramType = (1U << CapabilityOffset_ProgramType) - 1,
52 KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1,
53 HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1,
54 Debug = (1U << CapabilityOffset_Debug) - 1,
55 Ignorable = 0xFFFFFFFFU,
56};
57
58// clang-format on
59
60constexpr CapabilityType GetCapabilityType(u32 value) {
61 return static_cast<CapabilityType>((~value & (value + 1)) - 1);
62}
63
64u32 GetFlagBitOffset(CapabilityType type) {
65 const auto value = static_cast<u32>(type);
66 return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
67}
68
69} // Anonymous namespace
70
71Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
72 std::size_t num_capabilities,
73 KPageTable& page_table) {
74 Clear();
75
76 // Allow all cores and priorities.
77 core_mask = 0xF;
78 priority_mask = 0xFFFFFFFFFFFFFFFF;
79 kernel_version = PackedKernelVersion;
80
81 return ParseCapabilities(capabilities, num_capabilities, page_table);
82}
83
84Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
85 std::size_t num_capabilities,
86 KPageTable& page_table) {
87 Clear();
88
89 return ParseCapabilities(capabilities, num_capabilities, page_table);
90}
91
92void ProcessCapabilities::InitializeForMetadatalessProcess() {
93 // Allow all cores and priorities
94 core_mask = 0xF;
95 priority_mask = 0xFFFFFFFFFFFFFFFF;
96 kernel_version = PackedKernelVersion;
97
98 // Allow all system calls and interrupts.
99 svc_capabilities.set();
100 interrupt_capabilities.set();
101
102 // Allow using the maximum possible amount of handles
103 handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
104
105 // Allow all debugging capabilities.
106 is_debuggable = true;
107 can_force_debug = true;
108}
109
110Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
111 KPageTable& page_table) {
112 u32 set_flags = 0;
113 u32 set_svc_bits = 0;
114
115 for (std::size_t i = 0; i < num_capabilities; ++i) {
116 const u32 descriptor = capabilities[i];
117 const auto type = GetCapabilityType(descriptor);
118
119 if (type == CapabilityType::MapPhysical) {
120 i++;
121
122 // The MapPhysical type uses two descriptor flags for its parameters.
123 // If there's only one, then there's a problem.
124 if (i >= num_capabilities) {
125 LOG_ERROR(Kernel, "Invalid combination! i={}", i);
126 return ResultInvalidCombination;
127 }
128
129 const auto size_flags = capabilities[i];
130 if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
131 LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
132 return ResultInvalidCombination;
133 }
134
135 const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
136 if (result.IsError()) {
137 LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
138 descriptor, size_flags);
139 return result;
140 }
141 } else {
142 const auto result =
143 ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
144 if (result.IsError()) {
145 LOG_ERROR(
146 Kernel,
147 "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
148 set_flags, set_svc_bits, descriptor);
149 return result;
150 }
151 }
152 }
153
154 return ResultSuccess;
155}
156
157Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
158 KPageTable& page_table) {
159 const auto type = GetCapabilityType(flag);
160
161 if (type == CapabilityType::Unset) {
162 return ResultInvalidArgument;
163 }
164
165 // Bail early on ignorable entries; as one would expect,
166 // ignorable descriptors can be ignored.
167 if (type == CapabilityType::Ignorable) {
168 return ResultSuccess;
169 }
170
171 // Ensure that the given flag hasn't already been initialized before.
172 // If it has been, then bail.
173 const u32 flag_length = GetFlagBitOffset(type);
174 const u32 set_flag = 1U << flag_length;
175 if ((set_flag & set_flags & InitializeOnceMask) != 0) {
176 LOG_ERROR(Kernel,
177 "Attempted to initialize flags that may only be initialized once. set_flags={}",
178 set_flags);
179 return ResultInvalidCombination;
180 }
181 set_flags |= set_flag;
182
183 switch (type) {
184 case CapabilityType::PriorityAndCoreNum:
185 return HandlePriorityCoreNumFlags(flag);
186 case CapabilityType::Syscall:
187 return HandleSyscallFlags(set_svc_bits, flag);
188 case CapabilityType::MapIO:
189 return HandleMapIOFlags(flag, page_table);
190 case CapabilityType::MapRegion:
191 return HandleMapRegionFlags(flag, page_table);
192 case CapabilityType::Interrupt:
193 return HandleInterruptFlags(flag);
194 case CapabilityType::ProgramType:
195 return HandleProgramTypeFlags(flag);
196 case CapabilityType::KernelVersion:
197 return HandleKernelVersionFlags(flag);
198 case CapabilityType::HandleTableSize:
199 return HandleHandleTableFlags(flag);
200 case CapabilityType::Debug:
201 return HandleDebugFlags(flag);
202 default:
203 break;
204 }
205
206 LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
207 return ResultInvalidArgument;
208}
209
210void ProcessCapabilities::Clear() {
211 svc_capabilities.reset();
212 interrupt_capabilities.reset();
213
214 core_mask = 0;
215 priority_mask = 0;
216
217 handle_table_size = 0;
218 kernel_version = 0;
219
220 program_type = ProgramType::SysModule;
221
222 is_debuggable = false;
223 can_force_debug = false;
224}
225
226Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
227 if (priority_mask != 0 || core_mask != 0) {
228 LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
229 priority_mask, core_mask);
230 return ResultInvalidArgument;
231 }
232
233 const u32 core_num_min = (flags >> 16) & 0xFF;
234 const u32 core_num_max = (flags >> 24) & 0xFF;
235 if (core_num_min > core_num_max) {
236 LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
237 core_num_min, core_num_max);
238 return ResultInvalidCombination;
239 }
240
241 const u32 priority_min = (flags >> 10) & 0x3F;
242 const u32 priority_max = (flags >> 4) & 0x3F;
243 if (priority_min > priority_max) {
244 LOG_ERROR(Kernel,
245 "Priority min is greater than priority max! priority_min={}, priority_max={}",
246 priority_min, priority_max);
247 return ResultInvalidCombination;
248 }
249
250 // The switch only has 4 usable cores.
251 if (core_num_max >= 4) {
252 LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
253 return ResultInvalidCoreId;
254 }
255
256 const auto make_mask = [](u64 min, u64 max) {
257 const u64 range = max - min + 1;
258 const u64 mask = (1ULL << range) - 1;
259
260 return mask << min;
261 };
262
263 core_mask = make_mask(core_num_min, core_num_max);
264 priority_mask = make_mask(priority_min, priority_max);
265 return ResultSuccess;
266}
267
268Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
269 const u32 index = flags >> 29;
270 const u32 svc_bit = 1U << index;
271
272 // If we've already set this svc before, bail.
273 if ((set_svc_bits & svc_bit) != 0) {
274 return ResultInvalidCombination;
275 }
276 set_svc_bits |= svc_bit;
277
278 const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
279 for (u32 i = 0; i < 24; ++i) {
280 const u32 svc_number = index * 24 + i;
281
282 if ((svc_mask & (1U << i)) == 0) {
283 continue;
284 }
285
286 svc_capabilities[svc_number] = true;
287 }
288
289 return ResultSuccess;
290}
291
292Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
293 KPageTable& page_table) {
294 // TODO(Lioncache): Implement once the memory manager can handle this.
295 return ResultSuccess;
296}
297
298Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
299 // TODO(Lioncache): Implement once the memory manager can handle this.
300 return ResultSuccess;
301}
302
303Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
304 // TODO(Lioncache): Implement once the memory manager can handle this.
305 return ResultSuccess;
306}
307
308Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
309 constexpr u32 interrupt_ignore_value = 0x3FF;
310 const u32 interrupt0 = (flags >> 12) & 0x3FF;
311 const u32 interrupt1 = (flags >> 22) & 0x3FF;
312
313 for (u32 interrupt : {interrupt0, interrupt1}) {
314 if (interrupt == interrupt_ignore_value) {
315 continue;
316 }
317
318 // NOTE:
319 // This should be checking a generic interrupt controller value
320 // as part of the calculation, however, given we don't currently
321 // emulate that, it's sufficient to mark every interrupt as defined.
322
323 if (interrupt >= interrupt_capabilities.size()) {
324 LOG_ERROR(Kernel, "Process interrupt capability is out of range! interrupt={}",
325 interrupt);
326 return ResultOutOfRange;
327 }
328
329 interrupt_capabilities[interrupt] = true;
330 }
331
332 return ResultSuccess;
333}
334
335Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
336 const u32 reserved = flags >> 17;
337 if (reserved != 0) {
338 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
339 return ResultReservedUsed;
340 }
341
342 program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
343 return ResultSuccess;
344}
345
346Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
347 // Yes, the internal member variable is checked in the actual kernel here.
348 // This might look odd for options that are only allowed to be initialized
349 // just once; however, the kernel has a separate initialization function for
350 // kernel processes and userland processes. The kernel variant sets this
351 // member variable ahead of time.
352
353 const u32 major_version = kernel_version >> 19;
354
355 if (major_version != 0 || flags < 0x80000) {
356 LOG_ERROR(Kernel,
357 "Kernel version is non zero or flags are too small! major_version={}, flags={}",
358 major_version, flags);
359 return ResultInvalidArgument;
360 }
361
362 kernel_version = flags;
363 return ResultSuccess;
364}
365
366Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
367 const u32 reserved = flags >> 26;
368 if (reserved != 0) {
369 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
370 return ResultReservedUsed;
371 }
372
373 handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
374 return ResultSuccess;
375}
376
377Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
378 const u32 reserved = flags >> 19;
379 if (reserved != 0) {
380 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
381 return ResultReservedUsed;
382 }
383
384 is_debuggable = (flags & 0x20000) != 0;
385 can_force_debug = (flags & 0x40000) != 0;
386 return ResultSuccess;
387}
388
389} // namespace Kernel
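The removed parser's type detection relies on the run of trailing set bits in each capability word; a short illustration of GetCapabilityType with hypothetical descriptor values:

    // (~v & (v + 1)) isolates the lowest clear bit; subtracting 1 leaves the run of
    // trailing ones, which is exactly the CapabilityType value.
    static_assert(GetCapabilityType(0x07) == CapabilityType::PriorityAndCoreNum); // ...0111
    static_assert(GetCapabilityType(0x0F) == CapabilityType::Syscall);            // ..01111
    // Syscall descriptors additionally pack an index in bits 29-31 and a 24-bit mask in
    // bits 5-28; mask bit i enables svc number (index * 24 + i), as HandleSyscallFlags shows.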
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
deleted file mode 100644
index ff05dc5ff..000000000
--- a/src/core/hle/kernel/process_capability.h
+++ /dev/null
@@ -1,266 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <bitset>
7
8#include "common/common_types.h"
9
10union Result;
11
12namespace Kernel {
13
14class KPageTable;
15
16/// The possible types of programs that may be indicated
17/// by the program type capability descriptor.
18enum class ProgramType {
19 SysModule,
20 Application,
21 Applet,
22};
23
24/// Handles kernel capability descriptors that are provided by
25/// application metadata. These descriptors provide information
26/// that alters certain parameters for the kernel process instance
27/// that will run said application (or applet).
28///
29/// Capabilities are a sequence of flag descriptors, that indicate various
30/// configurations and constraints for a particular process.
31///
32/// Flag types are indicated by a sequence of set low bits. E.g. the
33/// types are indicated with the low bits as follows (where x indicates "don't care"):
34///
35/// - Priority and core mask : 0bxxxxxxxxxxxx0111
36/// - Allowed service call mask: 0bxxxxxxxxxxx01111
37/// - Map physical memory : 0bxxxxxxxxx0111111
38/// - Map IO memory : 0bxxxxxxxx01111111
39/// - Interrupts : 0bxxxx011111111111
40/// - Application type : 0bxx01111111111111
41/// - Kernel version : 0bx011111111111111
42/// - Handle table size : 0b0111111111111111
43/// - Debugger flags : 0b1111111111111111
44///
45/// Each type value is essentially (1 << bit offset) minus 1, i.e. a mask of set low bits.
46/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000)
47/// minus one (7 -> 0b0111).
48///
49/// An example of a bit layout (using the map physical layout):
50/// <example>
51/// The MapPhysical type indicates a sequence entry pair of:
52///
53/// [initial, memory_flags], where:
54///
55/// initial:
56/// bits:
57/// 7-24: Starting page to map memory at.
58/// 25 : Indicates if the memory should be mapped as read only.
59///
60/// memory_flags:
61/// bits:
62/// 7-20 : Number of pages to map
63/// 21-25: Seems to be reserved (still checked against though)
64/// 26 : Whether or not the memory being mapped is IO memory, or physical memory
65/// </example>
66///
67class ProcessCapabilities {
68public:
69 using InterruptCapabilities = std::bitset<1024>;
70 using SyscallCapabilities = std::bitset<192>;
71
72 ProcessCapabilities() = default;
73 ProcessCapabilities(const ProcessCapabilities&) = delete;
74 ProcessCapabilities(ProcessCapabilities&&) = default;
75
76 ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
77 ProcessCapabilities& operator=(ProcessCapabilities&&) = default;
78
79 /// Initializes this process capabilities instance for a kernel process.
80 ///
81 /// @param capabilities The capabilities to parse
82 /// @param num_capabilities The number of capabilities to parse.
83 /// @param page_table The memory manager to use for handling any mapping-related
84 /// operations (such as mapping IO memory, etc).
85 ///
86 /// @returns ResultSuccess if this capabilities instance was able to be initialized,
87 /// otherwise, an error code upon failure.
88 ///
89 Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
90 KPageTable& page_table);
91
92 /// Initializes this process capabilities instance for a userland process.
93 ///
94 /// @param capabilities The capabilities to parse.
95 /// @param num_capabilities The total number of capabilities to parse.
96 /// @param page_table The memory manager to use for handling any mapping-related
97 /// operations (such as mapping IO memory, etc).
98 ///
99 /// @returns ResultSuccess if this capabilities instance was able to be initialized,
100 /// otherwise, an error code upon failure.
101 ///
102 Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
103 KPageTable& page_table);
104
105 /// Initializes this process capabilities instance for a process that does not
106 /// have any metadata to parse.
107 ///
108 /// This is necessary, as we allow running raw executables, and the internal
109 /// kernel process capabilities also determine what CPU cores the process is
110 /// allowed to run on, and what priorities are allowed for threads. It also
111 /// determines the max handle table size, what the program type is, whether or
112 /// not the process can be debugged, or whether it's possible for a process to
113 /// forcibly debug another process.
114 ///
115 /// Given the above, this essentially enables all capabilities across the board
116 /// for the process. It allows the process to:
117 ///
118 /// - Run on any core
119 /// - Use any thread priority
120 /// - Use the maximum amount of handles a process is allowed to.
121 /// - Be debuggable
122 /// - Forcibly debug other processes.
123 ///
124 /// Note that this is not a behavior that the kernel allows a process to do via
125 /// a single function like this. This is yuzu-specific behavior to handle
126 /// executables with no capability descriptors whatsoever to derive behavior from.
127 /// It being yuzu-specific is why this is also not the default behavior and not
128 /// done by default in the constructor.
129 ///
130 void InitializeForMetadatalessProcess();
131
132 /// Gets the allowable core mask
133 u64 GetCoreMask() const {
134 return core_mask;
135 }
136
137 /// Gets the allowable priority mask
138 u64 GetPriorityMask() const {
139 return priority_mask;
140 }
141
142 /// Gets the SVC access permission bits
143 const SyscallCapabilities& GetServiceCapabilities() const {
144 return svc_capabilities;
145 }
146
147 /// Gets the valid interrupt bits.
148 const InterruptCapabilities& GetInterruptCapabilities() const {
149 return interrupt_capabilities;
150 }
151
152 /// Gets the program type for this process.
153 ProgramType GetProgramType() const {
154 return program_type;
155 }
156
157 /// Gets the number of total allowable handles for the process' handle table.
158 s32 GetHandleTableSize() const {
159 return handle_table_size;
160 }
161
162 /// Gets the kernel version value.
163 u32 GetKernelVersion() const {
164 return kernel_version;
165 }
166
167 /// Whether or not this process can be debugged.
168 bool IsDebuggable() const {
169 return is_debuggable;
170 }
171
172 /// Whether or not this process can forcibly debug another
173 /// process, even if that process is not considered debuggable.
174 bool CanForceDebug() const {
175 return can_force_debug;
176 }
177
178private:
179 /// Attempts to parse a given sequence of capability descriptors.
180 ///
181 /// @param capabilities The sequence of capability descriptors to parse.
182 /// @param num_capabilities The number of descriptors within the given sequence.
183 /// @param page_table The memory manager that will perform any memory
184 /// mapping if necessary.
185 ///
186 /// @return ResultSuccess if no errors occur, otherwise an error code.
187 ///
188 Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
189 KPageTable& page_table);
190
191 /// Attempts to parse a capability descriptor that is only represented by a
192 /// single flag set.
193 ///
194 /// @param set_flags Running set of flags that are used to catch
195 /// flags being initialized more than once when they shouldn't be.
196 /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
197 /// @param flag The flag to attempt to parse.
198 /// @param page_table The memory manager that will perform any memory
199 /// mapping if necessary.
200 ///
201 /// @return ResultSuccess if no errors occurred, otherwise an error code.
202 ///
203 Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
204 KPageTable& page_table);
205
206 /// Clears the internal state of this process capability instance. This is necessary
207 /// to have a sane starting point due to us allowing running executables without
208 /// configuration metadata. We assume a process is not going to have metadata,
209 /// and if it turns out that the process does, in fact, have metadata, then
210 /// we attempt to parse it. Thus, we need this to reset data members back to
211 /// a good state.
212 ///
213 /// DO NOT ever make this a public member function. This isn't an invariant
214 /// anything external should depend upon (and if anything comes to rely on it,
215 /// you should immediately be questioning the design of that thing, not this
216 /// class. If the kernel itself can run without depending on behavior like that,
217 /// then so can yuzu).
218 ///
219 void Clear();
220
221 /// Handles flags related to the priority and core number capability flags.
222 Result HandlePriorityCoreNumFlags(u32 flags);
223
224 /// Handles flags related to determining the allowable SVC mask.
225 Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
226
227 /// Handles flags related to mapping physical memory pages.
228 Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
229
230 /// Handles flags related to mapping IO pages.
231 Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
232
233 /// Handles flags related to mapping physical memory regions.
234 Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
235
236 /// Handles flags related to the interrupt capability flags.
237 Result HandleInterruptFlags(u32 flags);
238
239 /// Handles flags related to the program type.
240 Result HandleProgramTypeFlags(u32 flags);
241
242 /// Handles flags related to the handle table size.
243 Result HandleHandleTableFlags(u32 flags);
244
245 /// Handles flags related to the kernel version capability flags.
246 Result HandleKernelVersionFlags(u32 flags);
247
248 /// Handles flags related to debug-specific capabilities.
249 Result HandleDebugFlags(u32 flags);
250
251 SyscallCapabilities svc_capabilities;
252 InterruptCapabilities interrupt_capabilities;
253
254 u64 core_mask = 0;
255 u64 priority_mask = 0;
256
257 s32 handle_table_size = 0;
258 u32 kernel_version = 0;
259
260 ProgramType program_type = ProgramType::SysModule;
261
262 bool is_debuggable = false;
263 bool can_force_debug = false;
264};
265
266} // namespace Kernel
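The removed header documents the MapPhysical descriptor pair layout; a hedged decoding sketch that follows those documented bit ranges (the helper names are illustrative only, not part of any yuzu API):

    constexpr u64 GetMapPhysicalStartPage(u32 initial) {
        return (initial >> 7) & 0x3FFFF;         // bits 7-24: starting page to map at
    }
    constexpr bool IsMapPhysicalReadOnly(u32 initial) {
        return ((initial >> 25) & 1) != 0;       // bit 25: map as read-only
    }
    constexpr u64 GetMapPhysicalNumPages(u32 memory_flags) {
        return (memory_flags >> 7) & 0x3FFF;     // bits 7-20: number of pages to map
    }
    constexpr bool IsMapPhysicalIo(u32 memory_flags) {
        return ((memory_flags >> 26) & 1) != 0;  // bit 26: IO memory rather than physical memory
    }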
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 97f1210de..4ca62860d 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) {
29// Helper function that performs the common sanity checks for svcMapMemory 29// Helper function that performs the common sanity checks for svcMapMemory
30// and svcUnmapMemory. This is doable, as both functions perform their sanitizing 30// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
31// in the same order. 31// in the same order.
32Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) { 32Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr,
33 u64 size) {
33 if (!Common::Is4KBAligned(dst_addr)) { 34 if (!Common::Is4KBAligned(dst_addr)) {
34 LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); 35 LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
35 R_THROW(ResultInvalidAddress); 36 R_THROW(ResultInvalidAddress);
@@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
123 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); 124 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
124 125
125 // Set the memory attribute. 126 // Set the memory attribute.
126 R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr)); 127 R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
128 static_cast<KMemoryAttribute>(attr)));
127} 129}
128 130
129/// Maps a memory range into a different range. 131/// Maps a memory range into a different range.
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index 99330d02a..793e9f8d0 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
16 R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); 16 R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
17 17
18 // Set the heap size. 18 // Set the heap size.
19 R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size)); 19 KProcessAddress address{};
20 R_TRY(GetCurrentProcess(system.Kernel())
21 .GetPageTable()
22 .SetHeapSize(std::addressof(address), size));
23
24 // We succeeded.
25 *out_address = GetInteger(address);
26 R_SUCCEED();
20} 27}
21 28
22/// Maps memory at a desired address 29/// Maps memory at a desired address
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index 07cd48175..e1427947b 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
247 R_THROW(ResultInvalidCurrentMemory); 247 R_THROW(ResultInvalidCurrentMemory);
248 } 248 }
249 249
250 R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size, 250 R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size));
251 KPageTable::ICacheInvalidationStrategy::InvalidateAll));
252} 251}
253 252
254Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, 253Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 51af06e97..816dcb8d0 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
31 } 31 }
32 32
33 auto& current_memory{GetCurrentMemory(system.Kernel())}; 33 auto& current_memory{GetCurrentMemory(system.Kernel())};
34 const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()};
35 34
36 current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info)); 35 KMemoryInfo mem_info;
36 R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address));
37 37
38 //! This is supposed to be part of the QueryInfo call. 38 const auto svc_mem_info = mem_info.GetSvcMemoryInfo();
39 *out_page_info = {}; 39 current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info));
40 40
41 R_SUCCEED(); 41 R_SUCCEED();
42} 42}
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index dd0b27f47..749f51f69 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
407 407
408/// Evaluates a boolean expression, and succeeds if that expression is true. 408/// Evaluates a boolean expression, and succeeds if that expression is true.
409#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) 409#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
410
411#define R_TRY_CATCH(res_expr) \
412 { \
413 const auto R_CURRENT_RESULT = (res_expr); \
414 if (R_FAILED(R_CURRENT_RESULT)) { \
415 if (false)
416
417#define R_END_TRY_CATCH \
418 else if (R_FAILED(R_CURRENT_RESULT)) { \
419 R_THROW(R_CURRENT_RESULT); \
420 } \
421 } \
422 }
423
424#define R_CATCH_ALL() \
425 } \
426 else if (R_FAILED(R_CURRENT_RESULT)) { \
427 if (true)
428
429#define R_CATCH(res_expr) \
430 } \
431 else if ((res_expr) == (R_CURRENT_RESULT)) { \
432 if (true)
433
434#define R_CONVERT(catch_type, convert_type) \
435 R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); }
436
437#define R_CONVERT_ALL(convert_type) \
438 R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); }
439
440#define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr))
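The new macros are written as a brace-delimited chain; a minimal usage sketch (SomeOperation and the chosen result codes are placeholders):

    R_TRY_CATCH(SomeOperation()) {
        R_CATCH(ResultInvalidAddress) {
            // Recover from this specific failure; execution continues after the chain.
        }
        R_CONVERT(ResultOutOfRange, ResultInvalidCurrentMemory)
    } R_END_TRY_CATCH;

Any other failing result reaches R_END_TRY_CATCH and is rethrown via R_THROW; a successful result skips the whole chain.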
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index c73035c77..97b6a9385 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -286,9 +286,14 @@ public:
286 rb.Push(ResultSuccess); 286 rb.Push(ResultSuccess);
287 } 287 }
288 288
289 bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const { 289 bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start,
290 std::size_t size) const {
290 const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; 291 const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
291 const auto start_info{page_table.QueryInfo(start - 1)}; 292
293 Kernel::KMemoryInfo start_info;
294 Kernel::Svc::PageInfo page_info;
295 R_ASSERT(
296 page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1));
292 297
293 if (start_info.GetState() != Kernel::KMemoryState::Free) { 298 if (start_info.GetState() != Kernel::KMemoryState::Free) {
294 return {}; 299 return {};
@@ -298,7 +303,9 @@ public:
298 return {}; 303 return {};
299 } 304 }
300 305
301 const auto end_info{page_table.QueryInfo(start + size)}; 306 Kernel::KMemoryInfo end_info;
307 R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info),
308 start + size));
302 309
303 if (end_info.GetState() != Kernel::KMemoryState::Free) { 310 if (end_info.GetState() != Kernel::KMemoryState::Free) {
304 return {}; 311 return {};
@@ -307,7 +314,7 @@ public:
307 return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); 314 return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
308 } 315 }
309 316
310 Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) { 317 Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) {
311 size = Common::AlignUp(size, Kernel::PageSize); 318 size = Common::AlignUp(size, Kernel::PageSize);
312 size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; 319 size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;
313 320
@@ -391,12 +398,8 @@ public:
391 398
392 if (bss_size) { 399 if (bss_size) {
393 auto block_guard = detail::ScopeExit([&] { 400 auto block_guard = detail::ScopeExit([&] {
394 page_table.UnmapCodeMemory( 401 page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
395 addr + nro_size, bss_addr, bss_size, 402 page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
396 Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
397 page_table.UnmapCodeMemory(
398 addr, nro_addr, nro_size,
399 Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
400 }); 403 });
401 404
402 const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)}; 405 const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)};
@@ -578,21 +581,17 @@ public:
578 auto& page_table{system.ApplicationProcess()->GetPageTable()}; 581 auto& page_table{system.ApplicationProcess()->GetPageTable()};
579 582
580 if (info.bss_size != 0) { 583 if (info.bss_size != 0) {
581 R_TRY(page_table.UnmapCodeMemory( 584 R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size +
582 info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address, 585 info.data_size,
583 info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); 586 info.bss_address, info.bss_size));
584 } 587 }
585 588
586 R_TRY(page_table.UnmapCodeMemory( 589 R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
587 info.nro_address + info.text_size + info.ro_size, 590 info.src_addr + info.text_size + info.ro_size,
588 info.src_addr + info.text_size + info.ro_size, info.data_size, 591 info.data_size));
589 Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); 592 R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
590 R_TRY(page_table.UnmapCodeMemory( 593 info.src_addr + info.text_size, info.ro_size));
591 info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size, 594 R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
592 Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
593 R_TRY(page_table.UnmapCodeMemory(
594 info.nro_address, info.src_addr, info.text_size,
595 Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
596 return ResultSuccess; 595 return ResultSuccess;
597 } 596 }
598 597
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fa5273402..84b60a928 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -41,7 +41,7 @@ struct Memory::Impl {
41 explicit Impl(Core::System& system_) : system{system_} {} 41 explicit Impl(Core::System& system_) : system{system_} {}
42 42
43 void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { 43 void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
44 current_page_table = &process.GetPageTable().PageTableImpl(); 44 current_page_table = &process.GetPageTable().GetImpl();
45 current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); 45 current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
46 46
47 const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth(); 47 const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth();
@@ -195,7 +195,7 @@ struct Memory::Impl {
195 195
196 bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, 196 bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
197 auto on_memory, auto on_rasterizer, auto increment) { 197 auto on_memory, auto on_rasterizer, auto increment) {
198 const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl(); 198 const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
199 std::size_t remaining_size = size; 199 std::size_t remaining_size = size;
200 std::size_t page_index = addr >> YUZU_PAGEBITS; 200 std::size_t page_index = addr >> YUZU_PAGEBITS;
201 std::size_t page_offset = addr & YUZU_PAGEMASK; 201 std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b
826 826
827bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { 827bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
828 const Kernel::KProcess& process = *system.ApplicationProcess(); 828 const Kernel::KProcess& process = *system.ApplicationProcess();
829 const auto& page_table = process.GetPageTable().PageTableImpl(); 829 const auto& page_table = process.GetPageTable().GetImpl();
830 const size_t page = vaddr >> YUZU_PAGEBITS; 830 const size_t page = vaddr >> YUZU_PAGEBITS;
831 if (page >= page_table.pointers.size()) { 831 if (page >= page_table.pointers.size()) {
832 return false; 832 return false;