| commit | 40d4e9543b5792dfa761b44d4c6017d5692f77a3 (patch) |
|---|---|
| tree | 019cb6ca42ee3b89b400bb624e933abf695e150c |
| parent | Merge pull request #11981 from lucasreis1/patch (diff) |
| parent | k_page_table: fix shutdown (diff) |
| author | 2023-11-11 09:45:29 -0500 |
| committer | 2023-11-11 09:45:29 -0500 |
Merge pull request #11914 from liamwhite/newer-kpagetable
kernel: add KPageTableBase
31 files changed, 7202 insertions, 4879 deletions
```diff
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 4b1690269..166dc3dce 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -9,12 +9,12 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-                               u64 address) const {
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                               Common::ProcessAddress address) const {
     // Setup invalid defaults.
-    out_entry.phys_addr = 0;
-    out_entry.block_size = page_size;
-    out_context.next_page = 0;
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+    out_context->next_page = 0;
 
     // Validate that we can read the actual entry.
     const auto page = address / page_size;
@@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_
     }
 
     // Populate the results.
-    out_entry.phys_addr = phys_addr + address;
-    out_context.next_page = page + 1;
-    out_context.next_offset = address + page_size;
+    out_entry->phys_addr = phys_addr + GetInteger(address);
+    out_context->next_page = page + 1;
+    out_context->next_offset = GetInteger(address) + page_size;
 
     return true;
 }
 
-bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
     // Setup invalid defaults.
-    out_entry.phys_addr = 0;
-    out_entry.block_size = page_size;
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
 
     // Validate that we can read the actual entry.
-    const auto page = context.next_page;
+    const auto page = context->next_page;
     if (page >= backing_addr.size()) {
         return false;
     }
@@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c
     }
 
     // Populate the results.
-    out_entry.phys_addr = phys_addr + context.next_offset;
-    context.next_page = page + 1;
-    context.next_offset += page_size;
+    out_entry->phys_addr = phys_addr + context->next_offset;
+    context->next_page = page + 1;
+    context->next_offset += page_size;
 
     return true;
 }
```
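The traversal API now returns its results through pointers and takes a typed `Common::ProcessAddress`. A minimal usage sketch (not from this commit; `DumpRange` and its loop bounds are illustrative):

```cpp
#include <cstdio>
#include <memory>

#include "common/page_table.h"

// Walk `num_pages` pages starting at `addr` and print each physical block.
// Sketch only: assumes an initialized Common::PageTable.
void DumpRange(const Common::PageTable& table, Common::ProcessAddress addr, size_t num_pages) {
    Common::PageTable::TraversalEntry entry;
    Common::PageTable::TraversalContext context;

    // BeginTraversal fills the out-parameters and returns false if the
    // starting address has no backing page.
    if (!table.BeginTraversal(std::addressof(entry), std::addressof(context), addr)) {
        return;
    }

    for (size_t i = 0; i < num_pages; i++) {
        std::printf("phys=%llx block_size=%zx\n",
                    static_cast<unsigned long long>(entry.phys_addr), entry.block_size);
        // ContinueTraversal resumes from the context saved by the previous call.
        if (!table.ContinueTraversal(std::addressof(entry), std::addressof(context))) {
            break;
        }
    }
}
```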
```diff
diff --git a/src/common/page_table.h b/src/common/page_table.h
index e653d52ad..5340f7d86 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -6,6 +6,7 @@
 #include <atomic>
 
 #include "common/common_types.h"
+#include "common/typed_address.h"
 #include "common/virtual_buffer.h"
 
 namespace Common {
@@ -100,9 +101,9 @@ struct PageTable {
     PageTable(PageTable&&) noexcept = default;
     PageTable& operator=(PageTable&&) noexcept = default;
 
-    bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
-                        u64 address) const;
-    bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+    bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                        Common::ProcessAddress address) const;
+    bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
 
     /**
      * Resizes the page table to be able to accommodate enough pages within
@@ -117,6 +118,16 @@ struct PageTable {
         return current_address_space_width_in_bits;
    }
 
+    bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr,
+                            Common::ProcessAddress virt_addr) const {
+        if (virt_addr > (1ULL << this->GetAddressSpaceBits())) {
+            return false;
+        }
+
+        *out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr);
+        return true;
+    }
+
     /**
      * Vector of memory pointers backing each page. An entry can only be non-null if the
      * corresponding attribute element is of type `Memory`.
```
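The new `GetPhysicalAddress` helper gives O(1) translation through the backing array. A hedged sketch of a caller (the `Translate` wrapper is hypothetical):

```cpp
#include <memory>
#include <optional>

#include "common/page_table.h"

// Translate a guest virtual address, treating out-of-range addresses as unmapped.
std::optional<Common::PhysicalAddress> Translate(const Common::PageTable& table,
                                                 Common::ProcessAddress virt_addr) {
    Common::PhysicalAddress phys_addr{};
    if (!table.GetPhysicalAddress(std::addressof(phys_addr), virt_addr)) {
        return std::nullopt;
    }
    return phys_addr;
}
```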
```diff
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index e4f499135..8be3bdd08 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -271,8 +271,9 @@ add_library(core STATIC
     hle/kernel/k_page_heap.h
     hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
-    hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_page_table_base.cpp
+    hle/kernel/k_page_table_base.h
     hle/kernel/k_page_table_manager.h
     hle/kernel/k_page_table_slab_heap.h
     hle/kernel/k_port.cpp
@@ -280,6 +281,7 @@ add_library(core STATIC
     hle/kernel/k_priority_queue.h
     hle/kernel/k_process.cpp
     hle/kernel/k_process.h
+    hle/kernel/k_process_page_table.h
     hle/kernel/k_readable_event.cpp
     hle/kernel/k_readable_event.h
     hle/kernel/k_resource_limit.cpp
@@ -330,8 +332,6 @@ add_library(core STATIC
     hle/kernel/physical_core.cpp
     hle/kernel/physical_core.h
     hle/kernel/physical_memory.h
-    hle/kernel/process_capability.cpp
-    hle/kernel/process_capability.h
     hle/kernel/slab_helpers.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
```
```diff
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 6f5f5156b..e9bf57895 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -727,29 +727,34 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory
     }
 }
 
-static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) {
-    Kernel::Svc::MemoryInfo mem_info;
+static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) {
+    Kernel::KMemoryInfo mem_info;
+    Kernel::Svc::MemoryInfo svc_mem_info;
+    Kernel::Svc::PageInfo page_info;
     VAddr cur_addr{base};
 
     // Expect: r-x Code (.text)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
-    if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-        mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+    if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+        svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
         return cur_addr - 1;
     }
 
     // Expect: r-- Code (.rodata)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
-    if (mem_info.state != Kernel::Svc::MemoryState::Code ||
-        mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+    if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+        svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
         return cur_addr - 1;
     }
 
     // Expect: rw- CodeData (.data)
-    mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-    cur_addr = mem_info.base_address + mem_info.size;
+    R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+    svc_mem_info = mem_info.GetSvcMemoryInfo();
+    cur_addr = svc_mem_info.base_address + svc_mem_info.size;
     return cur_addr - 1;
 }
 
@@ -767,7 +772,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 
     if (command_str == "get fastmem") {
         if (Settings::IsFastmemEnabled()) {
-            const auto& impl = page_table.PageTableImpl();
+            const auto& impl = page_table.GetImpl();
             const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
             const auto region_bits = impl.current_address_space_width_in_bits;
             const auto region_size = 1ULL << region_bits;
@@ -785,20 +790,22 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
         reply = fmt::format("Process: {:#x} ({})\n"
                             "Program Id: {:#018x}\n",
                             process->GetProcessId(), process->GetName(), process->GetProgramId());
-        reply += fmt::format("Layout:\n"
-                             "  Alias: {:#012x} - {:#012x}\n"
-                             "  Heap: {:#012x} - {:#012x}\n"
-                             "  Aslr: {:#012x} - {:#012x}\n"
-                             "  Stack: {:#012x} - {:#012x}\n"
-                             "Modules:\n",
-                             GetInteger(page_table.GetAliasRegionStart()),
-                             GetInteger(page_table.GetAliasRegionEnd()),
-                             GetInteger(page_table.GetHeapRegionStart()),
-                             GetInteger(page_table.GetHeapRegionEnd()),
-                             GetInteger(page_table.GetAliasCodeRegionStart()),
-                             GetInteger(page_table.GetAliasCodeRegionEnd()),
-                             GetInteger(page_table.GetStackRegionStart()),
-                             GetInteger(page_table.GetStackRegionEnd()));
+        reply += fmt::format(
+            "Layout:\n"
+            "  Alias: {:#012x} - {:#012x}\n"
+            "  Heap: {:#012x} - {:#012x}\n"
+            "  Aslr: {:#012x} - {:#012x}\n"
+            "  Stack: {:#012x} - {:#012x}\n"
+            "Modules:\n",
+            GetInteger(page_table.GetAliasRegionStart()),
+            GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1,
+            GetInteger(page_table.GetHeapRegionStart()),
+            GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1,
+            GetInteger(page_table.GetAliasCodeRegionStart()),
+            GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() -
+                1,
+            GetInteger(page_table.GetStackRegionStart()),
+            GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1);
 
         for (const auto& [vaddr, name] : modules) {
             reply += fmt::format("  {:#012x} - {:#012x} {}\n", vaddr,
@@ -811,27 +818,34 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
         while (true) {
             using MemoryAttribute = Kernel::Svc::MemoryAttribute;
 
-            auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-
-            if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
-                mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) {
-                const char* state = GetMemoryStateName(mem_info.state);
-                const char* perm = GetMemoryPermissionString(mem_info);
-
-                const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
-                const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
-                const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
-                const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+            Kernel::KMemoryInfo mem_info{};
+            Kernel::Svc::PageInfo page_info{};
+            R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+                                          cur_addr));
+            auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+            if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
+                svc_mem_info.base_address + svc_mem_info.size - 1 !=
+                    std::numeric_limits<u64>::max()) {
+                const char* state = GetMemoryStateName(svc_mem_info.state);
+                const char* perm = GetMemoryPermissionString(svc_mem_info);
+
+                const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
+                const char i =
+                    True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
+                const char d =
+                    True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
+                const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
                 const char p =
-                    True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
+                    True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
 
-                reply += fmt::format("  {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
-                                     mem_info.base_address,
-                                     mem_info.base_address + mem_info.size - 1, perm, state, l, i,
-                                     d, u, p, mem_info.ipc_count, mem_info.device_count);
+                reply += fmt::format(
+                    "  {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address,
+                    svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p,
+                    svc_mem_info.ipc_count, svc_mem_info.device_count);
             }
 
-            const uintptr_t next_address = mem_info.base_address + mem_info.size;
+            const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
             if (next_address <= cur_addr) {
                 break;
             }
```
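For reference, the mapping walk above reduces to this pattern (condensed sketch; `WalkAddressSpace` is hypothetical, the calls are as in this diff):

```cpp
// Visit every memory block in a process address space. Adjacent pages with
// identical properties are reported as a single block, and the final
// Inaccessible block ends at the top of the space, so base + size wraps to 0
// exactly when the walk is complete.
void WalkAddressSpace(Kernel::KProcessPageTable& page_table) {
    u64 cur_addr = 0;
    while (true) {
        Kernel::KMemoryInfo mem_info{};
        Kernel::Svc::PageInfo page_info{};
        R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
                                      cur_addr));
        const auto svc_mem_info = mem_info.GetSvcMemoryInfo();

        // ... inspect svc_mem_info here ...

        const u64 next_address = svc_mem_info.base_address + svc_mem_info.size;
        if (next_address <= cur_addr) {
            break;
        }
        cur_addr = next_address;
    }
}
```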
```diff
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 59364efa1..37fa39a73 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress*
     };
 
     // We succeeded.
-    *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
+    *out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
     R_SUCCEED();
 }
 
@@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres
     ASSERT(Common::IsAligned(size, alignment));
 
     // Close the secure region's pages.
-    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
+    kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
                                  size / PageSize);
 }
 
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+    return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+    return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
 } // namespace Kernel::Board::Nintendo::Nx
```
```diff
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index ff1feec70..60c5e58b7 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -8,7 +8,8 @@
 
 namespace Kernel {
 class KernelCore;
-}
+class KResourceLimit;
+} // namespace Kernel
 
 namespace Kernel::Board::Nintendo::Nx {
 
@@ -40,6 +41,10 @@ public:
                                    u32 pool);
     static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
                                  u32 pool);
+
+    // Insecure Memory.
+    static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+    static u32 GetInsecureMemoryPool();
 };
 
 } // namespace Kernel::Board::Nintendo::Nx
```
```diff
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index e7da7a21d..fb890f978 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -4,14 +4,15 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/k_capabilities.h"
 #include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_version.h"
 
 namespace Kernel {
 
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+                                       KProcessPageTable* page_table) {
     // We're initializing an initial process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -41,7 +42,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
     R_RETURN(this->SetCapabilities(kern_caps, page_table));
 }
 
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+                                        KProcessPageTable* page_table) {
     // We're initializing a user process.
     m_svc_access_flags.reset();
     m_irq_access_flags.reset();
@@ -121,7 +123,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
     const auto range_pack = MapRange{cap};
     const auto size_pack = MapRangeSize{size_cap};
 
@@ -142,16 +144,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p
                   ? KMemoryPermission::UserRead
                   : KMemoryPermission::UserReadWrite;
     if (MapRangeSize{size_cap}.normal) {
-        // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+        R_RETURN(page_table->MapStatic(phys_addr, size, perm));
     } else {
-        // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+        R_RETURN(page_table->MapIo(phys_addr, size, perm));
     }
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
 }
 
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
     // Get/validate address/size
     const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
     const size_t num_pages = 1;
@@ -160,10 +159,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
     R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
 
     // Do the mapping.
-    // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
-    UNIMPLEMENTED();
-    R_SUCCEED();
+    R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
 }
 
 template <typename F>
@@ -200,13 +196,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
     R_SUCCEED();
 }
 
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
     // Map each region into the process's page table.
     return ProcessMapRegionCapability(
-        cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
-            // R_RETURN(page_table->MapRegion(region_type, perm));
-            UNIMPLEMENTED();
-            R_SUCCEED();
+        cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+            R_RETURN(page_table->MapRegion(region_type, perm));
         });
 }
 
@@ -280,7 +274,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
 }
 
 Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
-                                    KPageTable* page_table) {
+                                    KProcessPageTable* page_table) {
     // Validate this is a capability we can act on.
     const auto type = GetCapabilityType(cap);
     R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +312,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
     }
 }
 
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
     u32 set_flags = 0, set_svc = 0;
 
     for (size_t i = 0; i < caps.size(); i++) {
```
```diff
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index ebd4eedb1..013d952ad 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -15,15 +15,15 @@
 
 namespace Kernel {
 
-class KPageTable;
+class KProcessPageTable;
 class KernelCore;
 
 class KCapabilities {
 public:
     constexpr explicit KCapabilities() = default;
 
-    Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
-    Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+    Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+    Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
 
     static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
 
@@ -264,9 +264,9 @@ private:
 
     Result SetCorePriorityCapability(const u32 cap);
     Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
-    Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
-    Result MapIoPage_(const u32 cap, KPageTable* page_table);
-    Result MapRegion_(const u32 cap, KPageTable* page_table);
+    Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+    Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+    Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
     Result SetInterruptPairCapability(const u32 cap);
     Result SetProgramTypeCapability(const u32 cap);
     Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
     static Result ProcessMapRegionCapability(const u32 cap, F f);
     static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
 
-    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
-    Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+    Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+                         KProcessPageTable* page_table);
+    Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
 
 private:
     Svc::SvcAccessFlagSet m_svc_access_flags{};
```
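Both initialization paths thread the page table through every capability handler. A hedged sketch of a call site (the wrapper name is hypothetical; the interface is as in this diff):

```cpp
#include <span>

// Decode a KIP's capability descriptors. Map-range, IO-page, and map-region
// capabilities now perform real mappings through KProcessPageTable
// (MapStatic/MapIo/MapRegion) instead of hitting UNIMPLEMENTED().
Result ParseKipCapabilities(Kernel::KCapabilities* caps, std::span<const u32> kern_caps,
                            Kernel::KProcessPageTable* page_table) {
    R_RETURN(caps->InitializeForKip(kern_caps, page_table));
}
```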
```diff
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index f48896715..f0703f795 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
                                 size_t size, u64 device_address, u32 option, bool is_aligned) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
     R_SUCCEED();
 }
 
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
                                   size_t size, u64 device_address) {
     // Check that the address falls within the space.
     R_UNLESS((m_space_address <= device_address &&
```
```diff
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 18556e3cc..ff0ec8152 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,7 +5,7 @@
 
 #include <string>
 
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
 #include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
     Result Attach(Svc::DeviceName device_name);
     Result Detach(Svc::DeviceName device_name);
 
-    Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
     }
 
-    Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                       u64 device_address, u32 option) {
         R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
     }
 
-    Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                  u64 device_address);
 
     static void Initialize();
 
 private:
-    Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+    Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
                u64 device_address, u32 option, bool is_aligned);
 
 private:
```
```diff
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index c8122644f..d7adb3169 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -394,6 +394,14 @@ private:
         return region.GetEndAddress();
     }
 
+public:
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+        return Find(address, layout.GetVirtualMemoryRegionTree());
+    }
+    static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+        return Find(address, layout.GetPhysicalMemoryRegionTree());
+    }
+
 private:
     u64 m_linear_phys_to_virt_diff{};
     u64 m_linear_virt_to_phys_diff{};
```
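A short usage sketch for the new overloads (hedged; `kernel` and `addr` are assumed to be in scope):

```cpp
// Resolve the memory region containing a kernel virtual address. The
// KVirtualAddress overload searches the virtual region tree; the
// KPhysicalAddress overload searches the physical one.
const Kernel::KMemoryRegion* region =
    Kernel::KMemoryLayout::Find(kernel.MemoryLayout(), KVirtualAddress{addr});
if (region != nullptr) {
    // region bounds addr: [region->GetAddress(), region->GetEndAddress())
}
```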
```diff
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cdc5572d8..0a973ec8c 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
 }
 
 void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
@@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
 
 void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi
 
 void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                     size_t num_pages) {
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
 
     // Get the range we're tracking.
@@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica
 bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
                                                       size_t num_pages, u8 fill_pattern) {
     auto& device_memory = kernel.System().DeviceMemory();
-    auto optimize_pa =
-        KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+    auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
     auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
 
     // We want to return whether any pages were newly allocated.
```
```diff
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
deleted file mode 100644
index 1d47bdf6b..000000000
--- a/src/core/hle/kernel/k_page_table.cpp
+++ /dev/null
@@ -1,3519 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/literals.h"
-#include "common/scope_exit.h"
-#include "common/settings.h"
-#include "core/core.h"
-#include "core/hle/kernel/k_address_space_info.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/k_system_resource.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-namespace {
-
-class KScopedLightLockPair {
-    YUZU_NON_COPYABLE(KScopedLightLockPair);
-    YUZU_NON_MOVEABLE(KScopedLightLockPair);
-
-private:
-    KLightLock* m_lower;
-    KLightLock* m_upper;
-
-public:
-    KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
-        // Ensure our locks are in a consistent order.
-        if (std::addressof(lhs) <= std::addressof(rhs)) {
-            m_lower = std::addressof(lhs);
-            m_upper = std::addressof(rhs);
-        } else {
-            m_lower = std::addressof(rhs);
-            m_upper = std::addressof(lhs);
-        }
-
-        // Acquire both locks.
-        m_lower->Lock();
-        if (m_lower != m_upper) {
-            m_upper->Lock();
-        }
-    }
-
-    ~KScopedLightLockPair() {
-        // Unlock the upper lock.
-        if (m_upper != nullptr && m_upper != m_lower) {
-            m_upper->Unlock();
-        }
-
-        // Unlock the lower lock.
-        if (m_lower != nullptr) {
-            m_lower->Unlock();
-        }
-    }
-
-public:
-    // Utility.
-    void TryUnlockHalf(KLightLock& lock) {
-        // Only allow unlocking if the lock is half the pair.
-        if (m_lower != m_upper) {
-            // We want to be sure the lock is one we own.
-            if (m_lower == std::addressof(lock)) {
-                lock.Unlock();
-                m_lower = nullptr;
-            } else if (m_upper == std::addressof(lock)) {
-                lock.Unlock();
-                m_upper = nullptr;
-            }
-        }
-    }
-};
```
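The deleted `KScopedLightLockPair` is the standard deadlock-avoidance idiom: two locks are always acquired in ascending address order, so threads locking the same pair in opposite argument orders cannot deadlock. A standalone illustration with `std::mutex` (hypothetical, not code from this commit):

```cpp
#include <functional>
#include <memory>
#include <mutex>
#include <utility>

// Acquire two mutexes in a globally consistent (address) order.
// std::less gives a well-defined total order even over unrelated pointers.
void LockPair(std::mutex& lhs, std::mutex& rhs) {
    std::mutex* lower = std::addressof(lhs);
    std::mutex* upper = std::addressof(rhs);
    if (std::less<std::mutex*>{}(upper, lower)) {
        std::swap(lower, upper);
    }
    lower->lock();
    if (lower != upper) { // tolerate both references aliasing one mutex
        upper->lock();
    }
}
```

The deleted listing continues: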
```diff
-
-using namespace Common::Literals;
-
-constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
-    switch (as_type) {
-    case Svc::CreateProcessFlag::AddressSpace32Bit:
-    case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
-        return 32;
-    case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
-        return 36;
-    case Svc::CreateProcessFlag::AddressSpace64Bit:
-        return 39;
-    default:
-        ASSERT(false);
-        return {};
-    }
-}
-
-} // namespace
-
-KPageTable::KPageTable(Core::System& system_)
-    : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
-
-KPageTable::~KPageTable() = default;
-
-Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
-                                        bool enable_das_merge, bool from_back,
-                                        KMemoryManager::Pool pool, KProcessAddress code_addr,
-                                        size_t code_size, KSystemResource* system_resource,
-                                        KResourceLimit* resource_limit,
-                                        Core::Memory::Memory& memory) {
-
-    const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
-    };
-    const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
-        return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
-    };
-
-    // Set the tracking memory
-    m_memory = std::addressof(memory);
-
-    // Set our width and heap/alias sizes
-    m_address_space_width = GetAddressSpaceWidthFromType(as_type);
-    const KProcessAddress start = 0;
-    const KProcessAddress end{1ULL << m_address_space_width};
-    size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
-    size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
-
-    ASSERT(code_addr < code_addr + code_size);
-    ASSERT(code_addr + code_size - 1 <= end - 1);
-
-    // Adjust heap/alias size if we don't have an alias region
-    if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
-        heap_region_size += alias_region_size;
-        alias_region_size = 0;
-    }
-
-    // Set code regions and determine remaining
-    constexpr size_t RegionAlignment{2_MiB};
-    KProcessAddress process_code_start{};
-    KProcessAddress process_code_end{};
-    size_t stack_region_size{};
-    size_t kernel_map_region_size{};
-
-    if (m_address_space_width == 39) {
-        alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
-        heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
-        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
-        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
-        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
-        m_alias_code_region_start = m_code_region_start;
-        m_alias_code_region_end = m_code_region_end;
-        process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
-        process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
-    } else {
-        stack_region_size = 0;
-        kernel_map_region_size = 0;
-        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
-        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
-        m_stack_region_start = m_code_region_start;
-        m_alias_code_region_start = m_code_region_start;
-        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
-                                  GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
-        m_stack_region_end = m_code_region_end;
-        m_kernel_map_region_start = m_code_region_start;
-        m_kernel_map_region_end = m_code_region_end;
-        process_code_start = m_code_region_start;
-        process_code_end = m_code_region_end;
-    }
-
-    // Set other basic fields
-    m_enable_aslr = enable_aslr;
-    m_enable_device_address_space_merge = enable_das_merge;
-    m_address_space_start = start;
-    m_address_space_end = end;
-    m_is_kernel = false;
-    m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
-    m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
-    m_resource_limit = resource_limit;
-
-    // Determine the region we can place our undetermineds in
-    KProcessAddress alloc_start{};
-    size_t alloc_size{};
-    if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
-        alloc_start = m_code_region_start;
-        alloc_size = process_code_start - m_code_region_start;
-    } else {
-        alloc_start = process_code_end;
-        alloc_size = end - process_code_end;
-    }
-    const size_t needed_size =
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
-    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
-
-    const size_t remaining_size{alloc_size - needed_size};
-
-    // Determine random placements for each region
-    size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
-    if (enable_aslr) {
-        alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                    RegionAlignment;
-        heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                   RegionAlignment;
-        stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                    RegionAlignment;
-        kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
-                   RegionAlignment;
-    }
-
-    // Setup heap and alias regions
-    m_alias_region_start = alloc_start + alias_rnd;
-    m_alias_region_end = m_alias_region_start + alias_region_size;
-    m_heap_region_start = alloc_start + heap_rnd;
-    m_heap_region_end = m_heap_region_start + heap_region_size;
-
-    if (alias_rnd <= heap_rnd) {
-        m_heap_region_start += alias_region_size;
-        m_heap_region_end += alias_region_size;
-    } else {
-        m_alias_region_start += heap_region_size;
-        m_alias_region_end += heap_region_size;
-    }
-
-    // Setup stack region
-    if (stack_region_size) {
-        m_stack_region_start = alloc_start + stack_rnd;
-        m_stack_region_end = m_stack_region_start + stack_region_size;
-
-        if (alias_rnd < stack_rnd) {
-            m_stack_region_start += alias_region_size;
-            m_stack_region_end += alias_region_size;
-        } else {
-            m_alias_region_start += stack_region_size;
-            m_alias_region_end += stack_region_size;
-        }
-
-        if (heap_rnd < stack_rnd) {
-            m_stack_region_start += heap_region_size;
-            m_stack_region_end += heap_region_size;
-        } else {
-            m_heap_region_start += stack_region_size;
-            m_heap_region_end += stack_region_size;
-        }
-    }
-
-    // Setup kernel map region
-    if (kernel_map_region_size) {
-        m_kernel_map_region_start = alloc_start + kmap_rnd;
-        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
-
-        if (alias_rnd < kmap_rnd) {
-            m_kernel_map_region_start += alias_region_size;
-            m_kernel_map_region_end += alias_region_size;
-        } else {
-            m_alias_region_start += kernel_map_region_size;
-            m_alias_region_end += kernel_map_region_size;
-        }
-
-        if (heap_rnd < kmap_rnd) {
-            m_kernel_map_region_start += heap_region_size;
-            m_kernel_map_region_end += heap_region_size;
-        } else {
-            m_heap_region_start += kernel_map_region_size;
-            m_heap_region_end += kernel_map_region_size;
-        }
-
-        if (stack_region_size) {
-            if (stack_rnd < kmap_rnd) {
-                m_kernel_map_region_start += stack_region_size;
-                m_kernel_map_region_end += stack_region_size;
-            } else {
-                m_stack_region_start += kernel_map_region_size;
-                m_stack_region_end += kernel_map_region_size;
-            }
-        }
-    }
-
-    // Set heap and fill members.
-    m_current_heap_end = m_heap_region_start;
-    m_max_heap_size = 0;
-    m_mapped_physical_memory_size = 0;
-    m_mapped_unsafe_physical_memory = 0;
-    m_mapped_insecure_memory = 0;
-    m_mapped_ipc_server_memory = 0;
-
-    m_heap_fill_value = 0;
-    m_ipc_fill_value = 0;
-    m_stack_fill_value = 0;
-
-    // Set allocation option.
-    m_allocate_option =
-        KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
-                                                     : KMemoryManager::Direction::FromFront);
-
-    // Ensure that the regions are inside our address space
-    auto IsInAddressSpace = [&](KProcessAddress addr) {
-        return m_address_space_start <= addr && addr <= m_address_space_end;
-    };
-    ASSERT(IsInAddressSpace(m_alias_region_start));
-    ASSERT(IsInAddressSpace(m_alias_region_end));
-    ASSERT(IsInAddressSpace(m_heap_region_start));
-    ASSERT(IsInAddressSpace(m_heap_region_end));
-    ASSERT(IsInAddressSpace(m_stack_region_start));
-    ASSERT(IsInAddressSpace(m_stack_region_end));
-    ASSERT(IsInAddressSpace(m_kernel_map_region_start));
-    ASSERT(IsInAddressSpace(m_kernel_map_region_end));
-
-    // Ensure that we selected regions that don't overlap
-    const KProcessAddress alias_start{m_alias_region_start};
-    const KProcessAddress alias_last{m_alias_region_end - 1};
-    const KProcessAddress heap_start{m_heap_region_start};
-    const KProcessAddress heap_last{m_heap_region_end - 1};
-    const KProcessAddress stack_start{m_stack_region_start};
-    const KProcessAddress stack_last{m_stack_region_end - 1};
-    const KProcessAddress kmap_start{m_kernel_map_region_start};
-    const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
-    ASSERT(alias_last < heap_start || heap_last < alias_start);
-    ASSERT(alias_last < stack_start || stack_last < alias_start);
-    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
-    ASSERT(heap_last < stack_start || stack_last < heap_start);
-    ASSERT(heap_last < kmap_start || kmap_last < heap_start);
-
-    m_current_heap_end = m_heap_region_start;
-    m_max_heap_size = 0;
-    m_mapped_physical_memory_size = 0;
-    m_memory_pool = pool;
-
-    m_page_table_impl = std::make_unique<Common::PageTable>();
-    m_page_table_impl->Resize(m_address_space_width, PageBits);
-
-    // Initialize our memory block manager.
-    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
-                                               m_memory_block_slab_manager));
-}
```
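The deleted region placement above slides each region by an independent random 2 MiB-aligned offset, then pushes any region whose slide ordered later past the sizes of those that landed below it, which yields disjoint regions without rejection sampling. A self-contained demo of the same scheme (illustrative values, not kernel code):

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <random>

int main() {
    constexpr std::uint64_t alignment = 2ULL << 20; // 2 MiB, as in the kernel
    constexpr std::uint64_t span = 1ULL << 36;      // free span to place into
    const std::array<std::uint64_t, 3> size{1ULL << 30, 1ULL << 32, 1ULL << 28};

    const std::uint64_t remaining = span - (size[0] + size[1] + size[2]);

    std::mt19937_64 rng{std::random_device{}()};
    std::array<std::uint64_t, 3> start{};
    for (std::size_t i = 0; i < start.size(); i++) {
        start[i] = (rng() % (remaining / alignment + 1)) * alignment; // random slide
    }

    // Shift each region past every region whose slide ordered before it
    // (ties broken by index); the result is pairwise disjoint by construction.
    const std::array<std::uint64_t, 3> slide = start;
    for (std::size_t i = 0; i < start.size(); i++) {
        for (std::size_t j = 0; j < start.size(); j++) {
            if (j != i && (slide[j] < slide[i] || (slide[j] == slide[i] && j < i))) {
                start[i] += size[j];
            }
        }
    }

    for (std::size_t i = 0; i < start.size(); i++) {
        std::printf("region %zu: [%#llx, %#llx)\n", i,
                    static_cast<unsigned long long>(start[i]),
                    static_cast<unsigned long long>(start[i] + size[i]));
    }
    return 0;
}
```

The deleted listing continues: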
| 339 | |||
| 340 | void KPageTable::Finalize() { | ||
| 341 | auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { | ||
| 342 | if (Settings::IsFastmemEnabled()) { | ||
| 343 | m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); | ||
| 344 | } | ||
| 345 | }; | ||
| 346 | |||
| 347 | // Finalize memory blocks. | ||
| 348 | m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback)); | ||
| 349 | |||
| 350 | // Release any insecure mapped memory. | ||
| 351 | if (m_mapped_insecure_memory) { | ||
| 352 | UNIMPLEMENTED(); | ||
| 353 | } | ||
| 354 | |||
| 355 | // Release any ipc server memory. | ||
| 356 | if (m_mapped_ipc_server_memory) { | ||
| 357 | UNIMPLEMENTED(); | ||
| 358 | } | ||
| 359 | |||
| 360 | // Close the backing page table, as the destructor is not called for guest objects. | ||
| 361 | m_page_table_impl.reset(); | ||
| 362 | } | ||
| 363 | |||
| 364 | Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state, | ||
| 365 | KMemoryPermission perm) { | ||
| 366 | const u64 size{num_pages * PageSize}; | ||
| 367 | |||
| 368 | // Validate the mapping request. | ||
| 369 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||
| 370 | |||
| 371 | // Lock the table. | ||
| 372 | KScopedLightLock lk(m_general_lock); | ||
| 373 | |||
| 374 | // Verify that the destination memory is unmapped. | ||
| 375 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | ||
| 376 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 377 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 378 | |||
| 379 | // Create an update allocator. | ||
| 380 | Result allocator_result{ResultSuccess}; | ||
| 381 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 382 | m_memory_block_slab_manager); | ||
| 383 | |||
| 384 | // Allocate and open. | ||
| 385 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 386 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | ||
| 387 | &pg, num_pages, | ||
| 388 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocate_option))); | ||
| 389 | |||
| 390 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); | ||
| 391 | |||
| 392 | // Update the blocks. | ||
| 393 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 394 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 395 | KMemoryBlockDisableMergeAttribute::None); | ||
| 396 | |||
| 397 | R_SUCCEED(); | ||
| 398 | } | ||
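
MapProcessCode, like everything in this file, is written in terms of the kernel's result macros: R_UNLESS bails out with a specific error when a precondition fails, R_TRY propagates any failure from a sub-operation, and R_SUCCEED returns success. A simplified sketch of that control flow (the real definitions in the codebase differ in detail):

    #include <cstdint>

    struct Result {
        uint32_t raw;
        constexpr bool IsSuccess() const { return raw == 0; }
        constexpr bool IsFailure() const { return raw != 0; }
    };
    constexpr Result ResultSuccess{0};

    // Simplified control-flow sketch of the result macros used above.
    #define R_SUCCEED() return ResultSuccess
    #define R_THROW(res) return (res)
    #define R_UNLESS(cond, res) \
        do { if (!(cond)) { R_THROW(res); } } while (0)
    #define R_TRY(expr) \
        do { if (const Result r_ = (expr); r_.IsFailure()) { R_THROW(r_); } } while (0)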
| 399 | |||
| 400 | Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 401 | size_t size) { | ||
| 402 | // Validate the mapping request. | ||
| 403 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 404 | ResultInvalidMemoryRegion); | ||
| 405 | |||
| 406 | // Lock the table. | ||
| 407 | KScopedLightLock lk(m_general_lock); | ||
| 408 | |||
| 409 | // Verify that the source memory is normal heap. | ||
| 410 | KMemoryState src_state{}; | ||
| 411 | KMemoryPermission src_perm{}; | ||
| 412 | size_t num_src_allocator_blocks{}; | ||
| 413 | R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, | ||
| 414 | src_address, size, KMemoryState::All, KMemoryState::Normal, | ||
| 415 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 416 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 417 | |||
| 418 | // Verify that the destination memory is unmapped. | ||
| 419 | size_t num_dst_allocator_blocks{}; | ||
| 420 | R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, | ||
| 421 | KMemoryState::Free, KMemoryPermission::None, | ||
| 422 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 423 | KMemoryAttribute::None)); | ||
| 424 | |||
| 425 | // Create an update allocator for the source. | ||
| 426 | Result src_allocator_result{ResultSuccess}; | ||
| 427 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 428 | m_memory_block_slab_manager, | ||
| 429 | num_src_allocator_blocks); | ||
| 430 | R_TRY(src_allocator_result); | ||
| 431 | |||
| 432 | // Create an update allocator for the destination. | ||
| 433 | Result dst_allocator_result{ResultSuccess}; | ||
| 434 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 435 | m_memory_block_slab_manager, | ||
| 436 | num_dst_allocator_blocks); | ||
| 437 | R_TRY(dst_allocator_result); | ||
| 438 | |||
| 439 | // Map the code memory. | ||
| 440 | { | ||
| 441 | // Determine the number of pages being operated on. | ||
| 442 | const size_t num_pages = size / PageSize; | ||
| 443 | |||
| 444 | // Create page groups for the memory being mapped. | ||
| 445 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 446 | AddRegionToPages(src_address, num_pages, pg); | ||
| 447 | |||
| 448 | // We're going to perform an update, so create a helper. | ||
| 449 | KScopedPageTableUpdater updater(this); | ||
| 450 | |||
| 451 | // Reprotect the source as kernel-read/not mapped. | ||
| 452 | const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead | | ||
| 453 | KMemoryPermission::NotMapped); | ||
| 454 | R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 455 | |||
| 456 | // Ensure that we unprotect the source pages on failure. | ||
| 457 | auto unprot_guard = SCOPE_GUARD({ | ||
| 458 | ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions) | ||
| 459 | .IsSuccess()); | ||
| 460 | }); | ||
| 461 | |||
| 462 | // Map the alias pages. | ||
| 463 | const KPageProperties dst_properties = {new_perm, false, false, | ||
| 464 | DisableMergeAttribute::DisableHead}; | ||
| 465 | R_TRY( | ||
| 466 | this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); | ||
| 467 | |||
| 468 | // We successfully mapped the alias pages, so we don't need to unprotect the src pages on | ||
| 469 | // failure. | ||
| 470 | unprot_guard.Cancel(); | ||
| 471 | |||
| 472 | // Apply the memory block updates. | ||
| 473 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 474 | src_state, new_perm, KMemoryAttribute::Locked, | ||
| 475 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 476 | KMemoryBlockDisableMergeAttribute::None); | ||
| 477 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||
| 478 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, | ||
| 479 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 480 | KMemoryBlockDisableMergeAttribute::None); | ||
| 481 | } | ||
| 482 | |||
| 483 | R_SUCCEED(); | ||
| 484 | } | ||
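
The unprot_guard above is the cancellable-guard idiom: the rollback (restoring the source permissions) is armed before the fallible mapping call and disarmed with Cancel() once the mapping succeeds, so the reprotection is undone on every failure path without repeating cleanup code. A minimal guard along these lines (illustrative; the codebase's SCOPE_GUARD macro wraps something similar):

    #include <cstdio>
    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F&& f) : m_f(std::move(f)) {}
        ~ScopeGuard() {
            if (m_active) {
                m_f();  // the rollback runs unless it was cancelled
            }
        }
        void Cancel() { m_active = false; }  // success path: disarm the rollback

        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F m_f;
        bool m_active = true;
    };

    int main() {
        ScopeGuard guard([] { std::puts("rolling back"); });
        guard.Cancel();  // as with unprot_guard: nothing prints
    }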
| 485 | |||
| 486 | Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 487 | size_t size, | ||
| 488 | ICacheInvalidationStrategy icache_invalidation_strategy) { | ||
| 489 | // Validate the mapping request. | ||
| 490 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 491 | ResultInvalidMemoryRegion); | ||
| 492 | |||
| 493 | // Lock the table. | ||
| 494 | KScopedLightLock lk(m_general_lock); | ||
| 495 | |||
| 496 | // Verify that the source memory is locked normal heap. | ||
| 497 | size_t num_src_allocator_blocks{}; | ||
| 498 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 499 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, | ||
| 500 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 501 | KMemoryAttribute::Locked)); | ||
| 502 | |||
| 503 | // Verify that the destination memory is aliasable code. | ||
| 504 | size_t num_dst_allocator_blocks{}; | ||
| 505 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 506 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, | ||
| 507 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, | ||
| 508 | KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None)); | ||
| 509 | |||
| 510 | // Determine whether any pages being unmapped are code. | ||
| 511 | bool any_code_pages = false; | ||
| 512 | { | ||
| 513 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); | ||
| 514 | while (true) { | ||
| 515 | // Get the memory info. | ||
| 516 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 517 | |||
| 518 | // Check if the memory has code flag. | ||
| 519 | if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) { | ||
| 520 | any_code_pages = true; | ||
| 521 | break; | ||
| 522 | } | ||
| 523 | |||
| 524 | // Check if we're done. | ||
| 525 | if (dst_address + size - 1 <= info.GetLastAddress()) { | ||
| 526 | break; | ||
| 527 | } | ||
| 528 | |||
| 529 | // Advance. | ||
| 530 | ++it; | ||
| 531 | } | ||
| 532 | } | ||
| 533 | |||
| 534 | // Ensure that we maintain the instruction cache. | ||
| 535 | bool reprotected_pages = false; | ||
| 536 | SCOPE_EXIT({ | ||
| 537 | if (reprotected_pages && any_code_pages) { | ||
| 538 | if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { | ||
| 539 | m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size); | ||
| 540 | } else { | ||
| 541 | m_system.InvalidateCpuInstructionCaches(); | ||
| 542 | } | ||
| 543 | } | ||
| 544 | }); | ||
| 545 | |||
| 546 | // Unmap. | ||
| 547 | { | ||
| 548 | // Determine the number of pages being operated on. | ||
| 549 | const size_t num_pages = size / PageSize; | ||
| 550 | |||
| 551 | // Create an update allocator for the source. | ||
| 552 | Result src_allocator_result{ResultSuccess}; | ||
| 553 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 554 | m_memory_block_slab_manager, | ||
| 555 | num_src_allocator_blocks); | ||
| 556 | R_TRY(src_allocator_result); | ||
| 557 | |||
| 558 | // Create an update allocator for the destination. | ||
| 559 | Result dst_allocator_result{ResultSuccess}; | ||
| 560 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 561 | m_memory_block_slab_manager, | ||
| 562 | num_dst_allocator_blocks); | ||
| 563 | R_TRY(dst_allocator_result); | ||
| 564 | |||
| 565 | // Unmap the aliased copy of the pages. | ||
| 566 | R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 567 | |||
| 568 | // Try to set the permissions for the source pages back to what they should be. | ||
| 569 | R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, | ||
| 570 | OperationType::ChangePermissions)); | ||
| 571 | |||
| 572 | // Apply the memory block updates. | ||
| 573 | m_memory_block_manager.Update( | ||
| 574 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 575 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 576 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 577 | m_memory_block_manager.Update( | ||
| 578 | std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, | ||
| 579 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 580 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 581 | |||
| 582 | // Note that we reprotected pages. | ||
| 583 | reprotected_pages = true; | ||
| 584 | } | ||
| 585 | |||
| 586 | R_SUCCEED(); | ||
| 587 | } | ||
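
Note how the SCOPE_EXIT above defers the cache decision until after the unmap: the instruction cache is touched only if pages were actually reprotected and at least one of them was executable, and the strategy argument then selects a targeted range invalidation or a full flush. Restated in isolation (a sketch, not the emulator's implementation):

    enum class ICacheInvalidationStrategy { InvalidateRange, InvalidateAll };

    // Mirrors the deferred decision in UnmapCodeMemory's SCOPE_EXIT.
    void MaybeInvalidateICache(bool reprotected_pages, bool any_code_pages,
                               ICacheInvalidationStrategy strategy) {
        if (!(reprotected_pages && any_code_pages)) {
            return;  // nothing executable changed; skip the costly flush
        }
        if (strategy == ICacheInvalidationStrategy::InvalidateRange) {
            // invalidate only [dst_address, dst_address + size)
        } else {
            // flush the entire instruction cache
        }
    }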
| 588 | |||
| 589 | KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 590 | size_t num_pages, size_t alignment, size_t offset, | ||
| 591 | size_t guard_pages) { | ||
| 592 | KProcessAddress address = 0; | ||
| 593 | |||
| 594 | if (num_pages <= region_num_pages) { | ||
| 595 | if (this->IsAslrEnabled()) { | ||
| 596 | UNIMPLEMENTED(); | ||
| 597 | } | ||
| 598 | // Find the first free area. | ||
| 599 | if (address == 0) { | ||
| 600 | address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, | ||
| 601 | alignment, offset, guard_pages); | ||
| 602 | } | ||
| 603 | } | ||
| 604 | |||
| 605 | return address; | ||
| 606 | } | ||
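
FindFreeArea delegates the search to the block manager, passing an alignment, an offset into that alignment, and a guard-page count that must remain unmapped on both sides of the allocation. A self-contained sketch of such a first-fit search over a sorted free list (assumed semantics, not the block manager's code; all quantities are in pages, and offset is assumed smaller than alignment):

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct FreeRange {
        uint64_t start_page;
        uint64_t num_pages;
    };

    std::optional<uint64_t> FindFreeAreaSketch(const std::vector<FreeRange>& free_list,
                                               uint64_t num_pages, uint64_t alignment,
                                               uint64_t offset, uint64_t guard_pages) {
        for (const auto& range : free_list) {
            // Leave the leading guard pages, then round up onto the alignment
            // grid shifted by offset (assumes base >= offset).
            const uint64_t base = range.start_page + guard_pages;
            const uint64_t candidate =
                ((base - offset + alignment - 1) / alignment) * alignment + offset;
            // The allocation plus the trailing guard pages must still fit.
            if (candidate + num_pages + guard_pages <=
                range.start_page + range.num_pages) {
                return candidate;
            }
        }
        return std::nullopt;
    }

With ASLR enabled the real implementation would first try randomized candidates (hence the UNIMPLEMENTED branch above); only the deterministic path is sketched here.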
| 607 | |||
| 608 | Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) { | ||
| 609 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 610 | |||
| 611 | const size_t size = num_pages * PageSize; | ||
| 612 | |||
| 613 | // We're making a new group, not adding to an existing one. | ||
| 614 | R_UNLESS(pg.empty(), ResultInvalidCurrentMemory); | ||
| 615 | |||
| 616 | // Begin traversal. | ||
| 617 | Common::PageTable::TraversalContext context; | ||
| 618 | Common::PageTable::TraversalEntry next_entry; | ||
| 619 | R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)), | ||
| 620 | ResultInvalidCurrentMemory); | ||
| 621 | |||
| 622 | // Prepare tracking variables. | ||
| 623 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 624 | size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); | ||
| 625 | size_t tot_size = cur_size; | ||
| 626 | |||
| 627 | // Iterate, adding to group as we go. | ||
| 628 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); | ||
| 629 | while (tot_size < size) { | ||
| 630 | R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context), | ||
| 631 | ResultInvalidCurrentMemory); | ||
| 632 | |||
| 633 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 634 | const size_t cur_pages = cur_size / PageSize; | ||
| 635 | |||
| 636 | R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); | ||
| 637 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 638 | |||
| 639 | cur_addr = next_entry.phys_addr; | ||
| 640 | cur_size = next_entry.block_size; | ||
| 641 | } else { | ||
| 642 | cur_size += next_entry.block_size; | ||
| 643 | } | ||
| 644 | |||
| 645 | tot_size += next_entry.block_size; | ||
| 646 | } | ||
| 647 | |||
| 648 | // Ensure we add the right amount for the last block. | ||
| 649 | if (tot_size > size) { | ||
| 650 | cur_size -= (tot_size - size); | ||
| 651 | } | ||
| 652 | |||
| 653 | // Add the last block. | ||
| 654 | const size_t cur_pages = cur_size / PageSize; | ||
| 655 | R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); | ||
| 656 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 657 | |||
| 658 | R_SUCCEED(); | ||
| 659 | } | ||
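
The loop in MakePageGroup is a run-coalescing walk: each traversal entry either extends the current physically contiguous block (next_entry.phys_addr == cur_addr + cur_size) or closes it out and starts a new one, and the final block is trimmed when the traversal overshoots the requested size. The merging step in isolation (illustrative; entries are simplified to equal-sized blocks):

    #include <cstdint>
    #include <vector>

    struct Run {
        uint64_t phys_addr;
        uint64_t size;
    };

    // Merge a stream of equally sized traversal entries into maximal
    // physically contiguous runs, as MakePageGroup does before AddBlock.
    std::vector<Run> CoalesceRuns(const std::vector<uint64_t>& entry_phys_addrs,
                                  uint64_t block_size) {
        std::vector<Run> out;
        for (const uint64_t phys : entry_phys_addrs) {
            if (!out.empty() && out.back().phys_addr + out.back().size == phys) {
                out.back().size += block_size;  // contiguous: extend the open run
            } else {
                out.push_back({phys, block_size});  // discontinuity: start a new run
            }
        }
        return out;
    }

IsValidPageGroup below runs the same walk, but instead of building a group it consumes an existing one block by block and fails on the first mismatch.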
| 660 | |||
| 661 | bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) { | ||
| 662 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 663 | |||
| 664 | const size_t size = num_pages * PageSize; | ||
| 665 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); | ||
| 666 | |||
| 667 | // Empty groups are necessarily invalid. | ||
| 668 | if (pg.empty()) { | ||
| 669 | return false; | ||
| 670 | } | ||
| 671 | |||
| 672 | // We're going to validate that the group we'd expect is the group we see. | ||
| 673 | auto cur_it = pg.begin(); | ||
| 674 | KPhysicalAddress cur_block_address = cur_it->GetAddress(); | ||
| 675 | size_t cur_block_pages = cur_it->GetNumPages(); | ||
| 676 | |||
| 677 | auto UpdateCurrentIterator = [&]() { | ||
| 678 | if (cur_block_pages == 0) { | ||
| 679 | if ((++cur_it) == pg.end()) { | ||
| 680 | return false; | ||
| 681 | } | ||
| 682 | |||
| 683 | cur_block_address = cur_it->GetAddress(); | ||
| 684 | cur_block_pages = cur_it->GetNumPages(); | ||
| 685 | } | ||
| 686 | return true; | ||
| 687 | }; | ||
| 688 | |||
| 689 | // Begin traversal. | ||
| 690 | Common::PageTable::TraversalContext context; | ||
| 691 | Common::PageTable::TraversalEntry next_entry; | ||
| 692 | if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) { | ||
| 693 | return false; | ||
| 694 | } | ||
| 695 | |||
| 696 | // Prepare tracking variables. | ||
| 697 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 698 | size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); | ||
| 699 | size_t tot_size = cur_size; | ||
| 700 | |||
| 701 | // Iterate, comparing expected to actual. | ||
| 702 | while (tot_size < size) { | ||
| 703 | if (!m_page_table_impl->ContinueTraversal(next_entry, context)) { | ||
| 704 | return false; | ||
| 705 | } | ||
| 706 | |||
| 707 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 708 | const size_t cur_pages = cur_size / PageSize; | ||
| 709 | |||
| 710 | if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) { | ||
| 711 | return false; | ||
| 712 | } | ||
| 713 | |||
| 714 | if (!UpdateCurrentIterator()) { | ||
| 715 | return false; | ||
| 716 | } | ||
| 717 | |||
| 718 | if (cur_block_address != cur_addr || cur_block_pages < cur_pages) { | ||
| 719 | return false; | ||
| 720 | } | ||
| 721 | |||
| 722 | cur_block_address += cur_size; | ||
| 723 | cur_block_pages -= cur_pages; | ||
| 724 | cur_addr = next_entry.phys_addr; | ||
| 725 | cur_size = next_entry.block_size; | ||
| 726 | } else { | ||
| 727 | cur_size += next_entry.block_size; | ||
| 728 | } | ||
| 729 | |||
| 730 | tot_size += next_entry.block_size; | ||
| 731 | } | ||
| 732 | |||
| 733 | // Ensure we compare the right amount for the last block. | ||
| 734 | if (tot_size > size) { | ||
| 735 | cur_size -= (tot_size - size); | ||
| 736 | } | ||
| 737 | |||
| 738 | if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) { | ||
| 739 | return false; | ||
| 740 | } | ||
| 741 | |||
| 742 | if (!UpdateCurrentIterator()) { | ||
| 743 | return false; | ||
| 744 | } | ||
| 745 | |||
| 746 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); | ||
| 747 | } | ||
| 748 | |||
| 749 | Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size, | ||
| 750 | KPageTable& src_page_table, KProcessAddress src_addr) { | ||
| 751 | // Acquire the table locks. | ||
| 752 | KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock); | ||
| 753 | |||
| 754 | const size_t num_pages{size / PageSize}; | ||
| 755 | |||
| 756 | // Check that the memory is mapped in the destination process. | ||
| 757 | size_t num_allocator_blocks; | ||
| 758 | R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All, | ||
| 759 | KMemoryState::SharedCode, KMemoryPermission::UserReadWrite, | ||
| 760 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 761 | KMemoryAttribute::None)); | ||
| 762 | |||
| 763 | // Check that the memory is mapped in the source process. | ||
| 764 | R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess, | ||
| 765 | KMemoryState::FlagCanMapProcess, KMemoryPermission::None, | ||
| 766 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 767 | KMemoryAttribute::None)); | ||
| 768 | |||
| 769 | // Create an update allocator. | ||
| 770 | Result allocator_result{ResultSuccess}; | ||
| 771 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 772 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 773 | R_TRY(allocator_result); | ||
| 774 | |||
| 775 | R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 776 | |||
| 777 | // Apply the memory block update. | ||
| 778 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, | ||
| 779 | KMemoryState::Free, KMemoryPermission::None, | ||
| 780 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 781 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 782 | |||
| 783 | m_system.InvalidateCpuInstructionCaches(); | ||
| 784 | |||
| 785 | R_SUCCEED(); | ||
| 786 | } | ||
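
UnmapProcessMemory needs both tables locked at once, and KScopedLightLockPair is what makes that safe: if two threads ever paired the same two tables in opposite argument orders, locking each argument in turn could deadlock. Acquiring in a canonical order avoids this; a sketch of the presumed idea, with std::mutex standing in for KLightLock:

    #include <mutex>

    // Deadlock-free scoped lock pair: always acquire in address order, so any
    // two threads take the same two locks in the same global order.
    class ScopedLockPair {
    public:
        ScopedLockPair(std::mutex& lhs, std::mutex& rhs)
            : m_lower(&lhs < &rhs ? &lhs : &rhs),
              m_upper(&lhs < &rhs ? &rhs : &lhs) {
            m_lower->lock();
            if (m_upper != m_lower) {
                m_upper->lock();
            }
        }
        ~ScopedLockPair() {
            if (m_upper != m_lower) {
                m_upper->unlock();
            }
            m_lower->unlock();
        }

    private:
        std::mutex* m_lower;
        std::mutex* m_upper;
    };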
| 787 | |||
| 788 | Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 789 | KProcessAddress address, size_t size, | ||
| 790 | KMemoryPermission test_perm, KMemoryState dst_state) { | ||
| 791 | // Validate pre-conditions. | ||
| 792 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 793 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 794 | test_perm == KMemoryPermission::UserRead); | ||
| 795 | |||
| 796 | // Check that the address is in range. | ||
| 797 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 798 | |||
| 799 | // Get the source permission. | ||
| 800 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 801 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 802 | : KMemoryPermission::UserRead; | ||
| 803 | |||
| 804 | // Get aligned extents. | ||
| 805 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 806 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 807 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 808 | const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 809 | |||
| 810 | const auto aligned_src_last = aligned_src_end - 1; | ||
| 811 | const auto mapping_src_last = mapping_src_end - 1; | ||
| 812 | |||
| 813 | // Get the test state and attribute mask. | ||
| 814 | KMemoryState test_state; | ||
| 815 | KMemoryAttribute test_attr_mask; | ||
| 816 | switch (dst_state) { | ||
| 817 | case KMemoryState::Ipc: | ||
| 818 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 819 | test_attr_mask = | ||
| 820 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 821 | break; | ||
| 822 | case KMemoryState::NonSecureIpc: | ||
| 823 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 824 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 825 | break; | ||
| 826 | case KMemoryState::NonDeviceIpc: | ||
| 827 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 828 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 829 | break; | ||
| 830 | default: | ||
| 831 | R_THROW(ResultInvalidCombination); | ||
| 832 | } | ||
| 833 | |||
| 834 | // Ensure that on failure, we roll back appropriately. | ||
| 835 | size_t mapped_size = 0; | ||
| 836 | ON_RESULT_FAILURE { | ||
| 837 | if (mapped_size > 0) { | ||
| 838 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 839 | src_perm); | ||
| 840 | } | ||
| 841 | }; | ||
| 842 | |||
| 843 | size_t blocks_needed = 0; | ||
| 844 | |||
| 845 | // Iterate, mapping as needed. | ||
| 846 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 847 | while (true) { | ||
| 848 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 849 | |||
| 850 | // Validate the current block. | ||
| 851 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 852 | test_attr_mask, KMemoryAttribute::None)); | ||
| 853 | |||
| 854 | if (mapping_src_start < mapping_src_end && mapping_src_start < info.GetEndAddress() && | ||
| 855 | info.GetAddress() < GetInteger(mapping_src_end)) { | ||
| 856 | const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) | ||
| 857 | ? info.GetAddress() | ||
| 858 | : mapping_src_start; | ||
| 859 | const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() | ||
| 860 | : mapping_src_end; | ||
| 861 | const size_t cur_size = cur_end - cur_start; | ||
| 862 | |||
| 863 | if (info.GetAddress() < GetInteger(mapping_src_start)) { | ||
| 864 | ++blocks_needed; | ||
| 865 | } | ||
| 866 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 867 | ++blocks_needed; | ||
| 868 | } | ||
| 869 | |||
| 870 | // Set the permissions on the block, if we need to. | ||
| 871 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 872 | R_TRY(Operate(cur_start, cur_size / PageSize, src_perm, | ||
| 873 | OperationType::ChangePermissions)); | ||
| 874 | } | ||
| 875 | |||
| 876 | // Note that we mapped this part. | ||
| 877 | mapped_size += cur_size; | ||
| 878 | } | ||
| 879 | |||
| 880 | // If the block is at the end, we're done. | ||
| 881 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 882 | break; | ||
| 883 | } | ||
| 884 | |||
| 885 | // Advance. | ||
| 886 | ++it; | ||
| 887 | ASSERT(it != m_memory_block_manager.end()); | ||
| 888 | } | ||
| 889 | |||
| 890 | if (out_blocks_needed != nullptr) { | ||
| 891 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 892 | *out_blocks_needed = blocks_needed; | ||
| 893 | } | ||
| 894 | |||
| 895 | R_SUCCEED(); | ||
| 896 | } | ||
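
The split between the aligned_src_* and mapping_src_* extents drives all of the partial-page handling here and in SetupForIpcServer below: the aligned extents are the page-aligned hull of the client buffer, while the mapping extents cover only the pages wholly inside it. A worked example with PageSize = 0x1000 (values illustrative):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t PageSize = 0x1000;
    constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }
    constexpr uint64_t AlignUp(uint64_t v, uint64_t a) { return AlignDown(v + a - 1, a); }

    int main() {
        // An unaligned IPC buffer occupying [0x10234, 0x12B34).
        const uint64_t address = 0x10234, size = 0x2900;

        // Hull of every page the buffer touches (partial pages included).
        assert(AlignDown(address, PageSize) == 0x10000);       // aligned_src_start
        assert(AlignUp(address + size, PageSize) == 0x13000);  // aligned_src_end

        // Pages wholly inside the buffer, which can be aliased directly.
        assert(AlignUp(address, PageSize) == 0x11000);           // mapping_src_start
        assert(AlignDown(address + size, PageSize) == 0x12000);  // mapping_src_end

        // The partial pages [0x10000, 0x11000) and [0x12000, 0x13000) must be
        // copied into freshly allocated pages rather than aliased.
    }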
| 897 | |||
| 898 | Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size, | ||
| 899 | KProcessAddress src_addr, KMemoryPermission test_perm, | ||
| 900 | KMemoryState dst_state, KPageTable& src_page_table, | ||
| 901 | bool send) { | ||
| 902 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 903 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 904 | |||
| 905 | // Check that we can theoretically map. | ||
| 906 | const KProcessAddress region_start = m_alias_region_start; | ||
| 907 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 908 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 909 | |||
| 910 | // Get aligned source extents. | ||
| 911 | const KProcessAddress src_start = src_addr; | ||
| 912 | const KProcessAddress src_end = src_addr + size; | ||
| 913 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize); | ||
| 914 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize); | ||
| 915 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize); | ||
| 916 | const KProcessAddress mapping_src_end = | ||
| 917 | Common::AlignDown(GetInteger(src_start) + size, PageSize); | ||
| 918 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 919 | const size_t mapping_src_size = | ||
| 920 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 921 | |||
| 922 | // Select a random address to map at. | ||
| 923 | KProcessAddress dst_addr = | ||
| 924 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 925 | PageSize, 0, this->GetNumGuardPages()); | ||
| 926 | |||
| 927 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 928 | |||
| 929 | // Check that we can perform the operation we're about to perform. | ||
| 930 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 931 | |||
| 932 | // Create an update allocator. | ||
| 933 | Result allocator_result; | ||
| 934 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 935 | m_memory_block_slab_manager); | ||
| 936 | R_TRY(allocator_result); | ||
| 937 | |||
| 938 | // We're going to perform an update, so create a helper. | ||
| 939 | KScopedPageTableUpdater updater(this); | ||
| 940 | |||
| 941 | // Reserve space for any partial pages we allocate. | ||
| 942 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 943 | KScopedResourceReservation memory_reservation( | ||
| 944 | m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size); | ||
| 945 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 946 | |||
| 947 | // Ensure that we manage page references correctly. | ||
| 948 | KPhysicalAddress start_partial_page = 0; | ||
| 949 | KPhysicalAddress end_partial_page = 0; | ||
| 950 | KProcessAddress cur_mapped_addr = dst_addr; | ||
| 951 | |||
| 952 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, | ||
| 953 | // they'll be freed on scope exit. | ||
| 954 | SCOPE_EXIT({ | ||
| 955 | if (start_partial_page != 0) { | ||
| 956 | m_system.Kernel().MemoryManager().Close(start_partial_page, 1); | ||
| 957 | } | ||
| 958 | if (end_partial_page != 0) { | ||
| 959 | m_system.Kernel().MemoryManager().Close(end_partial_page, 1); | ||
| 960 | } | ||
| 961 | }); | ||
| 962 | |||
| 963 | ON_RESULT_FAILURE { | ||
| 964 | if (cur_mapped_addr != dst_addr) { | ||
| 965 | ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, | ||
| 966 | KMemoryPermission::None, OperationType::Unmap) | ||
| 967 | .IsSuccess()); | ||
| 968 | } | ||
| 969 | }; | ||
| 970 | |||
| 971 | // Allocate the start page as needed. | ||
| 972 | if (aligned_src_start < mapping_src_start) { | ||
| 973 | start_partial_page = | ||
| 974 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 975 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 976 | } | ||
| 977 | |||
| 978 | // Allocate the end page as needed. | ||
| 979 | if (mapping_src_end < aligned_src_end && | ||
| 980 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 981 | end_partial_page = | ||
| 982 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 983 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 984 | } | ||
| 985 | |||
| 986 | // Get the implementation. | ||
| 987 | auto& src_impl = src_page_table.PageTableImpl(); | ||
| 988 | |||
| 989 | // Get the fill value for partial pages. | ||
| 990 | const auto fill_val = m_ipc_fill_value; | ||
| 991 | |||
| 992 | // Begin traversal. | ||
| 993 | Common::PageTable::TraversalContext context; | ||
| 994 | Common::PageTable::TraversalEntry next_entry; | ||
| 995 | bool traverse_valid = | ||
| 996 | src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start)); | ||
| 997 | ASSERT(traverse_valid); | ||
| 998 | |||
| 999 | // Prepare tracking variables. | ||
| 1000 | KPhysicalAddress cur_block_addr = next_entry.phys_addr; | ||
| 1001 | size_t cur_block_size = | ||
| 1002 | next_entry.block_size - (cur_block_addr & (next_entry.block_size - 1)); | ||
| 1003 | size_t tot_block_size = cur_block_size; | ||
| 1004 | |||
| 1005 | // Map the start page, if we have one. | ||
| 1006 | if (start_partial_page != 0) { | ||
| 1007 | // Ensure the page holds correct data. | ||
| 1008 | const KVirtualAddress start_partial_virt = | ||
| 1009 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); | ||
| 1010 | if (send) { | ||
| 1011 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 1012 | size_t copy_size, clear_size; | ||
| 1013 | if (src_end < mapping_src_start) { | ||
| 1014 | copy_size = size; | ||
| 1015 | clear_size = mapping_src_start - src_end; | ||
| 1016 | } else { | ||
| 1017 | copy_size = mapping_src_start - src_start; | ||
| 1018 | clear_size = 0; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val, | ||
| 1022 | partial_offset); | ||
| 1023 | std::memcpy( | ||
| 1024 | m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset), | ||
| 1025 | m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress( | ||
| 1026 | m_system.Kernel().MemoryLayout(), cur_block_addr)) + | ||
| 1027 | partial_offset), | ||
| 1028 | copy_size); | ||
| 1029 | if (clear_size > 0) { | ||
| 1030 | std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) + | ||
| 1031 | partial_offset + copy_size), | ||
| 1032 | fill_val, clear_size); | ||
| 1033 | } | ||
| 1034 | } else { | ||
| 1035 | std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val, | ||
| 1036 | PageSize); | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | // Map the page. | ||
| 1040 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); | ||
| 1041 | |||
| 1042 | // Update tracking extents. | ||
| 1043 | cur_mapped_addr += PageSize; | ||
| 1044 | cur_block_addr += PageSize; | ||
| 1045 | cur_block_size -= PageSize; | ||
| 1046 | |||
| 1047 | // If the block's size was one page, we may need to continue traversal. | ||
| 1048 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 1049 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1050 | ASSERT(traverse_valid); | ||
| 1051 | |||
| 1052 | cur_block_addr = next_entry.phys_addr; | ||
| 1053 | cur_block_size = next_entry.block_size; | ||
| 1054 | tot_block_size += next_entry.block_size; | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | // Map the remaining pages. | ||
| 1059 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 1060 | // Continue the traversal. | ||
| 1061 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1062 | ASSERT(traverse_valid); | ||
| 1063 | |||
| 1064 | // Process the block. | ||
| 1065 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 1066 | // Map the block we've been processing so far. | ||
| 1067 | R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1068 | cur_block_addr)); | ||
| 1069 | |||
| 1070 | // Update tracking extents. | ||
| 1071 | cur_mapped_addr += cur_block_size; | ||
| 1072 | cur_block_addr = next_entry.phys_addr; | ||
| 1073 | cur_block_size = next_entry.block_size; | ||
| 1074 | } else { | ||
| 1075 | cur_block_size += next_entry.block_size; | ||
| 1076 | } | ||
| 1077 | tot_block_size += next_entry.block_size; | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | // Handle the last direct-mapped page. | ||
| 1081 | if (const KProcessAddress mapped_block_end = | ||
| 1082 | aligned_src_start + tot_block_size - cur_block_size; | ||
| 1083 | mapped_block_end < mapping_src_end) { | ||
| 1084 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 1085 | |||
| 1086 | // Map the last block. | ||
| 1087 | R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1088 | cur_block_addr)); | ||
| 1089 | |||
| 1090 | // Update tracking extents. | ||
| 1091 | cur_mapped_addr += last_block_size; | ||
| 1092 | cur_block_addr += last_block_size; | ||
| 1093 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 1094 | cur_block_size == last_block_size) { | ||
| 1095 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1096 | ASSERT(traverse_valid); | ||
| 1097 | |||
| 1098 | cur_block_addr = next_entry.phys_addr; | ||
| 1099 | } | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | // Map the end page, if we have one. | ||
| 1103 | if (end_partial_page != 0) { | ||
| 1104 | // Ensure the page holds correct data. | ||
| 1105 | const KVirtualAddress end_partial_virt = | ||
| 1106 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); | ||
| 1107 | if (send) { | ||
| 1108 | const size_t copy_size = src_end - mapping_src_end; | ||
| 1109 | std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), | ||
| 1110 | m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress( | ||
| 1111 | m_system.Kernel().MemoryLayout(), cur_block_addr))), | ||
| 1112 | copy_size); | ||
| 1113 | std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size), | ||
| 1114 | fill_val, PageSize - copy_size); | ||
| 1115 | } else { | ||
| 1116 | std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val, | ||
| 1117 | PageSize); | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | // Map the page. | ||
| 1121 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | // Update memory blocks to reflect our changes. | ||
| 1125 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 1126 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 1127 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1128 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1129 | |||
| 1130 | // Set the output address. | ||
| 1131 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 1132 | |||
| 1133 | // We succeeded. | ||
| 1134 | memory_reservation.Commit(); | ||
| 1135 | R_SUCCEED(); | ||
| 1136 | } | ||
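
For the send case, each start partial page assembled above has a three-span layout: fill bytes up to the buffer start, copied payload, and (when the buffer also ends inside that first page) fill bytes after it. The same construction as a standalone helper (illustrative):

    #include <cstddef>
    #include <cstring>

    // Layout of the start partial page in SetupForIpcServer (send case):
    //   [0, partial_offset)                         -> fill (before the buffer)
    //   [partial_offset, partial_offset+copy_size)  -> payload from the source
    //   [.., +clear_size)                           -> fill (buffer ended in-page)
    void BuildStartPartialPage(unsigned char* dst_page, const unsigned char* src_page,
                               size_t partial_offset, size_t copy_size,
                               size_t clear_size, unsigned char fill_val) {
        std::memset(dst_page, fill_val, partial_offset);
        std::memcpy(dst_page + partial_offset, src_page + partial_offset, copy_size);
        if (clear_size > 0) {
            std::memset(dst_page + partial_offset + copy_size, fill_val, clear_size);
        }
    }

Filling the unused bytes with m_ipc_fill_value rather than leaving them as-is keeps the server from observing unrelated client memory that happens to share the partial pages.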
| 1137 | |||
| 1138 | Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 1139 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 1140 | KMemoryState dst_state, bool send) { | ||
| 1141 | // For convenience, alias this. | ||
| 1142 | KPageTable& dst_page_table = *this; | ||
| 1143 | |||
| 1144 | // Acquire the table locks. | ||
| 1145 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 1146 | |||
| 1147 | // We're going to perform an update, so create a helper. | ||
| 1148 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 1149 | |||
| 1150 | // Perform client setup. | ||
| 1151 | size_t num_allocator_blocks; | ||
| 1152 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 1153 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 1154 | test_perm, dst_state)); | ||
| 1155 | |||
| 1156 | // Create an update allocator. | ||
| 1157 | Result allocator_result; | ||
| 1158 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1159 | src_page_table.m_memory_block_slab_manager, | ||
| 1160 | num_allocator_blocks); | ||
| 1161 | R_TRY(allocator_result); | ||
| 1162 | |||
| 1163 | // Get the mapped extents. | ||
| 1164 | const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize); | ||
| 1165 | const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize); | ||
| 1166 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 1167 | |||
| 1168 | // Ensure that we clean up appropriately if we fail after this. | ||
| 1169 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 1170 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 1171 | : KMemoryPermission::UserRead; | ||
| 1172 | ON_RESULT_FAILURE { | ||
| 1173 | if (src_map_end > src_map_start) { | ||
| 1174 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 1175 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 1176 | } | ||
| 1177 | }; | ||
| 1178 | |||
| 1179 | // Perform server setup. | ||
| 1180 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 1181 | src_page_table, send)); | ||
| 1182 | |||
| 1183 | // If anything was mapped, ipc-lock the pages. | ||
| 1184 | if (src_map_start < src_map_end) { | ||
| 1185 | // Get the source permission. | ||
| 1186 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 1187 | (src_map_end - src_map_start) / PageSize, | ||
| 1188 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 1189 | } | ||
| 1190 | |||
| 1191 | R_SUCCEED(); | ||
| 1192 | } | ||
| 1193 | |||
| 1194 | Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size, | ||
| 1195 | KMemoryState dst_state) { | ||
| 1196 | // Validate the address. | ||
| 1197 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1198 | |||
| 1199 | // Lock the table. | ||
| 1200 | KScopedLightLock lk(m_general_lock); | ||
| 1201 | |||
| 1202 | // Validate the memory state. | ||
| 1203 | size_t num_allocator_blocks; | ||
| 1204 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1205 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 1206 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 1207 | KMemoryAttribute::None)); | ||
| 1208 | |||
| 1209 | // Create an update allocator. | ||
| 1210 | Result allocator_result; | ||
| 1211 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1212 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1213 | R_TRY(allocator_result); | ||
| 1214 | |||
| 1215 | // We're going to perform an update, so create a helper. | ||
| 1216 | KScopedPageTableUpdater updater(this); | ||
| 1217 | |||
| 1218 | // Get aligned extents. | ||
| 1219 | const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 1220 | const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 1221 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 1222 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 1223 | |||
| 1224 | // Unmap the pages. | ||
| 1225 | R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 1226 | |||
| 1227 | // Update memory blocks. | ||
| 1228 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 1229 | KMemoryState::None, KMemoryPermission::None, | ||
| 1230 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1231 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1232 | |||
| 1233 | // Release from the resource limit as relevant. | ||
| 1234 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 1235 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 1236 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 1237 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size); | ||
| 1238 | |||
| 1239 | R_SUCCEED(); | ||
| 1240 | } | ||
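
The release amount is exactly what SetupForIpcServer reserved: the partial pages. Continuing the worked extents example, with address = 0x10234 and size = 0x2900, aligned_size = 0x13000 - 0x10000 = 0x3000 and mapping_size = 0x12000 - 0x11000 = 0x1000, so aligned_size - mapping_size = 0x2000 is released, matching the two partial pages that were charged against PhysicalMemoryMax as unmapped_size during setup.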
| 1241 | |||
| 1242 | Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size, | ||
| 1243 | KMemoryState dst_state) { | ||
| 1244 | // Validate the address. | ||
| 1245 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1246 | |||
| 1247 | // Get aligned source extents. | ||
| 1248 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 1249 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 1250 | const KProcessAddress mapping_last = mapping_end - 1; | ||
| 1251 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 1252 | |||
| 1253 | // If nothing was mapped, we're actually done immediately. | ||
| 1254 | R_SUCCEED_IF(mapping_size == 0); | ||
| 1255 | |||
| 1256 | // Get the test state and attribute mask. | ||
| 1257 | KMemoryState test_state; | ||
| 1258 | KMemoryAttribute test_attr_mask; | ||
| 1259 | switch (dst_state) { | ||
| 1260 | case KMemoryState::Ipc: | ||
| 1261 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 1262 | test_attr_mask = | ||
| 1263 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 1264 | break; | ||
| 1265 | case KMemoryState::NonSecureIpc: | ||
| 1266 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 1267 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1268 | break; | ||
| 1269 | case KMemoryState::NonDeviceIpc: | ||
| 1270 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 1271 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1272 | break; | ||
| 1273 | default: | ||
| 1274 | R_THROW(ResultInvalidCombination); | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | // Lock the table. | ||
| 1278 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 1279 | // convention elsewhere in KPageTable. | ||
| 1280 | KScopedLightLock lk(m_general_lock); | ||
| 1281 | |||
| 1282 | // We're going to perform an update, so create a helper. | ||
| 1283 | KScopedPageTableUpdater updater(this); | ||
| 1284 | |||
| 1285 | // Ensure that on failure, we roll back appropriately. | ||
| 1286 | size_t mapped_size = 0; | ||
| 1287 | ON_RESULT_FAILURE { | ||
| 1288 | if (mapped_size > 0) { | ||
| 1289 | // Determine where the mapping ends. | ||
| 1290 | const auto mapped_end = mapping_start + mapped_size; | ||
| 1291 | const auto mapped_last = mapped_end - 1; | ||
| 1292 | |||
| 1293 | // Get current and next iterators. | ||
| 1294 | KMemoryBlockManager::const_iterator start_it = | ||
| 1295 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1296 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1297 | ++next_it; | ||
| 1298 | |||
| 1299 | // Get the current block info. | ||
| 1300 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1301 | |||
| 1302 | // Create tracking variables. | ||
| 1303 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 1304 | size_t cur_size = cur_info.GetSize(); | ||
| 1305 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1306 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1307 | bool first = | ||
| 1308 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1309 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1310 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1311 | |||
| 1312 | while ((cur_address + cur_size - 1) < mapped_last) { | ||
| 1313 | // Check that we have a next block. | ||
| 1314 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1315 | |||
| 1316 | // Get the next info. | ||
| 1317 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1318 | |||
| 1319 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1320 | |||
| 1321 | const bool next_perm_eq = | ||
| 1322 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1323 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1324 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1325 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1326 | // We can consolidate the reprotection for the current and next block into a | ||
| 1327 | // single call. | ||
| 1328 | cur_size += next_info.GetSize(); | ||
| 1329 | } else { | ||
| 1330 | // We have to operate on the current block. | ||
| 1331 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1332 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1333 | OperationType::ChangePermissions) | ||
| 1334 | .IsSuccess()); | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | // Advance. | ||
| 1338 | cur_address = next_info.GetAddress(); | ||
| 1339 | cur_size = next_info.GetSize(); | ||
| 1340 | first = false; | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | // Advance. | ||
| 1344 | cur_info = next_info; | ||
| 1345 | cur_perm_eq = next_perm_eq; | ||
| 1346 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1347 | ++next_it; | ||
| 1348 | } | ||
| 1349 | |||
| 1350 | // Process the last block. | ||
| 1351 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 1352 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1353 | OperationType::ChangePermissions) | ||
| 1354 | .IsSuccess()); | ||
| 1355 | } | ||
| 1356 | } | ||
| 1357 | }; | ||
| 1358 | |||
| 1359 | // Iterate, reprotecting as needed. | ||
| 1360 | { | ||
| 1361 | // Get current and next iterators. | ||
| 1362 | KMemoryBlockManager::const_iterator start_it = | ||
| 1363 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1364 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1365 | ++next_it; | ||
| 1366 | |||
| 1367 | // Validate the current block. | ||
| 1368 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1369 | ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None, | ||
| 1370 | KMemoryPermission::None, | ||
| 1371 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1372 | KMemoryAttribute::IpcLocked) | ||
| 1373 | .IsSuccess()); | ||
| 1374 | |||
| 1375 | // Create tracking variables. | ||
| 1376 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 1377 | size_t cur_size = cur_info.GetSize(); | ||
| 1378 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1379 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1380 | bool first = | ||
| 1381 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1382 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1383 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1384 | |||
| 1385 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 1386 | // Check that we have a next block. | ||
| 1387 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1388 | |||
| 1389 | // Get the next info. | ||
| 1390 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1391 | |||
| 1392 | // Validate the next block. | ||
| 1393 | ASSERT(this->CheckMemoryState(next_info, test_state, test_state, | ||
| 1394 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1395 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1396 | KMemoryAttribute::IpcLocked) | ||
| 1397 | .IsSuccess()); | ||
| 1398 | |||
| 1399 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1400 | const bool next_perm_eq = | ||
| 1401 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1402 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1403 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1404 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1405 | // We can consolidate the reprotection for the current and next block into a single | ||
| 1406 | // call. | ||
| 1407 | cur_size += next_info.GetSize(); | ||
| 1408 | } else { | ||
| 1409 | // We have to operate on the current block. | ||
| 1410 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1411 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1412 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1413 | : cur_info.GetPermission(), | ||
| 1414 | OperationType::ChangePermissions)); | ||
| 1415 | } | ||
| 1416 | |||
| 1417 | // Mark that we mapped the block. | ||
| 1418 | mapped_size += cur_size; | ||
| 1419 | |||
| 1420 | // Advance. | ||
| 1421 | cur_address = next_info.GetAddress(); | ||
| 1422 | cur_size = next_info.GetSize(); | ||
| 1423 | first = false; | ||
| 1424 | } | ||
| 1425 | |||
| 1426 | // Advance. | ||
| 1427 | cur_info = next_info; | ||
| 1428 | cur_perm_eq = next_perm_eq; | ||
| 1429 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1430 | ++next_it; | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | // Process the last block. | ||
| 1434 | const auto lock_count = | ||
| 1435 | cur_info.GetIpcLockCount() + | ||
| 1436 | (next_it != m_memory_block_manager.end() | ||
| 1437 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 1438 | : 0); | ||
| 1439 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 1440 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1441 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1442 | : cur_info.GetPermission(), | ||
| 1443 | OperationType::ChangePermissions)); | ||
| 1444 | } | ||
| 1445 | } | ||
| 1446 | |||
| 1447 | // Create an update allocator. | ||
| 1448 | // NOTE: Guaranteed zero blocks needed here. | ||
| 1449 | Result allocator_result; | ||
| 1450 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1451 | m_memory_block_slab_manager, 0); | ||
| 1452 | R_TRY(allocator_result); | ||
| 1453 | |||
| 1454 | // Unlock the pages. | ||
| 1455 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 1456 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 1457 | KMemoryPermission::None); | ||
| 1458 | |||
| 1459 | R_SUCCEED(); | ||
| 1460 | } | ||
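
Both the rollback handler and the main loop in CleanupForIpcClient merge neighbouring blocks into a single ChangePermissions call whenever they would be reprotected identically, keeping the number of page-table operations proportional to permission changes rather than to blocks. The consolidation criterion extracted on its own (a sketch; the struct is illustrative, not a kernel type):

    // Two adjacent blocks can share one ChangePermissions call when they agree
    // on every input to the reprotection decision.
    struct BlockView {
        bool perm_eq;         // current permission == original permission
        bool needs_set_perm;  // !perm_eq && IPC lock count == 1
        int original_perm;    // the permission that would be restored
    };

    constexpr bool CanConsolidate(const BlockView& cur, const BlockView& next) {
        return cur.perm_eq == next.perm_eq &&
               cur.needs_set_perm == next.needs_set_perm &&
               cur.original_perm == next.original_perm;
    }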
| 1461 | |||
| 1462 | void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, | ||
| 1463 | KProcessAddress address, size_t size, | ||
| 1464 | KMemoryPermission prot_perm) { | ||
| 1465 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1466 | ASSERT(Common::IsAligned(GetInteger(address), PageSize)); | ||
| 1467 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 1468 | |||
| 1469 | // Get the mapped extents. | ||
| 1470 | const KProcessAddress src_map_start = address; | ||
| 1471 | const KProcessAddress src_map_end = address + size; | ||
| 1472 | const KProcessAddress src_map_last = src_map_end - 1; | ||
| 1473 | |||
| 1474 | // This function is only invoked when there's something to do. | ||
| 1475 | ASSERT(src_map_end > src_map_start); | ||
| 1476 | |||
| 1477 | // Iterate over blocks, fixing permissions. | ||
| 1478 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 1479 | while (true) { | ||
| 1480 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1481 | |||
| 1482 | const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) | ||
| 1483 | ? info.GetAddress() | ||
| 1484 | : GetInteger(src_map_start); | ||
| 1485 | const auto cur_end = | ||
| 1486 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 1487 | |||
| 1488 | // If we can, fix the protections on the block. | ||
| 1489 | if ((info.GetIpcLockCount() == 0 && | ||
| 1490 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 1491 | (info.GetIpcLockCount() != 0 && | ||
| 1492 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 1493 | // Check if we actually need to fix the protections on the block. | ||
| 1494 | if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || | ||
| 1495 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 1496 | ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), | ||
| 1497 | OperationType::ChangePermissions) | ||
| 1498 | .IsSuccess()); | ||
| 1499 | } | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | // If we're past the end of the region, we're done. | ||
| 1503 | if (src_map_last <= info.GetLastAddress()) { | ||
| 1504 | break; | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | // Advance. | ||
| 1508 | ++it; | ||
| 1509 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1510 | } | ||
| 1511 | } | ||
| 1512 | |||
| 1513 | Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 1514 | // Lock the physical memory lock. | ||
| 1515 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 1516 | |||
| 1517 | // Calculate the last address for convenience. | ||
| 1518 | const KProcessAddress last_address = address + size - 1; | ||
| 1519 | |||
| 1520 | // Define iteration variables. | ||
| 1521 | KProcessAddress cur_address; | ||
| 1522 | size_t mapped_size; | ||
| 1523 | |||
| 1524 | // The entire mapping process can be retried. | ||
| 1525 | while (true) { | ||
| 1526 | // Check if the memory is already mapped. | ||
| 1527 | { | ||
| 1528 | // Lock the table. | ||
| 1529 | KScopedLightLock lk(m_general_lock); | ||
| 1530 | |||
| 1531 | // Iterate over the memory. | ||
| 1532 | cur_address = address; | ||
| 1533 | mapped_size = 0; | ||
| 1534 | |||
| 1535 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1536 | while (true) { | ||
| 1537 | // Check that the iterator is valid. | ||
| 1538 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1539 | |||
| 1540 | // Get the memory info. | ||
| 1541 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1542 | |||
| 1543 | // Check if we're done. | ||
| 1544 | if (last_address <= info.GetLastAddress()) { | ||
| 1545 | if (info.GetState() != KMemoryState::Free) { | ||
| 1546 | mapped_size += (last_address + 1 - cur_address); | ||
| 1547 | } | ||
| 1548 | break; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | // Track the memory if it's mapped. | ||
| 1552 | if (info.GetState() != KMemoryState::Free) { | ||
| 1553 | mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 1554 | } | ||
| 1555 | |||
| 1556 | // Advance. | ||
| 1557 | cur_address = info.GetEndAddress(); | ||
| 1558 | ++it; | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | // If the size mapped is the size requested, we've nothing to do. | ||
| 1562 | R_SUCCEED_IF(size == mapped_size); | ||
| 1563 | } | ||
| 1564 | |||
| 1565 | // Allocate and map the memory. | ||
| 1566 | { | ||
| 1567 | // Reserve the memory from the process resource limit. | ||
| 1568 | KScopedResourceReservation memory_reservation( | ||
| 1569 | m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size); | ||
| 1570 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 1571 | |||
| 1572 | // Allocate pages for the new memory. | ||
| 1573 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 1574 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( | ||
| 1575 | &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); | ||
| 1576 | |||
| 1577 | // If we fail in the next bit (or retry), we need to clean up the pages. | ||
| 1578 | // auto pg_guard = SCOPE_GUARD { | ||
| 1579 | // pg.OpenFirst(); | ||
| 1580 | // pg.Close(); | ||
| 1581 | //}; | ||
| 1582 | |||
| 1583 | // Map the memory. | ||
| 1584 | { | ||
| 1585 | // Lock the table. | ||
| 1586 | KScopedLightLock lk(m_general_lock); | ||
| 1587 | |||
| 1588 | size_t num_allocator_blocks = 0; | ||
| 1589 | |||
| 1590 | // Verify that nobody has mapped memory since we first checked. | ||
| 1591 | { | ||
| 1592 | // Iterate over the memory. | ||
| 1593 | size_t checked_mapped_size = 0; | ||
| 1594 | cur_address = address; | ||
| 1595 | |||
| 1596 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1597 | while (true) { | ||
| 1598 | // Check that the iterator is valid. | ||
| 1599 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1600 | |||
| 1601 | // Get the memory info. | ||
| 1602 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1603 | |||
| 1604 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 1605 | if (is_free) { | ||
| 1606 | if (info.GetAddress() < GetInteger(address)) { | ||
| 1607 | ++num_allocator_blocks; | ||
| 1608 | } | ||
| 1609 | if (last_address < info.GetLastAddress()) { | ||
| 1610 | ++num_allocator_blocks; | ||
| 1611 | } | ||
| 1612 | } | ||
| 1613 | |||
| 1614 | // Check if we're done. | ||
| 1615 | if (last_address <= info.GetLastAddress()) { | ||
| 1616 | if (!is_free) { | ||
| 1617 | checked_mapped_size += (last_address + 1 - cur_address); | ||
| 1618 | } | ||
| 1619 | break; | ||
| 1620 | } | ||
| 1621 | |||
| 1622 | // Track the memory if it's mapped. | ||
| 1623 | if (!is_free) { | ||
| 1624 | checked_mapped_size += | ||
| 1625 | KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | // Advance. | ||
| 1629 | cur_address = info.GetEndAddress(); | ||
| 1630 | ++it; | ||
| 1631 | } | ||
| 1632 | |||
| 1633 | // If the size now isn't what it was before, somebody mapped or unmapped | ||
| 1634 | // concurrently. If this happened, retry. | ||
| 1635 | if (mapped_size != checked_mapped_size) { | ||
| 1636 | continue; | ||
| 1637 | } | ||
| 1638 | } | ||
| 1639 | |||
| 1640 | // Create an update allocator. | ||
| 1641 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 1642 | Result allocator_result; | ||
| 1643 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1644 | m_memory_block_slab_manager, | ||
| 1645 | num_allocator_blocks); | ||
| 1646 | R_TRY(allocator_result); | ||
| 1647 | |||
| 1648 | // We're going to perform an update, so create a helper. | ||
| 1649 | KScopedPageTableUpdater updater(this); | ||
| 1650 | |||
| 1651 | // Prepare to iterate over the memory. | ||
| 1652 | auto pg_it = pg.begin(); | ||
| 1653 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 1654 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1655 | |||
| 1656 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 1657 | // pg_guard.Cancel(); | ||
| 1658 | cur_address = address; | ||
| 1659 | ON_RESULT_FAILURE { | ||
| 1660 | if (cur_address > address) { | ||
| 1661 | const KProcessAddress last_unmap_address = cur_address - 1; | ||
| 1662 | |||
| 1663 | // Iterate, unmapping the pages. | ||
| 1664 | cur_address = address; | ||
| 1665 | |||
| 1666 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1667 | while (true) { | ||
| 1668 | // Check that the iterator is valid. | ||
| 1669 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1670 | |||
| 1671 | // Get the memory info. | ||
| 1672 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1673 | |||
| 1674 | // If the memory state is free, we mapped it and need to unmap it. | ||
| 1675 | if (info.GetState() == KMemoryState::Free) { | ||
| 1676 | // Determine the range to unmap. | ||
| 1677 | const size_t cur_pages = | ||
| 1678 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 1679 | last_unmap_address + 1 - cur_address) / | ||
| 1680 | PageSize; | ||
| 1681 | |||
| 1682 | // Unmap. | ||
| 1683 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | ||
| 1684 | OperationType::Unmap) | ||
| 1685 | .IsSuccess()); | ||
| 1686 | } | ||
| 1687 | |||
| 1688 | // Check if we're done. | ||
| 1689 | if (last_unmap_address <= info.GetLastAddress()) { | ||
| 1690 | break; | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | // Advance. | ||
| 1694 | cur_address = info.GetEndAddress(); | ||
| 1695 | ++it; | ||
| 1696 | } | ||
| 1697 | } | ||
| 1698 | |||
| 1699 | // Release any remaining unmapped memory. | ||
| 1700 | m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); | ||
| 1701 | m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); | ||
| 1702 | for (++pg_it; pg_it != pg.end(); ++pg_it) { | ||
| 1703 | m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 1704 | pg_it->GetNumPages()); | ||
| 1705 | m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||
| 1706 | pg_it->GetNumPages()); | ||
| 1707 | } | ||
| 1708 | }; | ||
| 1709 | |||
| 1710 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1711 | while (true) { | ||
| 1712 | // Check that the iterator is valid. | ||
| 1713 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1714 | |||
| 1715 | // Get the memory info. | ||
| 1716 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1717 | |||
| 1718 | // If it's unmapped, we need to map it. | ||
| 1719 | if (info.GetState() == KMemoryState::Free) { | ||
| 1720 | // Determine the range to map. | ||
| 1721 | size_t map_pages = | ||
| 1722 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 1723 | last_address + 1 - cur_address) / | ||
| 1724 | PageSize; | ||
| 1725 | |||
| 1726 | // While we have pages to map, map them. | ||
| 1727 | { | ||
| 1728 | // Create a page group for the current mapping range. | ||
| 1729 | KPageGroup cur_pg(m_kernel, m_block_info_manager); | ||
| 1730 | { | ||
| 1731 | ON_RESULT_FAILURE_2 { | ||
| 1732 | cur_pg.OpenFirst(); | ||
| 1733 | cur_pg.Close(); | ||
| 1734 | }; | ||
| 1735 | |||
| 1736 | size_t remain_pages = map_pages; | ||
| 1737 | while (remain_pages > 0) { | ||
| 1738 | // Check if we're at the end of the physical block. | ||
| 1739 | if (pg_pages == 0) { | ||
| 1740 | // Ensure there are more pages to map. | ||
| 1741 | ASSERT(pg_it != pg.end()); | ||
| 1742 | |||
| 1743 | // Advance our physical block. | ||
| 1744 | ++pg_it; | ||
| 1745 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1746 | pg_pages = pg_it->GetNumPages(); | ||
| 1747 | } | ||
| 1748 | |||
| 1749 | // Add whatever we can to the current block. | ||
| 1750 | const size_t cur_pages = std::min(pg_pages, remain_pages); | ||
| 1751 | R_TRY(cur_pg.AddBlock(pg_phys_addr + | ||
| 1752 | ((pg_pages - cur_pages) * PageSize), | ||
| 1753 | cur_pages)); | ||
| 1754 | |||
| 1755 | // Advance. | ||
| 1756 | remain_pages -= cur_pages; | ||
| 1757 | pg_pages -= cur_pages; | ||
| 1758 | } | ||
| 1759 | } | ||
| 1760 | |||
| 1761 | // Map the pages. | ||
| 1762 | R_TRY(this->Operate(cur_address, map_pages, cur_pg, | ||
| 1763 | OperationType::MapFirstGroup)); | ||
| 1764 | } | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | // Check if we're done. | ||
| 1768 | if (last_address <= info.GetLastAddress()) { | ||
| 1769 | break; | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | // Advance. | ||
| 1773 | cur_address = info.GetEndAddress(); | ||
| 1774 | ++it; | ||
| 1775 | } | ||
| 1776 | |||
| 1777 | // We succeeded, so commit the memory reservation. | ||
| 1778 | memory_reservation.Commit(); | ||
| 1779 | |||
| 1780 | // Increase our tracked mapped size. | ||
| 1781 | m_mapped_physical_memory_size += (size - mapped_size); | ||
| 1782 | |||
| 1783 | // Update the relevant memory blocks. | ||
| 1784 | m_memory_block_manager.UpdateIfMatch( | ||
| 1785 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, | ||
| 1786 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | ||
| 1787 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1788 | address == this->GetAliasRegionStart() | ||
| 1789 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 1790 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 1791 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1792 | |||
| 1793 | R_SUCCEED(); | ||
| 1794 | } | ||
| 1795 | } | ||
| 1796 | } | ||
| 1797 | } | ||
| 1798 | |||
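MapPhysicalMemory drops the table lock while pages are allocated, so the mapped-size scan is repeated under the lock afterwards, and the whole operation restarts if the two scans disagree. A minimal standalone sketch of that optimistic check-allocate-recheck pattern (illustrative names, not the yuzu API):

    #include <mutex>
    #include <utility>

    // Minimal sketch: observe state under a lock, do the slow allocation
    // without the lock, then re-validate and retry if the observation is
    // stale. Check/Slow/Commit are illustrative callables.
    template <typename Check, typename Slow, typename Commit>
    void RetryOptimistically(std::mutex& m, Check check, Slow slow, Commit commit) {
        for (;;) {
            std::unique_lock lk(m);
            const auto before = check();   // e.g. currently mapped size
            lk.unlock();

            auto resources = slow(before); // e.g. allocate pages, lock not held

            lk.lock();
            if (check() != before) {
                continue;                  // concurrent map/unmap: retry
            }
            commit(std::move(resources));  // e.g. map pages, update blocks
            return;
        }
    }

Allocating outside the lock keeps the slow path off the table lock at the cost of an occasional retry.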
| 1799 | Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 1800 | // Lock the physical memory lock. | ||
| 1801 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 1802 | |||
| 1803 | // Lock the table. | ||
| 1804 | KScopedLightLock lk(m_general_lock); | ||
| 1805 | |||
| 1806 | // Calculate the last address for convenience. | ||
| 1807 | const KProcessAddress last_address = address + size - 1; | ||
| 1808 | |||
| 1809 | // Define iteration variables. | ||
| 1810 | KProcessAddress map_start_address = 0; | ||
| 1811 | KProcessAddress map_last_address = 0; | ||
| 1812 | |||
| 1813 | KProcessAddress cur_address; | ||
| 1814 | size_t mapped_size; | ||
| 1815 | size_t num_allocator_blocks = 0; | ||
| 1816 | |||
| 1817 | // Check if the memory is mapped. | ||
| 1818 | { | ||
| 1819 | // Iterate over the memory. | ||
| 1820 | cur_address = address; | ||
| 1821 | mapped_size = 0; | ||
| 1822 | |||
| 1823 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1824 | while (true) { | ||
| 1825 | // Check that the iterator is valid. | ||
| 1826 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1827 | |||
| 1828 | // Get the memory info. | ||
| 1829 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1830 | |||
| 1831 | // Verify the memory's state. | ||
| 1832 | const bool is_normal = info.GetState() == KMemoryState::Normal && | ||
| 1833 | info.GetAttribute() == KMemoryAttribute::None; | ||
| 1834 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 1835 | R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); | ||
| 1836 | |||
| 1837 | if (is_normal) { | ||
| 1838 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | ||
| 1839 | |||
| 1840 | if (map_start_address == 0) { | ||
| 1841 | map_start_address = cur_address; | ||
| 1842 | } | ||
| 1843 | map_last_address = | ||
| 1844 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 1845 | |||
| 1846 | if (info.GetAddress() < GetInteger(address)) { | ||
| 1847 | ++num_allocator_blocks; | ||
| 1848 | } | ||
| 1849 | if (last_address < info.GetLastAddress()) { | ||
| 1850 | ++num_allocator_blocks; | ||
| 1851 | } | ||
| 1852 | |||
| 1853 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | // Check if we're done. | ||
| 1857 | if (last_address <= info.GetLastAddress()) { | ||
| 1858 | break; | ||
| 1859 | } | ||
| 1860 | |||
| 1861 | // Advance. | ||
| 1862 | cur_address = info.GetEndAddress(); | ||
| 1863 | ++it; | ||
| 1864 | } | ||
| 1865 | |||
| 1866 | // If there's nothing mapped, we've nothing to do. | ||
| 1867 | R_SUCCEED_IF(mapped_size == 0); | ||
| 1868 | } | ||
| 1869 | |||
| 1870 | // Create an update allocator. | ||
| 1871 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 1872 | Result allocator_result; | ||
| 1873 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1874 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1875 | R_TRY(allocator_result); | ||
| 1876 | |||
| 1877 | // We're going to perform an update, so create a helper. | ||
| 1878 | KScopedPageTableUpdater updater(this); | ||
| 1879 | |||
| 1880 | // Separate the mapping. | ||
| 1881 | R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, | ||
| 1882 | KMemoryPermission::None, OperationType::Separate)); | ||
| 1883 | |||
| 1884 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 1885 | cur_address = address; | ||
| 1886 | |||
| 1887 | // Iterate over the memory, unmapping as we go. | ||
| 1888 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1889 | |||
| 1890 | const auto clear_merge_attr = | ||
| 1891 | (it->GetState() == KMemoryState::Normal && | ||
| 1892 | it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) | ||
| 1893 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 1894 | : KMemoryBlockDisableMergeAttribute::None; | ||
| 1895 | |||
| 1896 | while (true) { | ||
| 1897 | // Check that the iterator is valid. | ||
| 1898 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1899 | |||
| 1900 | // Get the memory info. | ||
| 1901 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1902 | |||
| 1903 | // If the memory state is normal, we need to unmap it. | ||
| 1904 | if (info.GetState() == KMemoryState::Normal) { | ||
| 1905 | // Determine the range to unmap. | ||
| 1906 | const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 1907 | last_address + 1 - cur_address) / | ||
| 1908 | PageSize; | ||
| 1909 | |||
| 1910 | // Unmap. | ||
| 1911 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) | ||
| 1912 | .IsSuccess()); | ||
| 1913 | } | ||
| 1914 | |||
| 1915 | // Check if we're done. | ||
| 1916 | if (last_address <= info.GetLastAddress()) { | ||
| 1917 | break; | ||
| 1918 | } | ||
| 1919 | |||
| 1920 | // Advance. | ||
| 1921 | cur_address = info.GetEndAddress(); | ||
| 1922 | ++it; | ||
| 1923 | } | ||
| 1924 | |||
| 1925 | // Release the memory resource. | ||
| 1926 | m_mapped_physical_memory_size -= mapped_size; | ||
| 1927 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size); | ||
| 1928 | |||
| 1929 | // Update memory blocks. | ||
| 1930 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | ||
| 1931 | KMemoryState::Free, KMemoryPermission::None, | ||
| 1932 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1933 | clear_merge_attr); | ||
| 1934 | |||
| 1935 | // We succeeded. | ||
| 1936 | R_SUCCEED(); | ||
| 1937 | } | ||
| 1938 | |||
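Both the map and unmap paths account mapped_size by summing how much of the requested range overlaps blocks in a given state. The same arithmetic in isolation, with an illustrative block layout rather than the KMemoryBlock API (assumes size > 0):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Minimal sketch of the mapped-size accounting above: sum how much of
    // [address, address + size) overlaps runs that are in a "mapped" state.
    struct Block {
        uint64_t address;
        uint64_t size;
        bool mapped;
        uint64_t End() const { return address + size; }
    };

    uint64_t MappedBytesIn(const std::vector<Block>& blocks, uint64_t address, uint64_t size) {
        const uint64_t last = address + size - 1;  // inclusive, as in the code above
        uint64_t total = 0;
        for (const Block& b : blocks) {
            if (b.End() <= address || b.address > last) {
                continue;  // no overlap with the query range
            }
            const uint64_t lo = std::max(b.address, address);
            const uint64_t hi = std::min(b.End() - 1, last);
            if (b.mapped) {
                total += hi + 1 - lo;
            }
        }
        return total;
    }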
| 1939 | Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1940 | size_t size) { | ||
| 1941 | // Lock the table. | ||
| 1942 | KScopedLightLock lk(m_general_lock); | ||
| 1943 | |||
| 1944 | // Validate that the source address's state is valid. | ||
| 1945 | KMemoryState src_state; | ||
| 1946 | size_t num_src_allocator_blocks; | ||
| 1947 | R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, | ||
| 1948 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1949 | KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 1950 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 1951 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1952 | |||
| 1953 | // Validate that the dst address's state is valid. | ||
| 1954 | size_t num_dst_allocator_blocks; | ||
| 1955 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, | ||
| 1956 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1957 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1958 | KMemoryAttribute::None)); | ||
| 1959 | |||
| 1960 | // Create an update allocator for the source. | ||
| 1961 | Result src_allocator_result; | ||
| 1962 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1963 | m_memory_block_slab_manager, | ||
| 1964 | num_src_allocator_blocks); | ||
| 1965 | R_TRY(src_allocator_result); | ||
| 1966 | |||
| 1967 | // Create an update allocator for the destination. | ||
| 1968 | Result dst_allocator_result; | ||
| 1969 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1970 | m_memory_block_slab_manager, | ||
| 1971 | num_dst_allocator_blocks); | ||
| 1972 | R_TRY(dst_allocator_result); | ||
| 1973 | |||
| 1974 | // Map the memory. | ||
| 1975 | { | ||
| 1976 | // Determine the number of pages being operated on. | ||
| 1977 | const size_t num_pages = size / PageSize; | ||
| 1978 | |||
| 1979 | // Create page groups for the memory being unmapped. | ||
| 1980 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 1981 | |||
| 1982 | // Create the page group representing the source. | ||
| 1983 | R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||
| 1984 | |||
| 1985 | // We're going to perform an update, so create a helper. | ||
| 1986 | KScopedPageTableUpdater updater(this); | ||
| 1987 | |||
| 1988 | // Reprotect the source as kernel-read/not mapped. | ||
| 1989 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | ||
| 1990 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||
| 1991 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||
| 1992 | const KPageProperties src_properties = {new_src_perm, false, false, | ||
| 1993 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 1994 | R_TRY(this->Operate(src_address, num_pages, src_properties.perm, | ||
| 1995 | OperationType::ChangePermissions)); | ||
| 1996 | |||
| 1997 | // Ensure that we unprotect the source pages on failure. | ||
| 1998 | ON_RESULT_FAILURE { | ||
| 1999 | const KPageProperties unprotect_properties = { | ||
| 2000 | KMemoryPermission::UserReadWrite, false, false, | ||
| 2001 | DisableMergeAttribute::EnableHeadBodyTail}; | ||
| 2002 | ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm, | ||
| 2003 | OperationType::ChangePermissions) == ResultSuccess); | ||
| 2004 | }; | ||
| 2005 | |||
| 2006 | // Map the alias pages. | ||
| 2007 | const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 2008 | DisableMergeAttribute::DisableHead}; | ||
| 2009 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, | ||
| 2010 | false)); | ||
| 2011 | |||
| 2012 | // Apply the memory block updates. | ||
| 2013 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 2014 | src_state, new_src_perm, new_src_attr, | ||
| 2015 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 2016 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2017 | m_memory_block_manager.Update( | ||
| 2018 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack, | ||
| 2019 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2020 | KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||
| 2021 | } | ||
| 2022 | |||
| 2023 | R_SUCCEED(); | ||
| 2024 | } | ||
| 2025 | |||
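The ON_RESULT_FAILURE block above restores the source pages' permissions if mapping the alias fails, while SCOPE_EXIT always runs. One way to approximate the conditional variant without the kernel's result machinery is a dismissible rollback guard (hypothetical class, for illustration only):

    #include <functional>
    #include <utility>

    // Minimal sketch of the ON_RESULT_FAILURE idiom: run a rollback unless
    // the operation reaches its success point and dismisses the guard.
    class RollbackGuard {
    public:
        explicit RollbackGuard(std::function<void()> rollback)
            : m_rollback(std::move(rollback)) {}
        ~RollbackGuard() {
            if (m_armed) {
                m_rollback();  // e.g. restore the source pages' permissions
            }
        }
        void Dismiss() { m_armed = false; }  // call once every step succeeded
    private:
        std::function<void()> m_rollback;
        bool m_armed = true;
    };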
| 2026 | Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 2027 | size_t size) { | ||
| 2028 | // Lock the table. | ||
| 2029 | KScopedLightLock lk(m_general_lock); | ||
| 2030 | |||
| 2031 | // Validate that the source address's state is valid. | ||
| 2032 | KMemoryState src_state; | ||
| 2033 | size_t num_src_allocator_blocks; | ||
| 2034 | R_TRY(this->CheckMemoryState( | ||
| 2035 | std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), | ||
| 2036 | src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 2037 | KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, | ||
| 2038 | KMemoryAttribute::All, KMemoryAttribute::Locked)); | ||
| 2039 | |||
| 2040 | // Validate that the dst address's state is valid. | ||
| 2041 | KMemoryPermission dst_perm; | ||
| 2042 | size_t num_dst_allocator_blocks; | ||
| 2043 | R_TRY(this->CheckMemoryState( | ||
| 2044 | nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), | ||
| 2045 | dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, | ||
| 2046 | KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2047 | |||
| 2048 | // Create an update allocator for the source. | ||
| 2049 | Result src_allocator_result; | ||
| 2050 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 2051 | m_memory_block_slab_manager, | ||
| 2052 | num_src_allocator_blocks); | ||
| 2053 | R_TRY(src_allocator_result); | ||
| 2054 | |||
| 2055 | // Create an update allocator for the destination. | ||
| 2056 | Result dst_allocator_result; | ||
| 2057 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 2058 | m_memory_block_slab_manager, | ||
| 2059 | num_dst_allocator_blocks); | ||
| 2060 | R_TRY(dst_allocator_result); | ||
| 2061 | |||
| 2062 | // Unmap the memory. | ||
| 2063 | { | ||
| 2064 | // Determine the number of pages being operated on. | ||
| 2065 | const size_t num_pages = size / PageSize; | ||
| 2066 | |||
| 2067 | // Create page groups for the memory being unmapped. | ||
| 2068 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 2069 | |||
| 2070 | // Create the page group representing the destination. | ||
| 2071 | R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||
| 2072 | |||
| 2074 | // Ensure the page group is valid for the source. | ||
| 2074 | R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||
| 2075 | |||
| 2076 | // We're going to perform an update, so create a helper. | ||
| 2077 | KScopedPageTableUpdater updater(this); | ||
| 2078 | |||
| 2079 | // Unmap the aliased copy of the pages. | ||
| 2080 | const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2081 | DisableMergeAttribute::None}; | ||
| 2082 | R_TRY( | ||
| 2083 | this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap)); | ||
| 2084 | |||
| 2085 | // Ensure that we re-map the aliased pages on failure. | ||
| 2086 | ON_RESULT_FAILURE { | ||
| 2087 | this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||
| 2088 | }; | ||
| 2089 | |||
| 2090 | // Try to set the permissions for the source pages back to what they should be. | ||
| 2091 | const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 2092 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 2093 | R_TRY(this->Operate(src_address, num_pages, src_properties.perm, | ||
| 2094 | OperationType::ChangePermissions)); | ||
| 2095 | |||
| 2096 | // Apply the memory block updates. | ||
| 2097 | m_memory_block_manager.Update( | ||
| 2098 | std::addressof(src_allocator), src_address, num_pages, src_state, | ||
| 2099 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2100 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 2101 | m_memory_block_manager.Update( | ||
| 2102 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 2103 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2104 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2105 | } | ||
| 2106 | |||
| 2107 | R_SUCCEED(); | ||
| 2108 | } | ||
| 2109 | |||
| 2110 | Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 2111 | size_t num_pages, KMemoryPermission perm) { | ||
| 2112 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2113 | |||
| 2114 | // Create a page group to hold the pages we allocate. | ||
| 2115 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 2116 | |||
| 2117 | // Allocate the pages. | ||
| 2118 | R_TRY( | ||
| 2119 | m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); | ||
| 2120 | |||
| 2121 | // Ensure that the page group is closed when we're done working with it. | ||
| 2122 | SCOPE_EXIT({ pg.Close(); }); | ||
| 2123 | |||
| 2124 | // Clear all pages. | ||
| 2125 | for (const auto& it : pg) { | ||
| 2126 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | ||
| 2127 | it.GetSize()); | ||
| 2128 | } | ||
| 2129 | |||
| 2130 | // Map the pages. | ||
| 2131 | R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup)); | ||
| 2132 | } | ||
| 2133 | |||
| 2134 | Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 2135 | const KPageGroup& pg, const KPageProperties properties, | ||
| 2136 | bool reuse_ll) { | ||
| 2137 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2138 | |||
| 2139 | // Note the current address, so that we can iterate. | ||
| 2140 | const KProcessAddress start_address = address; | ||
| 2141 | KProcessAddress cur_address = address; | ||
| 2142 | |||
| 2143 | // Ensure that we clean up on failure. | ||
| 2144 | ON_RESULT_FAILURE { | ||
| 2145 | ASSERT(!reuse_ll); | ||
| 2146 | if (cur_address != start_address) { | ||
| 2147 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2148 | DisableMergeAttribute::None}; | ||
| 2149 | ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize, | ||
| 2150 | unmap_properties.perm, OperationType::Unmap) == ResultSuccess); | ||
| 2151 | } | ||
| 2152 | }; | ||
| 2153 | |||
| 2154 | // Iterate, mapping all pages in the group. | ||
| 2155 | for (const auto& block : pg) { | ||
| 2156 | // Map and advance. | ||
| 2157 | const KPageProperties cur_properties = | ||
| 2158 | (cur_address == start_address) | ||
| 2159 | ? properties | ||
| 2160 | : KPageProperties{properties.perm, properties.io, properties.uncached, | ||
| 2161 | DisableMergeAttribute::None}; | ||
| 2162 | R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, | ||
| 2163 | OperationType::Map, block.GetAddress())); | ||
| 2164 | cur_address += block.GetSize(); | ||
| 2165 | } | ||
| 2166 | |||
| 2167 | // We succeeded! | ||
| 2168 | R_SUCCEED(); | ||
| 2169 | } | ||
| 2170 | |||
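MapPageGroupImpl walks the group block by block, gives only the first block the caller's merge attribute, and unmaps the already-mapped prefix if a later block fails. The same shape in a standalone sketch, where Map/Unmap stand in for Operate:

    #include <cstdint>
    #include <vector>

    // Minimal sketch of the loop above: map a group of physical runs
    // contiguously at `dst`, treating only the first run as the "head".
    struct Run {
        uint64_t phys;
        uint64_t pages;
    };

    bool MapRuns(uint64_t dst, const std::vector<Run>& runs, uint64_t page_size,
                 bool (*Map)(uint64_t va, uint64_t pa, uint64_t pages, bool head),
                 void (*Unmap)(uint64_t va, uint64_t pages)) {
        uint64_t cur = dst;
        for (const Run& r : runs) {
            const bool is_head = (cur == dst);  // only the head disables merging
            if (!Map(cur, r.phys, r.pages, is_head)) {
                Unmap(dst, (cur - dst) / page_size);  // roll back the mapped prefix
                return false;
            }
            cur += r.pages * page_size;
        }
        return true;
    }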
| 2171 | void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 2172 | const KPageGroup& pg) { | ||
| 2173 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2174 | |||
| 2175 | // Note the current address, so that we can iterate. | ||
| 2176 | const KProcessAddress start_address = address; | ||
| 2177 | const KProcessAddress last_address = start_address + size - 1; | ||
| 2178 | const KProcessAddress end_address = last_address + 1; | ||
| 2179 | |||
| 2180 | // Iterate over the memory. | ||
| 2181 | auto pg_it = pg.begin(); | ||
| 2182 | ASSERT(pg_it != pg.end()); | ||
| 2183 | |||
| 2184 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 2185 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 2186 | |||
| 2187 | auto it = m_memory_block_manager.FindIterator(start_address); | ||
| 2188 | while (true) { | ||
| 2189 | // Check that the iterator is valid. | ||
| 2190 | ASSERT(it != m_memory_block_manager.end()); | ||
| 2191 | |||
| 2192 | // Get the memory info. | ||
| 2193 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 2194 | |||
| 2195 | // Determine the range to map. | ||
| 2196 | KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address); | ||
| 2197 | const KProcessAddress map_end_address = | ||
| 2198 | std::min<KProcessAddress>(info.GetEndAddress(), end_address); | ||
| 2199 | ASSERT(map_end_address != map_address); | ||
| 2200 | |||
| 2201 | // Determine if we should disable head merge. | ||
| 2202 | const bool disable_head_merge = | ||
| 2203 | info.GetAddress() >= GetInteger(start_address) && | ||
| 2204 | True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2205 | const KPageProperties map_properties = { | ||
| 2206 | info.GetPermission(), false, false, | ||
| 2207 | disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None}; | ||
| 2208 | |||
| 2209 | // While we have pages to map, map them. | ||
| 2210 | size_t map_pages = (map_end_address - map_address) / PageSize; | ||
| 2211 | while (map_pages > 0) { | ||
| 2212 | // Check if we're at the end of the physical block. | ||
| 2213 | if (pg_pages == 0) { | ||
| 2214 | // Ensure there are more pages to map. | ||
| 2215 | ASSERT(pg_it != pg.end()); | ||
| 2216 | |||
| 2217 | // Advance our physical block. | ||
| 2218 | ++pg_it; | ||
| 2219 | pg_phys_addr = pg_it->GetAddress(); | ||
| 2220 | pg_pages = pg_it->GetNumPages(); | ||
| 2221 | } | ||
| 2222 | |||
| 2223 | // Map whatever we can. | ||
| 2224 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 2225 | ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map, | ||
| 2226 | pg_phys_addr) == ResultSuccess); | ||
| 2227 | |||
| 2228 | // Advance. | ||
| 2229 | map_address += cur_pages * PageSize; | ||
| 2230 | map_pages -= cur_pages; | ||
| 2231 | |||
| 2232 | pg_phys_addr += cur_pages * PageSize; | ||
| 2233 | pg_pages -= cur_pages; | ||
| 2234 | } | ||
| 2235 | |||
| 2236 | // Check if we're done. | ||
| 2237 | if (last_address <= info.GetLastAddress()) { | ||
| 2238 | break; | ||
| 2239 | } | ||
| 2240 | |||
| 2241 | // Advance. | ||
| 2242 | ++it; | ||
| 2243 | } | ||
| 2244 | |||
| 2245 | // Check that we re-mapped precisely the page group. | ||
| 2246 | ASSERT((++pg_it) == pg.end()); | ||
| 2247 | } | ||
| 2248 | |||
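RemapPageGroup pairs two run-length streams, virtual ranges from the block manager and physical runs from the page group, always consuming min(pg_pages, map_pages) at a time. A standalone sketch of that two-cursor loop (addresses in page units for brevity; assumes the physical runs cover the virtual total, as the ASSERTs above enforce):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Minimal sketch of the two-cursor loop above: pair a stream of virtual
    // ranges with a stream of physical runs, emitting map operations of
    // min(remaining_virtual, remaining_physical) pages at a time.
    struct Run {
        uint64_t start;  // first page index of the run
        uint64_t pages;  // length of the run in pages
    };

    void PairRuns(const std::vector<Run>& virt, const std::vector<Run>& phys) {
        size_t pi = 0;
        uint64_t pg_start = phys.empty() ? 0 : phys[0].start;
        uint64_t pg_pages = phys.empty() ? 0 : phys[0].pages;

        for (const Run& v : virt) {
            uint64_t va = v.start;
            uint64_t remaining = v.pages;
            while (remaining > 0) {
                if (pg_pages == 0) {  // advance to the next physical run
                    ++pi;
                    pg_start = phys[pi].start;
                    pg_pages = phys[pi].pages;
                }
                const uint64_t cur = std::min(pg_pages, remaining);
                std::printf("map va=%llx <- pa=%llx pages=%llu\n",
                            (unsigned long long)va, (unsigned long long)pg_start,
                            (unsigned long long)cur);
                va += cur;
                pg_start += cur;
                pg_pages -= cur;
                remaining -= cur;
            }
        }
    }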
| 2249 | Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 2250 | KPhysicalAddress phys_addr, bool is_pa_valid, | ||
| 2251 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2252 | KMemoryState state, KMemoryPermission perm) { | ||
| 2253 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | ||
| 2254 | |||
| 2255 | // Ensure this is a valid map request. | ||
| 2256 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2257 | ResultInvalidCurrentMemory); | ||
| 2258 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2259 | |||
| 2260 | // Lock the table. | ||
| 2261 | KScopedLightLock lk(m_general_lock); | ||
| 2262 | |||
| 2263 | // Find a random address to map at. | ||
| 2264 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, | ||
| 2265 | 0, this->GetNumGuardPages()); | ||
| 2266 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2267 | ASSERT(Common::IsAligned(GetInteger(addr), alignment)); | ||
| 2268 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2269 | ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||
| 2270 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2271 | KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess); | ||
| 2272 | |||
| 2273 | // Create an update allocator. | ||
| 2274 | Result allocator_result; | ||
| 2275 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2276 | m_memory_block_slab_manager); | ||
| 2277 | R_TRY(allocator_result); | ||
| 2278 | |||
| 2279 | // We're going to perform an update, so create a helper. | ||
| 2280 | KScopedPageTableUpdater updater(this); | ||
| 2281 | |||
| 2282 | // Perform mapping operation. | ||
| 2283 | if (is_pa_valid) { | ||
| 2284 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2285 | R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr)); | ||
| 2286 | } else { | ||
| 2287 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm)); | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | // Update the blocks. | ||
| 2291 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2292 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2293 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2294 | |||
| 2295 | // We successfully mapped the pages. | ||
| 2296 | *out_addr = addr; | ||
| 2297 | R_SUCCEED(); | ||
| 2298 | } | ||
| 2299 | |||
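The FindFreeArea call above has to honor both the requested alignment and the table's guard pages. A plausible sketch of such a search over a free list (illustrative only; the actual implementation also randomizes the choice):

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Minimal sketch of an aligned free-area search: pick the first spot in
    // a free range that fits num_pages plus guard pages on both sides, at
    // the requested alignment (align_pages >= 1). All units are pages.
    struct FreeRange {
        uint64_t start;  // first free page index
        uint64_t pages;  // number of free pages
    };

    std::optional<uint64_t> FindFreeArea(const std::vector<FreeRange>& free_list,
                                         uint64_t num_pages, uint64_t align_pages,
                                         uint64_t guard_pages) {
        for (const FreeRange& r : free_list) {
            // Leave guard_pages unmapped below the candidate...
            uint64_t candidate = r.start + guard_pages;
            // ...and round up to the requested alignment.
            candidate = (candidate + align_pages - 1) / align_pages * align_pages;
            // The mapping plus the trailing guard must still fit in the range.
            if (candidate + num_pages + guard_pages <= r.start + r.pages) {
                return candidate;
            }
        }
        return std::nullopt;
    }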
| 2300 | Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 2301 | KMemoryPermission perm) { | ||
| 2302 | // Check that the map is in range. | ||
| 2303 | const size_t size = num_pages * PageSize; | ||
| 2304 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2305 | |||
| 2306 | // Lock the table. | ||
| 2307 | KScopedLightLock lk(m_general_lock); | ||
| 2308 | |||
| 2309 | // Check the memory state. | ||
| 2310 | size_t num_allocator_blocks; | ||
| 2311 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2312 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2313 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2314 | KMemoryAttribute::None)); | ||
| 2315 | |||
| 2316 | // Create an update allocator. | ||
| 2317 | Result allocator_result; | ||
| 2318 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2319 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2320 | R_TRY(allocator_result); | ||
| 2321 | |||
| 2322 | // We're going to perform an update, so create a helper. | ||
| 2323 | KScopedPageTableUpdater updater(this); | ||
| 2324 | |||
| 2325 | // Map the pages. | ||
| 2326 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); | ||
| 2327 | |||
| 2328 | // Update the blocks. | ||
| 2329 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | ||
| 2330 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2331 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2332 | |||
| 2333 | R_SUCCEED(); | ||
| 2334 | } | ||
| 2335 | |||
| 2336 | Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { | ||
| 2337 | // Check that the unmap is in range. | ||
| 2338 | const size_t size = num_pages * PageSize; | ||
| 2339 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2340 | |||
| 2341 | // Lock the table. | ||
| 2342 | KScopedLightLock lk(m_general_lock); | ||
| 2343 | |||
| 2344 | // Check the memory state. | ||
| 2345 | size_t num_allocator_blocks; | ||
| 2346 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2347 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2348 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2349 | KMemoryAttribute::None)); | ||
| 2350 | |||
| 2351 | // Create an update allocator. | ||
| 2352 | Result allocator_result; | ||
| 2353 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2354 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2355 | R_TRY(allocator_result); | ||
| 2356 | |||
| 2357 | // We're going to perform an update, so create a helper. | ||
| 2358 | KScopedPageTableUpdater updater(this); | ||
| 2359 | |||
| 2360 | // Perform the unmap. | ||
| 2361 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2362 | DisableMergeAttribute::None}; | ||
| 2363 | R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap)); | ||
| 2364 | |||
| 2365 | // Update the blocks. | ||
| 2366 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2367 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2368 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2369 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2370 | |||
| 2371 | R_SUCCEED(); | ||
| 2372 | } | ||
| 2373 | |||
| 2374 | Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 2375 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2376 | KMemoryState state, KMemoryPermission perm) { | ||
| 2377 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2378 | |||
| 2379 | // Ensure this is a valid map request. | ||
| 2380 | const size_t num_pages = pg.GetNumPages(); | ||
| 2381 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2382 | ResultInvalidCurrentMemory); | ||
| 2383 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2384 | |||
| 2385 | // Lock the table. | ||
| 2386 | KScopedLightLock lk(m_general_lock); | ||
| 2387 | |||
| 2388 | // Find a random address to map at. | ||
| 2389 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, | ||
| 2390 | 0, this->GetNumGuardPages()); | ||
| 2391 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2392 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2393 | ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||
| 2394 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2395 | KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess); | ||
| 2396 | |||
| 2397 | // Create an update allocator. | ||
| 2398 | Result allocator_result; | ||
| 2399 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2400 | m_memory_block_slab_manager); | ||
| 2401 | R_TRY(allocator_result); | ||
| 2402 | |||
| 2403 | // We're going to perform an update, so create a helper. | ||
| 2404 | KScopedPageTableUpdater updater(this); | ||
| 2405 | |||
| 2406 | // Perform mapping operation. | ||
| 2407 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2408 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2409 | |||
| 2410 | // Update the blocks. | ||
| 2411 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2412 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2413 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2414 | |||
| 2415 | // We successfully mapped the pages. | ||
| 2416 | *out_addr = addr; | ||
| 2417 | R_SUCCEED(); | ||
| 2418 | } | ||
| 2419 | |||
| 2420 | Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||
| 2421 | KMemoryPermission perm) { | ||
| 2422 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2423 | |||
| 2424 | // Ensure this is a valid map request. | ||
| 2425 | const size_t num_pages = pg.GetNumPages(); | ||
| 2426 | const size_t size = num_pages * PageSize; | ||
| 2427 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||
| 2428 | |||
| 2429 | // Lock the table. | ||
| 2430 | KScopedLightLock lk(m_general_lock); | ||
| 2431 | |||
| 2432 | // Check if state allows us to map. | ||
| 2433 | size_t num_allocator_blocks; | ||
| 2434 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, | ||
| 2435 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2436 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2437 | KMemoryAttribute::None)); | ||
| 2438 | |||
| 2439 | // Create an update allocator. | ||
| 2440 | Result allocator_result; | ||
| 2441 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2442 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2443 | R_TRY(allocator_result); | ||
| 2444 | |||
| 2445 | // We're going to perform an update, so create a helper. | ||
| 2446 | KScopedPageTableUpdater updater(this); | ||
| 2447 | |||
| 2448 | // Perform mapping operation. | ||
| 2449 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2450 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2451 | |||
| 2452 | // Update the blocks. | ||
| 2453 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2454 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2455 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2456 | |||
| 2457 | // We successfully mapped the pages. | ||
| 2458 | R_SUCCEED(); | ||
| 2459 | } | ||
| 2460 | |||
| 2461 | Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, | ||
| 2462 | KMemoryState state) { | ||
| 2463 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2464 | |||
| 2465 | // Ensure this is a valid unmap request. | ||
| 2466 | const size_t num_pages = pg.GetNumPages(); | ||
| 2467 | const size_t size = num_pages * PageSize; | ||
| 2468 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2469 | |||
| 2470 | // Lock the table. | ||
| 2471 | KScopedLightLock lk(m_general_lock); | ||
| 2472 | |||
| 2473 | // Check if state allows us to unmap. | ||
| 2474 | size_t num_allocator_blocks; | ||
| 2475 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2476 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2477 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2478 | KMemoryAttribute::None)); | ||
| 2479 | |||
| 2480 | // Check that the page group is valid. | ||
| 2481 | R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory); | ||
| 2482 | |||
| 2483 | // Create an update allocator. | ||
| 2484 | Result allocator_result; | ||
| 2485 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2486 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2487 | R_TRY(allocator_result); | ||
| 2488 | |||
| 2489 | // We're going to perform an update, so create a helper. | ||
| 2490 | KScopedPageTableUpdater updater(this); | ||
| 2491 | |||
| 2492 | // Perform unmapping operation. | ||
| 2493 | const KPageProperties properties = {KMemoryPermission::None, false, false, | ||
| 2494 | DisableMergeAttribute::None}; | ||
| 2495 | R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap)); | ||
| 2496 | |||
| 2497 | // Update the blocks. | ||
| 2498 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2499 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2500 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2501 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2502 | |||
| 2503 | R_SUCCEED(); | ||
| 2504 | } | ||
| 2505 | |||
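MapPages, UnmapPages, MapPageGroup, and UnmapPageGroup all follow the same skeleton: check the memory state, create an update allocator, perform the page-table operation, and only then publish the new block state. That ordering, reduced to a standalone sketch with illustrative names:

    #include <functional>

    enum class Status { Success, InvalidState, OutOfResources };

    // Minimal sketch of the pipeline shared by the Map/Unmap operations
    // above; each step can fail and aborts before any state is published.
    Status RunTableUpdate(const std::function<Status()>& check_state,
                          const std::function<Status()>& create_allocator,
                          const std::function<Status()>& operate,
                          const std::function<void()>& update_blocks) {
        if (Status s = check_state(); s != Status::Success) return s;
        if (Status s = create_allocator(); s != Status::Success) return s;
        if (Status s = operate(); s != Status::Success) return s;
        update_blocks();  // only after the table operation succeeded
        return Status::Success;
    }

Publishing the block update last means a failed Operate leaves the bookkeeping untouched, so no rollback of block state is ever needed on these paths.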
| 2506 | Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 2507 | KMemoryState state_mask, KMemoryState state, | ||
| 2508 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 2509 | KMemoryAttribute attr_mask, KMemoryAttribute attr) { | ||
| 2510 | // Ensure that the page group isn't null. | ||
| 2511 | ASSERT(out != nullptr); | ||
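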
| 2512 | |||
| 2513 | // Make sure that the region we're operating on is valid for the table. | ||
| 2514 | const size_t size = num_pages * PageSize; | ||
| 2515 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2516 | |||
| 2517 | // Lock the table. | ||
| 2518 | KScopedLightLock lk(m_general_lock); | ||
| 2519 | |||
| 2520 | // Check if state allows us to create the group. | ||
| 2521 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 2522 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 2523 | attr_mask, attr)); | ||
| 2524 | |||
| 2525 | // Create a new page group for the region. | ||
| 2526 | R_TRY(this->MakePageGroup(*out, address, num_pages)); | ||
| 2527 | |||
| 2528 | R_SUCCEED(); | ||
| 2529 | } | ||
| 2530 | |||
| 2531 | Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 2532 | Svc::MemoryPermission svc_perm) { | ||
| 2533 | const size_t num_pages = size / PageSize; | ||
| 2534 | |||
| 2535 | // Lock the table. | ||
| 2536 | KScopedLightLock lk(m_general_lock); | ||
| 2537 | |||
| 2538 | // Verify we can change the memory permission. | ||
| 2539 | KMemoryState old_state; | ||
| 2540 | KMemoryPermission old_perm; | ||
| 2541 | size_t num_allocator_blocks; | ||
| 2542 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 2543 | std::addressof(num_allocator_blocks), addr, size, | ||
| 2544 | KMemoryState::FlagCode, KMemoryState::FlagCode, | ||
| 2545 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2546 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2547 | |||
| 2548 | // Determine new perm/state. | ||
| 2549 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 2550 | KMemoryState new_state = old_state; | ||
| 2551 | const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite; | ||
| 2552 | const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 2553 | const bool was_x = | ||
| 2554 | (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 2555 | ASSERT(!(is_w && is_x)); | ||
| 2556 | |||
| 2557 | if (is_w) { | ||
| 2558 | switch (old_state) { | ||
| 2559 | case KMemoryState::Code: | ||
| 2560 | new_state = KMemoryState::CodeData; | ||
| 2561 | break; | ||
| 2562 | case KMemoryState::AliasCode: | ||
| 2563 | new_state = KMemoryState::AliasCodeData; | ||
| 2564 | break; | ||
| 2565 | default: | ||
| 2566 | ASSERT(false); | ||
| 2567 | break; | ||
| 2568 | } | ||
| 2569 | } | ||
| 2570 | |||
| 2571 | // Succeed if there's nothing to do. | ||
| 2572 | R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); | ||
| 2573 | |||
| 2574 | // Create an update allocator. | ||
| 2575 | Result allocator_result{ResultSuccess}; | ||
| 2576 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2577 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2578 | R_TRY(allocator_result); | ||
| 2579 | |||
| 2580 | // Perform mapping operation. | ||
| 2581 | const auto operation = | ||
| 2582 | was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; | ||
| 2583 | R_TRY(Operate(addr, num_pages, new_perm, operation)); | ||
| 2584 | |||
| 2585 | // Update the blocks. | ||
| 2586 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, | ||
| 2587 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 2588 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2589 | |||
| 2590 | // Ensure cache coherency, if we're setting pages as executable. | ||
| 2591 | if (is_x) { | ||
| 2592 | m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size); | ||
| 2593 | } | ||
| 2594 | |||
| 2595 | R_SUCCEED(); | ||
| 2596 | } | ||
| 2597 | |||
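Requesting write access on code memory promotes the state to its data-bearing counterpart, and write+execute is never granted together. The transition in isolation (illustrative enum, not the kernel's):

    #include <cassert>

    // Minimal sketch of the state promotion above: write access on code
    // memory moves it to the matching "data" state; W+X is rejected.
    enum class State { Code, CodeData, AliasCode, AliasCodeData };

    State PromoteForWrite(State old_state, bool want_write, bool want_exec) {
        assert(!(want_write && want_exec));  // W^X: never both
        if (!want_write) {
            return old_state;
        }
        switch (old_state) {
        case State::Code:      return State::CodeData;
        case State::AliasCode: return State::AliasCodeData;
        default:               assert(false); return old_state;
        }
    }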
| 2598 | KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) { | ||
| 2599 | KScopedLightLock lk(m_general_lock); | ||
| 2600 | |||
| 2601 | return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo(); | ||
| 2602 | } | ||
| 2603 | |||
| 2604 | KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) { | ||
| 2605 | if (!Contains(addr, 1)) { | ||
| 2606 | return { | ||
| 2607 | .m_address = GetInteger(m_address_space_end), | ||
| 2608 | .m_size = 0 - GetInteger(m_address_space_end), | ||
| 2609 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), | ||
| 2610 | .m_device_disable_merge_left_count = 0, | ||
| 2611 | .m_device_disable_merge_right_count = 0, | ||
| 2612 | .m_ipc_lock_count = 0, | ||
| 2613 | .m_device_use_count = 0, | ||
| 2614 | .m_ipc_disable_merge_count = 0, | ||
| 2615 | .m_permission = KMemoryPermission::None, | ||
| 2616 | .m_attribute = KMemoryAttribute::None, | ||
| 2617 | .m_original_permission = KMemoryPermission::None, | ||
| 2618 | .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, | ||
| 2619 | }; | ||
| 2620 | } | ||
| 2621 | |||
| 2622 | return QueryInfoImpl(addr); | ||
| 2623 | } | ||
| 2624 | |||
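The sentinel block returned for out-of-range queries uses unsigned wraparound: 0 - GetInteger(m_address_space_end) is the byte count from the end of the address space to the top of the 64-bit range, i.e. 2^64 - end. A small demonstration (the end address here is illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t address_space_end = 0x0000'8000'0000'0000ULL;  // illustrative
        const uint64_t sentinel_size = 0 - address_space_end;         // wraps modulo 2^64
        std::printf("%llx\n", (unsigned long long)sentinel_size);     // ffff800000000000
        return 0;
    }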
| 2625 | Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size, | ||
| 2626 | Svc::MemoryPermission svc_perm) { | ||
| 2627 | const size_t num_pages = size / PageSize; | ||
| 2628 | |||
| 2629 | // Lock the table. | ||
| 2630 | KScopedLightLock lk(m_general_lock); | ||
| 2631 | |||
| 2632 | // Verify we can change the memory permission. | ||
| 2633 | KMemoryState old_state; | ||
| 2634 | KMemoryPermission old_perm; | ||
| 2635 | size_t num_allocator_blocks; | ||
| 2636 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 2637 | std::addressof(num_allocator_blocks), addr, size, | ||
| 2638 | KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, | ||
| 2639 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2640 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2641 | |||
| 2642 | // Determine new perm. | ||
| 2643 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 2644 | R_SUCCEED_IF(old_perm == new_perm); | ||
| 2645 | |||
| 2646 | // Create an update allocator. | ||
| 2647 | Result allocator_result{ResultSuccess}; | ||
| 2648 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2649 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2650 | R_TRY(allocator_result); | ||
| 2651 | |||
| 2652 | // Perform mapping operation. | ||
| 2653 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 2654 | |||
| 2655 | // Update the blocks. | ||
| 2656 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 2657 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 2658 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2659 | |||
| 2660 | R_SUCCEED(); | ||
| 2661 | } | ||
| 2662 | |||
| 2663 | Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) { | ||
| 2664 | const size_t num_pages = size / PageSize; | ||
| 2665 | ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == | ||
| 2666 | KMemoryAttribute::SetMask); | ||
| 2667 | |||
| 2668 | // Lock the table. | ||
| 2669 | KScopedLightLock lk(m_general_lock); | ||
| 2670 | |||
| 2671 | // Verify we can change the memory attribute. | ||
| 2672 | KMemoryState old_state; | ||
| 2673 | KMemoryPermission old_perm; | ||
| 2674 | KMemoryAttribute old_attr; | ||
| 2675 | size_t num_allocator_blocks; | ||
| 2676 | constexpr auto AttributeTestMask = | ||
| 2677 | ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared); | ||
| 2678 | const KMemoryState state_test_mask = | ||
| 2679 | static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached)) | ||
| 2680 | ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute) | ||
| 2681 | : 0) | | ||
| 2682 | ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked)) | ||
| 2683 | ? static_cast<u32>(KMemoryState::FlagCanPermissionLock) | ||
| 2684 | : 0)); | ||
| 2685 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 2686 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 2687 | addr, size, state_test_mask, state_test_mask, | ||
| 2688 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2689 | AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); | ||
| 2690 | |||
| 2691 | // Create an update allocator. | ||
| 2692 | Result allocator_result{ResultSuccess}; | ||
| 2693 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2694 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2695 | R_TRY(allocator_result); | ||
| 2696 | |||
| 2697 | // If we need to, perform a change attribute operation. | ||
| 2698 | if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) { | ||
| 2699 | // Perform operation. | ||
| 2700 | R_TRY(this->Operate(addr, num_pages, old_perm, | ||
| 2701 | OperationType::ChangePermissionsAndRefreshAndFlush, 0)); | ||
| 2702 | } | ||
| 2703 | |||
| 2704 | // Update the blocks. | ||
| 2705 | m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, | ||
| 2706 | static_cast<KMemoryAttribute>(mask), | ||
| 2707 | static_cast<KMemoryAttribute>(attr)); | ||
| 2708 | |||
| 2709 | R_SUCCEED(); | ||
| 2710 | } | ||
| 2711 | |||
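UpdateAttribute applies attr under mask; presumably the underlying bit arithmetic is the usual masked update, where bits selected by the mask are replaced and all others preserved. In isolation:

    #include <cstdint>

    // Masked attribute update: bits selected by `mask` are replaced with
    // the corresponding bits of `attr`; all other bits are preserved.
    constexpr uint32_t UpdateAttributeBits(uint32_t old_attr, uint32_t mask, uint32_t attr) {
        return (old_attr & ~mask) | (attr & mask);
    }

    static_assert(UpdateAttributeBits(0b1010, 0b0110, 0b0100) == 0b1100);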
| 2712 | Result KPageTable::SetMaxHeapSize(size_t size) { | ||
| 2713 | // Lock the table. | ||
| 2714 | KScopedLightLock lk(m_general_lock); | ||
| 2715 | |||
| 2716 | // Only process page tables are allowed to set heap size. | ||
| 2717 | ASSERT(!this->IsKernel()); | ||
| 2718 | |||
| 2719 | m_max_heap_size = size; | ||
| 2720 | |||
| 2721 | R_SUCCEED(); | ||
| 2722 | } | ||
| 2723 | |||
| 2724 | Result KPageTable::SetHeapSize(u64* out, size_t size) { | ||
| 2725 | // Lock the physical memory mutex. | ||
| 2726 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | ||
| 2727 | |||
| 2728 | // Try to perform a reduction in heap, instead of an extension. | ||
| 2729 | KProcessAddress cur_address{}; | ||
| 2730 | size_t allocation_size{}; | ||
| 2731 | { | ||
| 2732 | // Lock the table. | ||
| 2733 | KScopedLightLock lk(m_general_lock); | ||
| 2734 | |||
| 2735 | // Validate that setting heap size is possible at all. | ||
| 2736 | R_UNLESS(!m_is_kernel, ResultOutOfMemory); | ||
| 2737 | R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), | ||
| 2738 | ResultOutOfMemory); | ||
| 2739 | R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); | ||
| 2740 | |||
| 2741 | if (size < GetHeapSize()) { | ||
| 2742 | // The size being requested is less than the current size, so we need to free the end of | ||
| 2743 | // the heap. | ||
| 2744 | |||
| 2745 | // Validate memory state. | ||
| 2746 | size_t num_allocator_blocks; | ||
| 2747 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), | ||
| 2748 | m_heap_region_start + size, GetHeapSize() - size, | ||
| 2749 | KMemoryState::All, KMemoryState::Normal, | ||
| 2750 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 2751 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2752 | |||
| 2753 | // Create an update allocator. | ||
| 2754 | Result allocator_result{ResultSuccess}; | ||
| 2755 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2756 | m_memory_block_slab_manager, | ||
| 2757 | num_allocator_blocks); | ||
| 2758 | R_TRY(allocator_result); | ||
| 2759 | |||
| 2760 | // Unmap the end of the heap. | ||
| 2761 | const auto num_pages = (GetHeapSize() - size) / PageSize; | ||
| 2762 | R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None, | ||
| 2763 | OperationType::Unmap)); | ||
| 2764 | |||
| 2765 | // Release the memory from the resource limit. | ||
| 2766 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize); | ||
| 2767 | |||
| 2768 | // Apply the memory block update. | ||
| 2769 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | ||
| 2770 | num_pages, KMemoryState::Free, KMemoryPermission::None, | ||
| 2771 | KMemoryAttribute::None, | ||
| 2772 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2773 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2774 | : KMemoryBlockDisableMergeAttribute::None); | ||
| 2775 | |||
| 2776 | // Update the current heap end. | ||
| 2777 | m_current_heap_end = m_heap_region_start + size; | ||
| 2778 | |||
| 2779 | // Set the output. | ||
| 2780 | *out = GetInteger(m_heap_region_start); | ||
| 2781 | R_SUCCEED(); | ||
| 2782 | } else if (size == GetHeapSize()) { | ||
| 2783 | // The size requested is exactly the current size. | ||
| 2784 | *out = GetInteger(m_heap_region_start); | ||
| 2785 | R_SUCCEED(); | ||
| 2786 | } else { | ||
| 2787 | // We have to allocate memory. Determine how much to allocate and where while the table | ||
| 2788 | // is locked. | ||
| 2789 | cur_address = m_current_heap_end; | ||
| 2790 | allocation_size = size - GetHeapSize(); | ||
| 2791 | } | ||
| 2792 | } | ||
| 2793 | |||
| 2794 | // Reserve memory for the heap extension. | ||
| 2795 | KScopedResourceReservation memory_reservation( | ||
| 2796 | m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size); | ||
| 2797 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 2798 | |||
| 2799 | // Allocate pages for the heap extension. | ||
| 2800 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 2801 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | ||
| 2802 | &pg, allocation_size / PageSize, | ||
| 2803 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); | ||
| 2804 | |||
| 2805 | // Clear all the newly allocated pages. | ||
| 2806 | for (const auto& it : pg) { | ||
| 2807 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | ||
| 2808 | it.GetSize()); | ||
| 2809 | } | ||
| 2810 | |||
| 2811 | // Map the pages. | ||
| 2812 | { | ||
| 2813 | // Lock the table. | ||
| 2814 | KScopedLightLock lk(m_general_lock); | ||
| 2815 | |||
| 2816 | // Ensure that the heap hasn't changed since we began executing. | ||
| 2817 | ASSERT(cur_address == m_current_heap_end); | ||
| 2818 | |||
| 2819 | // Check the memory state. | ||
| 2820 | size_t num_allocator_blocks{}; | ||
| 2821 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, | ||
| 2822 | allocation_size, KMemoryState::All, KMemoryState::Free, | ||
| 2823 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2824 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2825 | |||
| 2826 | // Create an update allocator. | ||
| 2827 | Result allocator_result{ResultSuccess}; | ||
| 2828 | KMemoryBlockManagerUpdateAllocator allocator( | ||
| 2829 | std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2830 | R_TRY(allocator_result); | ||
| 2831 | |||
| 2832 | // Map the pages. | ||
| 2833 | const auto num_pages = allocation_size / PageSize; | ||
| 2834 | R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup)); | ||
| 2835 | |||
| 2836 | // Zero the newly mapped pages through the guest memory view as well. | ||
| 2837 | for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) { | ||
| 2838 | std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0, | ||
| 2839 | PageSize); | ||
| 2840 | } | ||
| 2841 | |||
| 2842 | // We succeeded, so commit our memory reservation. | ||
| 2843 | memory_reservation.Commit(); | ||
| 2844 | |||
| 2845 | // Apply the memory block update. | ||
| 2846 | m_memory_block_manager.Update( | ||
| 2847 | std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, | ||
| 2848 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2849 | m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2850 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 2851 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2852 | |||
| 2853 | // Update the current heap end. | ||
| 2854 | m_current_heap_end = m_heap_region_start + size; | ||
| 2855 | |||
| 2856 | // Set the output. | ||
| 2857 | *out = GetInteger(m_heap_region_start); | ||
| 2858 | R_SUCCEED(); | ||
| 2859 | } | ||
| 2860 | } | ||
| 2861 | |||
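The three branches of SetHeapSize above implement a shrink / no-op / grow policy: shrinking unmaps and releases the tail while the table lock is held, an exact match returns immediately, and growing only computes the allocation under the lock, deferring the actual allocation until after it is dropped. A minimal standalone sketch of just that decision, using illustrative names (kPageSize, HeapPlan) rather than yuzu's types:

    #include <cstddef>

    constexpr std::size_t kPageSize = 0x1000;

    enum class HeapAction { Shrink, NoOp, Grow };

    struct HeapPlan {
        HeapAction action;
        std::size_t pages;  // pages to unmap (Shrink) or to allocate (Grow)
    };

    // current and requested are both multiples of kPageSize.
    constexpr HeapPlan PlanResize(std::size_t current, std::size_t requested) {
        if (requested < current) {
            return {HeapAction::Shrink, (current - requested) / kPageSize};
        }
        if (requested == current) {
            return {HeapAction::NoOp, 0};
        }
        return {HeapAction::Grow, (requested - current) / kPageSize};
    }

    static_assert(PlanResize(4 * kPageSize, kPageSize).pages == 3);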
| 2862 | Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, | ||
| 2863 | size_t size, KMemoryPermission perm, | ||
| 2864 | bool is_aligned, bool check_heap) { | ||
| 2865 | // Lightly validate the range before doing anything else. | ||
| 2866 | const size_t num_pages = size / PageSize; | ||
| 2867 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2868 | |||
| 2869 | // Lock the table. | ||
| 2870 | KScopedLightLock lk(m_general_lock); | ||
| 2871 | |||
| 2872 | // Check the memory state. | ||
| 2873 | const auto test_state = | ||
| 2874 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | | ||
| 2875 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 2876 | size_t num_allocator_blocks; | ||
| 2877 | KMemoryState old_state; | ||
| 2878 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 2879 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 2880 | test_state, perm, perm, | ||
| 2881 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | ||
| 2882 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | ||
| 2883 | |||
| 2884 | // Create an update allocator. | ||
| 2885 | Result allocator_result; | ||
| 2886 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2887 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2888 | R_TRY(allocator_result); | ||
| 2889 | |||
| 2890 | // Update the memory blocks. | ||
| 2891 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 2892 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | ||
| 2893 | |||
| 2894 | // Set whether the locked memory was IO. | ||
| 2895 | *out_is_io = | ||
| 2896 | static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io; | ||
| 2897 | |||
| 2898 | R_SUCCEED(); | ||
| 2899 | } | ||
| 2900 | |||
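The test_state built above is the enum-class flag idiom used throughout these checks: optional flags are OR-ed into a required mask, and a range passes only if every bit of the mask is set. A self-contained sketch with hypothetical flag names:

    #include <cstdint>

    enum class Flag : std::uint32_t {
        None = 0,
        CanDeviceMap = 1u << 0,
        CanAlignedDeviceMap = 1u << 1,
        ReferenceCounted = 1u << 2,
    };

    constexpr Flag operator|(Flag a, Flag b) {
        return static_cast<Flag>(static_cast<std::uint32_t>(a) | static_cast<std::uint32_t>(b));
    }
    constexpr Flag operator&(Flag a, Flag b) {
        return static_cast<Flag>(static_cast<std::uint32_t>(a) & static_cast<std::uint32_t>(b));
    }

    // True iff every bit in `required` is set in `state`.
    constexpr bool HasAll(Flag state, Flag required) {
        return (state & required) == required;
    }

    // is_aligned && check_heap case from the routine above.
    constexpr Flag test_state = Flag::CanAlignedDeviceMap | Flag::ReferenceCounted;
    static_assert(HasAll(Flag::CanAlignedDeviceMap | Flag::ReferenceCounted, test_state));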
| 2901 | Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, | ||
| 2902 | bool check_heap) { | ||
| 2903 | // Lightly validate the range before doing anything else. | ||
| 2904 | const size_t num_pages = size / PageSize; | ||
| 2905 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2906 | |||
| 2907 | // Lock the table. | ||
| 2908 | KScopedLightLock lk(m_general_lock); | ||
| 2909 | |||
| 2910 | // Check the memory state. | ||
| 2911 | const auto test_state = KMemoryState::FlagCanDeviceMap | | ||
| 2912 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 2913 | size_t num_allocator_blocks; | ||
| 2914 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 2915 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, | ||
| 2916 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2917 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 2918 | |||
| 2919 | // Create an update allocator. | ||
| 2920 | Result allocator_result; | ||
| 2921 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2922 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2923 | R_TRY(allocator_result); | ||
| 2924 | |||
| 2925 | // Update the memory blocks. | ||
| 2926 | const KMemoryBlockManager::MemoryBlockLockFunction lock_func = | ||
| 2927 | m_enable_device_address_space_merge | ||
| 2928 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare | ||
| 2929 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; | ||
| 2930 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, | ||
| 2931 | KMemoryPermission::None); | ||
| 2932 | |||
| 2933 | R_SUCCEED(); | ||
| 2934 | } | ||
| 2935 | |||
| 2936 | Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { | ||
| 2937 | // Lightly validate the range before doing anything else. | ||
| 2938 | const size_t num_pages = size / PageSize; | ||
| 2939 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2940 | |||
| 2941 | // Lock the table. | ||
| 2942 | KScopedLightLock lk(m_general_lock); | ||
| 2943 | |||
| 2944 | // Check the memory state. | ||
| 2945 | size_t num_allocator_blocks; | ||
| 2946 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 2947 | std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 2948 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 2949 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 2950 | |||
| 2951 | // Create an update allocator. | ||
| 2952 | Result allocator_result{ResultSuccess}; | ||
| 2953 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2954 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2955 | R_TRY(allocator_result); | ||
| 2956 | |||
| 2957 | // Update the memory blocks. | ||
| 2958 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 2959 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); | ||
| 2960 | |||
| 2961 | R_SUCCEED(); | ||
| 2962 | } | ||
| 2963 | |||
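ShareToDevice and the UnshareToDevice call in the unlock path above amount to a per-block reference count: the DeviceShared attribute is set on the first share and cleared when the last unshare drops the count to zero. A minimal sketch of that bookkeeping, with MemoryBlock standing in for the kernel's block type:

    #include <cassert>
    #include <cstdint>

    struct MemoryBlock {
        std::uint16_t device_use_count = 0;
        bool device_shared = false;  // stands in for KMemoryAttribute::DeviceShared

        void ShareToDevice() {
            device_shared = true;
            ++device_use_count;
        }
        void UnshareToDevice() {
            assert(device_use_count > 0);
            if (--device_use_count == 0) {
                device_shared = false;
            }
        }
    };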
| 2964 | Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, | ||
| 2965 | size_t size) { | ||
| 2966 | R_RETURN(this->LockMemoryAndOpen( | ||
| 2967 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2968 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 2969 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 2970 | KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 2971 | KMemoryAttribute::Locked)); | ||
| 2972 | } | ||
| 2973 | |||
| 2974 | Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { | ||
| 2975 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2976 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 2977 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2978 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2979 | KMemoryAttribute::Locked, nullptr)); | ||
| 2980 | } | ||
| 2981 | |||
| 2982 | Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 2983 | KMemoryPermission perm) { | ||
| 2984 | R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer, | ||
| 2985 | KMemoryState::FlagCanTransfer, KMemoryPermission::All, | ||
| 2986 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 2987 | KMemoryAttribute::None, perm, KMemoryAttribute::Locked)); | ||
| 2988 | } | ||
| 2989 | |||
| 2990 | Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size, | ||
| 2991 | const KPageGroup& pg) { | ||
| 2992 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer, | ||
| 2993 | KMemoryState::FlagCanTransfer, KMemoryPermission::None, | ||
| 2994 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2995 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2996 | KMemoryAttribute::Locked, std::addressof(pg))); | ||
| 2997 | } | ||
| 2998 | |||
| 2999 | Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) { | ||
| 3000 | R_RETURN(this->LockMemoryAndOpen( | ||
| 3001 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | ||
| 3002 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 3003 | KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 3004 | KMemoryAttribute::Locked)); | ||
| 3005 | } | ||
| 3006 | |||
| 3007 | Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) { | ||
| 3008 | R_RETURN(this->UnlockMemory( | ||
| 3009 | addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | ||
| 3010 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3011 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); | ||
| 3012 | } | ||
| 3013 | |||
| 3014 | bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const { | ||
| 3015 | auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr)); | ||
| 3016 | for (u64 offset{}; offset < size; offset += PageSize) { | ||
| 3017 | if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) { | ||
| 3018 | return false; | ||
| 3019 | } | ||
| 3020 | start_ptr += PageSize; | ||
| 3021 | } | ||
| 3022 | return true; | ||
| 3023 | } | ||
| 3024 | |||
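IsRegionContiguous verifies that the host backing advances by exactly one page per guest page, i.e. the guest-to-host translation is linear over the whole range. The same walk in isolation, with the translation passed in as a callable (an assumption for illustration, not the emulator's interface):

    #include <cstdint>
    #include <functional>

    constexpr std::uint64_t kPageSize = 0x1000;

    // translate maps a guest address to its host pointer, one page at a time.
    bool IsContiguous(const std::function<std::uint8_t*(std::uint64_t)>& translate,
                      std::uint64_t addr, std::uint64_t size) {
        std::uint8_t* expected = translate(addr);
        for (std::uint64_t offset = 0; offset < size; offset += kPageSize) {
            if (translate(addr + offset) != expected) {
                return false;  // backing jumped; region is not linearly mapped
            }
            expected += kPageSize;
        }
        return true;
    }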
| 3025 | void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages, | ||
| 3026 | KPageGroup& page_linked_list) { | ||
| 3027 | KProcessAddress addr{start}; | ||
| 3028 | while (addr < start + (num_pages * PageSize)) { | ||
| 3029 | const KPhysicalAddress paddr{GetPhysicalAddr(addr)}; | ||
| 3030 | ASSERT(paddr != 0); | ||
| 3031 | page_linked_list.AddBlock(paddr, 1); | ||
| 3032 | addr += PageSize; | ||
| 3033 | } | ||
| 3034 | } | ||
| 3035 | |||
| 3036 | KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages, | ||
| 3037 | u64 needed_num_pages, size_t align) { | ||
| 3038 | if (m_enable_aslr) { | ||
| 3039 | UNIMPLEMENTED(); | ||
| 3040 | } | ||
| 3041 | return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, | ||
| 3042 | IsKernel() ? 1 : 4); | ||
| 3043 | } | ||
| 3044 | |||
| 3045 | Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group, | ||
| 3046 | OperationType operation) { | ||
| 3047 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3048 | |||
| 3049 | ASSERT(Common::IsAligned(GetInteger(addr), PageSize)); | ||
| 3050 | ASSERT(num_pages > 0); | ||
| 3051 | ASSERT(num_pages == page_group.GetNumPages()); | ||
| 3052 | |||
| 3053 | switch (operation) { | ||
| 3054 | case OperationType::MapGroup: | ||
| 3055 | case OperationType::MapFirstGroup: { | ||
| 3056 | // We want to maintain a new reference to every page in the group. | ||
| 3057 | KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); | ||
| 3058 | |||
| 3059 | for (const auto& node : page_group) { | ||
| 3060 | const size_t size{node.GetNumPages() * PageSize}; | ||
| 3061 | |||
| 3062 | // Map the pages. | ||
| 3063 | m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); | ||
| 3064 | |||
| 3065 | addr += size; | ||
| 3066 | } | ||
| 3067 | |||
| 3068 | // We succeeded! We want to persist the reference to the pages. | ||
| 3069 | spg.CancelClose(); | ||
| 3070 | |||
| 3071 | break; | ||
| 3072 | } | ||
| 3073 | default: | ||
| 3074 | ASSERT(false); | ||
| 3075 | break; | ||
| 3076 | } | ||
| 3077 | |||
| 3078 | R_SUCCEED(); | ||
| 3079 | } | ||
| 3080 | |||
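KScopedPageGroup with CancelClose is a commit-or-rollback RAII guard: references are opened up front, dropped automatically on any early error return, and kept only if control reaches the cancel call. A generic sketch of the same guard, under the assumption that rollback is expressible as a callable:

    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : m_f(std::move(f)) {}
        ~ScopeGuard() {
            if (m_active) {
                m_f();  // rollback path: runs on early return
            }
        }
        void Cancel() {
            m_active = false;  // commit path: keep the side effects
        }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F m_f;
        bool m_active = true;
    };

    // Usage, mirroring the MapGroup case above (pages.Close() is hypothetical):
    //   ScopeGuard guard{[&] { pages.Close(); }};
    //   ... map pages; any early return rolls the references back ...
    //   guard.Cancel();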
| 3081 | Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, | ||
| 3082 | OperationType operation, KPhysicalAddress map_addr) { | ||
| 3083 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3084 | |||
| 3085 | ASSERT(num_pages > 0); | ||
| 3086 | ASSERT(Common::IsAligned(GetInteger(addr), PageSize)); | ||
| 3087 | ASSERT(ContainsPages(addr, num_pages)); | ||
| 3088 | |||
| 3089 | switch (operation) { | ||
| 3090 | case OperationType::Unmap: { | ||
| 3091 | // Ensure that any pages we track are closed on exit. | ||
| 3092 | KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()}; | ||
| 3093 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | ||
| 3094 | |||
| 3095 | this->AddRegionToPages(addr, num_pages, pages_to_close); | ||
| 3096 | m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); | ||
| 3097 | break; | ||
| 3098 | } | ||
| 3099 | case OperationType::Map: { | ||
| 3100 | ASSERT(map_addr); | ||
| 3101 | ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize)); | ||
| 3102 | m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | ||
| 3103 | |||
| 3104 | // Open references to pages, if we should. | ||
| 3105 | if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) { | ||
| 3106 | m_kernel.MemoryManager().Open(map_addr, num_pages); | ||
| 3107 | } | ||
| 3108 | break; | ||
| 3109 | } | ||
| 3110 | case OperationType::Separate: { | ||
| 3111 | // HACK: Unimplemented. | ||
| 3112 | break; | ||
| 3113 | } | ||
| 3114 | case OperationType::ChangePermissions: | ||
| 3115 | case OperationType::ChangePermissionsAndRefresh: | ||
| 3116 | case OperationType::ChangePermissionsAndRefreshAndFlush: | ||
| 3117 | break; | ||
| 3118 | default: | ||
| 3119 | ASSERT(false); | ||
| 3120 | break; | ||
| 3121 | } | ||
| 3122 | R_SUCCEED(); | ||
| 3123 | } | ||
| 3124 | |||
| 3125 | void KPageTable::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 3126 | while (page_list->Peek()) { | ||
| 3127 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 3128 | |||
| 3129 | // TODO(bunnei): Free pages once they are allocated in guest memory | ||
| 3130 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 3131 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 3132 | // this->GetPageTableManager().Free(page); | ||
| 3133 | } | ||
| 3134 | } | ||
| 3135 | |||
| 3136 | KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const { | ||
| 3137 | switch (state) { | ||
| 3138 | case Svc::MemoryState::Free: | ||
| 3139 | case Svc::MemoryState::Kernel: | ||
| 3140 | return m_address_space_start; | ||
| 3141 | case Svc::MemoryState::Normal: | ||
| 3142 | return m_heap_region_start; | ||
| 3143 | case Svc::MemoryState::Ipc: | ||
| 3144 | case Svc::MemoryState::NonSecureIpc: | ||
| 3145 | case Svc::MemoryState::NonDeviceIpc: | ||
| 3146 | return m_alias_region_start; | ||
| 3147 | case Svc::MemoryState::Stack: | ||
| 3148 | return m_stack_region_start; | ||
| 3149 | case Svc::MemoryState::Static: | ||
| 3150 | case Svc::MemoryState::ThreadLocal: | ||
| 3151 | return m_kernel_map_region_start; | ||
| 3152 | case Svc::MemoryState::Io: | ||
| 3153 | case Svc::MemoryState::Shared: | ||
| 3154 | case Svc::MemoryState::AliasCode: | ||
| 3155 | case Svc::MemoryState::AliasCodeData: | ||
| 3156 | case Svc::MemoryState::Transfered: | ||
| 3157 | case Svc::MemoryState::SharedTransfered: | ||
| 3158 | case Svc::MemoryState::SharedCode: | ||
| 3159 | case Svc::MemoryState::GeneratedCode: | ||
| 3160 | case Svc::MemoryState::CodeOut: | ||
| 3161 | case Svc::MemoryState::Coverage: | ||
| 3162 | case Svc::MemoryState::Insecure: | ||
| 3163 | return m_alias_code_region_start; | ||
| 3164 | case Svc::MemoryState::Code: | ||
| 3165 | case Svc::MemoryState::CodeData: | ||
| 3166 | return m_code_region_start; | ||
| 3167 | default: | ||
| 3168 | UNREACHABLE(); | ||
| 3169 | } | ||
| 3170 | } | ||
| 3171 | |||
| 3172 | size_t KPageTable::GetRegionSize(Svc::MemoryState state) const { | ||
| 3173 | switch (state) { | ||
| 3174 | case Svc::MemoryState::Free: | ||
| 3175 | case Svc::MemoryState::Kernel: | ||
| 3176 | return m_address_space_end - m_address_space_start; | ||
| 3177 | case Svc::MemoryState::Normal: | ||
| 3178 | return m_heap_region_end - m_heap_region_start; | ||
| 3179 | case Svc::MemoryState::Ipc: | ||
| 3180 | case Svc::MemoryState::NonSecureIpc: | ||
| 3181 | case Svc::MemoryState::NonDeviceIpc: | ||
| 3182 | return m_alias_region_end - m_alias_region_start; | ||
| 3183 | case Svc::MemoryState::Stack: | ||
| 3184 | return m_stack_region_end - m_stack_region_start; | ||
| 3185 | case Svc::MemoryState::Static: | ||
| 3186 | case Svc::MemoryState::ThreadLocal: | ||
| 3187 | return m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 3188 | case Svc::MemoryState::Io: | ||
| 3189 | case Svc::MemoryState::Shared: | ||
| 3190 | case Svc::MemoryState::AliasCode: | ||
| 3191 | case Svc::MemoryState::AliasCodeData: | ||
| 3192 | case Svc::MemoryState::Transfered: | ||
| 3193 | case Svc::MemoryState::SharedTransfered: | ||
| 3194 | case Svc::MemoryState::SharedCode: | ||
| 3195 | case Svc::MemoryState::GeneratedCode: | ||
| 3196 | case Svc::MemoryState::CodeOut: | ||
| 3197 | case Svc::MemoryState::Coverage: | ||
| 3198 | case Svc::MemoryState::Insecure: | ||
| 3199 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 3200 | case Svc::MemoryState::Code: | ||
| 3201 | case Svc::MemoryState::CodeData: | ||
| 3202 | return m_code_region_end - m_code_region_start; | ||
| 3203 | default: | ||
| 3204 | UNREACHABLE(); | ||
| 3205 | } | ||
| 3206 | } | ||
| 3207 | |||
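GetRegionAddress and GetRegionSize above are parallel switches over the same partition of memory states, and they must stay in lockstep. One hedged alternative (with the states and regions reduced for illustration, not the kernel's actual layout) is a single switch returning the whole region, so the pairing cannot drift:

    #include <cstdint>

    enum class State { Free, Normal, Ipc, Stack, Code };

    struct Region {
        std::uint64_t start;
        std::uint64_t end;
    };

    // Returning the whole region from one switch keeps address and size in
    // sync; callers take .start or (.end - .start) as needed.
    constexpr Region RegionFor(State s, Region address_space, Region heap, Region alias,
                               Region stack, Region code) {
        switch (s) {
        case State::Free:
            return address_space;
        case State::Normal:
            return heap;
        case State::Ipc:
            return alias;
        case State::Stack:
            return stack;
        case State::Code:
            return code;
        }
        return address_space;  // unreachable for valid states
    }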
| 3208 | bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const { | ||
| 3209 | const KProcessAddress end = addr + size; | ||
| 3210 | const KProcessAddress last = end - 1; | ||
| 3211 | |||
| 3212 | const KProcessAddress region_start = this->GetRegionAddress(state); | ||
| 3213 | const size_t region_size = this->GetRegionSize(state); | ||
| 3214 | |||
| 3215 | const bool is_in_region = | ||
| 3216 | region_start <= addr && addr < end && last <= region_start + region_size - 1; | ||
| 3217 | const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || | ||
| 3218 | m_heap_region_start == m_heap_region_end); | ||
| 3219 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || | ||
| 3220 | m_alias_region_start == m_alias_region_end); | ||
| 3221 | switch (state) { | ||
| 3222 | case Svc::MemoryState::Free: | ||
| 3223 | case Svc::MemoryState::Kernel: | ||
| 3224 | return is_in_region; | ||
| 3225 | case Svc::MemoryState::Io: | ||
| 3226 | case Svc::MemoryState::Static: | ||
| 3227 | case Svc::MemoryState::Code: | ||
| 3228 | case Svc::MemoryState::CodeData: | ||
| 3229 | case Svc::MemoryState::Shared: | ||
| 3230 | case Svc::MemoryState::AliasCode: | ||
| 3231 | case Svc::MemoryState::AliasCodeData: | ||
| 3232 | case Svc::MemoryState::Stack: | ||
| 3233 | case Svc::MemoryState::ThreadLocal: | ||
| 3234 | case Svc::MemoryState::Transfered: | ||
| 3235 | case Svc::MemoryState::SharedTransfered: | ||
| 3236 | case Svc::MemoryState::SharedCode: | ||
| 3237 | case Svc::MemoryState::GeneratedCode: | ||
| 3238 | case Svc::MemoryState::CodeOut: | ||
| 3239 | case Svc::MemoryState::Coverage: | ||
| 3240 | case Svc::MemoryState::Insecure: | ||
| 3241 | return is_in_region && !is_in_heap && !is_in_alias; | ||
| 3242 | case Svc::MemoryState::Normal: | ||
| 3243 | ASSERT(is_in_heap); | ||
| 3244 | return is_in_region && !is_in_alias; | ||
| 3245 | case Svc::MemoryState::Ipc: | ||
| 3246 | case Svc::MemoryState::NonSecureIpc: | ||
| 3247 | case Svc::MemoryState::NonDeviceIpc: | ||
| 3248 | ASSERT(is_in_alias); | ||
| 3249 | return is_in_region && !is_in_heap; | ||
| 3250 | default: | ||
| 3251 | return false; | ||
| 3252 | } | ||
| 3253 | } | ||
| 3254 | |||
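The three booleans in CanContain are half-open interval tests: containment within the state's home region, plus overlap tests against the heap and alias regions, with a guard so that an empty region (start == end) never overlaps anything. The same predicates in isolation:

    #include <cstdint>

    struct Range {
        std::uint64_t start;  // inclusive
        std::uint64_t end;    // exclusive
    };

    // Does [r.start, r.end) fully contain [addr, addr + size)?
    constexpr bool ContainsRange(Range r, std::uint64_t addr, std::uint64_t size) {
        const std::uint64_t end = addr + size;
        const std::uint64_t last = end - 1;
        return r.start <= addr && addr < end && last <= r.end - 1;
    }

    // Do two half-open ranges intersect? Empty ranges never do.
    constexpr bool Overlaps(Range a, Range b) {
        return !(a.end <= b.start || b.end <= a.start || a.start == a.end || b.start == b.end);
    }

    static_assert(ContainsRange({0x1000, 0x3000}, 0x1000, 0x1000));
    static_assert(!Overlaps({0x1000, 0x1000}, {0x0, 0x10000}));  // empty heap region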
| 3255 | Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, | ||
| 3256 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3257 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3258 | KMemoryAttribute attr) const { | ||
| 3259 | // Validate the states match expectation. | ||
| 3260 | R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); | ||
| 3261 | R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); | ||
| 3262 | R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); | ||
| 3263 | |||
| 3264 | R_SUCCEED(); | ||
| 3265 | } | ||
| 3266 | |||
| 3267 | Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, | ||
| 3268 | size_t size, KMemoryState state_mask, | ||
| 3269 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3270 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3271 | KMemoryAttribute attr) const { | ||
| 3272 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3273 | |||
| 3274 | // Get information about the first block. | ||
| 3275 | const KProcessAddress last_addr = addr + size - 1; | ||
| 3276 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 3277 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 3278 | |||
| 3279 | // If the start address isn't aligned, we need a block. | ||
| 3280 | const size_t blocks_for_start_align = | ||
| 3281 | (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; | ||
| 3282 | |||
| 3283 | while (true) { | ||
| 3284 | // Validate against the provided masks. | ||
| 3285 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 3286 | |||
| 3287 | // Break once we're done. | ||
| 3288 | if (last_addr <= info.GetLastAddress()) { | ||
| 3289 | break; | ||
| 3290 | } | ||
| 3291 | |||
| 3292 | // Advance our iterator. | ||
| 3293 | it++; | ||
| 3294 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 3295 | info = it->GetMemoryInfo(); | ||
| 3296 | } | ||
| 3297 | |||
| 3298 | // If the end address isn't aligned, we need a block. | ||
| 3299 | const size_t blocks_for_end_align = | ||
| 3300 | (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; | ||
| 3301 | |||
| 3302 | if (out_blocks_needed != nullptr) { | ||
| 3303 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | ||
| 3304 | } | ||
| 3305 | |||
| 3306 | R_SUCCEED(); | ||
| 3307 | } | ||
| 3308 | |||
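The blocks_for_start_align / blocks_for_end_align counts account for block splitting: if the checked range does not begin or end exactly on an existing block boundary, the subsequent update must split that block and therefore needs one extra descriptor on that side. A sketch of the counting with plain alignment helpers (names are illustrative):

    #include <cstddef>
    #include <cstdint>

    constexpr std::uint64_t kPageSize = 0x1000;

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
        return v - (v % a);
    }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
        return AlignDown(v + a - 1, a);
    }

    // first_block_start / last_block_end bound the existing blocks that
    // contain the range's first and last page.
    constexpr std::size_t BlocksNeeded(std::uint64_t addr, std::uint64_t size,
                                       std::uint64_t first_block_start,
                                       std::uint64_t last_block_end) {
        const std::size_t for_start = AlignDown(addr, kPageSize) != first_block_start ? 1 : 0;
        const std::size_t for_end = AlignUp(addr + size, kPageSize) != last_block_end ? 1 : 0;
        return for_start + for_end;
    }

    static_assert(BlocksNeeded(0x1000, 0x1000, 0x0, 0x10000) == 2);  // splits both sides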
| 3309 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 3310 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 3311 | KMemoryBlockManager::const_iterator it, | ||
| 3312 | KProcessAddress last_addr, KMemoryState state_mask, | ||
| 3313 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3314 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3315 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 3316 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3317 | |||
| 3318 | // Get information about the first block. | ||
| 3319 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 3320 | |||
| 3321 | // Validate all blocks in the range have correct state. | ||
| 3322 | const KMemoryState first_state = info.m_state; | ||
| 3323 | const KMemoryPermission first_perm = info.m_permission; | ||
| 3324 | const KMemoryAttribute first_attr = info.m_attribute; | ||
| 3325 | while (true) { | ||
| 3326 | // Validate the current block. | ||
| 3327 | R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); | ||
| 3328 | R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); | ||
| 3329 | R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), | ||
| 3330 | ResultInvalidCurrentMemory); | ||
| 3331 | |||
| 3332 | // Validate against the provided masks. | ||
| 3333 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 3334 | |||
| 3335 | // Break once we're done. | ||
| 3336 | if (last_addr <= info.GetLastAddress()) { | ||
| 3337 | break; | ||
| 3338 | } | ||
| 3339 | |||
| 3340 | // Advance our iterator. | ||
| 3341 | it++; | ||
| 3342 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 3343 | info = it->GetMemoryInfo(); | ||
| 3344 | } | ||
| 3345 | |||
| 3346 | // Write output state. | ||
| 3347 | if (out_state != nullptr) { | ||
| 3348 | *out_state = first_state; | ||
| 3349 | } | ||
| 3350 | if (out_perm != nullptr) { | ||
| 3351 | *out_perm = first_perm; | ||
| 3352 | } | ||
| 3353 | if (out_attr != nullptr) { | ||
| 3354 | *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr); | ||
| 3355 | } | ||
| 3356 | |||
| 3357 | // If the end address isn't aligned, we need a block. | ||
| 3358 | if (out_blocks_needed != nullptr) { | ||
| 3359 | const size_t blocks_for_end_align = | ||
| 3360 | (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) | ||
| 3361 | ? 1 | ||
| 3362 | : 0; | ||
| 3363 | *out_blocks_needed = blocks_for_end_align; | ||
| 3364 | } | ||
| 3365 | |||
| 3366 | R_SUCCEED(); | ||
| 3367 | } | ||
| 3368 | |||
| 3369 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 3370 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 3371 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3372 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3373 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3374 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 3375 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3376 | |||
| 3377 | // Check memory state. | ||
| 3378 | const KProcessAddress last_addr = addr + size - 1; | ||
| 3379 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 3380 | R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, | ||
| 3381 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); | ||
| 3382 | |||
| 3383 | // If the start address isn't aligned, we need a block. | ||
| 3384 | if (out_blocks_needed != nullptr && | ||
| 3385 | Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) { | ||
| 3386 | ++(*out_blocks_needed); | ||
| 3387 | } | ||
| 3388 | |||
| 3389 | R_SUCCEED(); | ||
| 3390 | } | ||
| 3391 | |||
| 3392 | Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress, | ||
| 3393 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3394 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3395 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3396 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 3397 | KMemoryAttribute lock_attr) { | ||
| 3398 | // Validate basic preconditions. | ||
| 3399 | ASSERT((lock_attr & attr) == KMemoryAttribute::None); | ||
| 3400 | ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == | ||
| 3401 | KMemoryAttribute::None); | ||
| 3402 | |||
| 3403 | // Validate the lock request. | ||
| 3404 | const size_t num_pages = size / PageSize; | ||
| 3405 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 3406 | |||
| 3407 | // Lock the table. | ||
| 3408 | KScopedLightLock lk(m_general_lock); | ||
| 3409 | |||
| 3410 | // Check that the output page group is empty, if it exists. | ||
| 3411 | if (out_pg) { | ||
| 3412 | ASSERT(out_pg->GetNumPages() == 0); | ||
| 3413 | } | ||
| 3414 | |||
| 3415 | // Check the state. | ||
| 3416 | KMemoryState old_state{}; | ||
| 3417 | KMemoryPermission old_perm{}; | ||
| 3418 | KMemoryAttribute old_attr{}; | ||
| 3419 | size_t num_allocator_blocks{}; | ||
| 3420 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 3421 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 3422 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 3423 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 3424 | attr_mask, attr)); | ||
| 3425 | |||
| 3426 | // Get the physical address, if we're supposed to. | ||
| 3427 | if (out_KPhysicalAddress != nullptr) { | ||
| 3428 | ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr)); | ||
| 3429 | } | ||
| 3430 | |||
| 3431 | // Make the page group, if we're supposed to. | ||
| 3432 | if (out_pg != nullptr) { | ||
| 3433 | R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); | ||
| 3434 | } | ||
| 3435 | |||
| 3436 | // Create an update allocator. | ||
| 3437 | Result allocator_result{ResultSuccess}; | ||
| 3438 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3439 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3440 | R_TRY(allocator_result); | ||
| 3441 | |||
| 3442 | // Decide on new perm and attr. | ||
| 3443 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 3444 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); | ||
| 3445 | |||
| 3446 | // Update permission, if we need to. | ||
| 3447 | if (new_perm != old_perm) { | ||
| 3448 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 3449 | } | ||
| 3450 | |||
| 3451 | // Apply the memory block updates. | ||
| 3452 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 3453 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, | ||
| 3454 | KMemoryBlockDisableMergeAttribute::None); | ||
| 3455 | |||
| 3456 | // If we have an output page group, open. | ||
| 3457 | if (out_pg) { | ||
| 3458 | out_pg->Open(); | ||
| 3459 | } | ||
| 3460 | |||
| 3461 | R_SUCCEED(); | ||
| 3462 | } | ||
| 3463 | |||
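LockMemoryAndOpen and the UnlockMemory routine that follows are deliberately symmetric: locking ORs the lock attribute into the block and may downgrade permissions, while unlocking requires that attribute to be present, clears it, and restores the caller-supplied permission. A compact sketch of the pairing on a simplified block (names and values here are assumptions, not the kernel's encoding):

    #include <cstdint>

    enum Attr : std::uint32_t { kAttrNone = 0, kAttrLocked = 1u << 0 };
    enum Perm : std::uint32_t { kPermNoAccess = 0, kPermUserRw = 3 };

    struct Block {
        std::uint32_t attr = kAttrNone;
        std::uint32_t perm = kPermUserRw;
    };

    bool Lock(Block& b, std::uint32_t lock_attr, std::uint32_t new_perm) {
        if ((b.attr & lock_attr) != 0) {
            return false;  // already locked: the state check would reject this
        }
        b.attr |= lock_attr;  // mirrors new_attr = old_attr | lock_attr
        b.perm = new_perm;
        return true;
    }

    bool Unlock(Block& b, std::uint32_t lock_attr, std::uint32_t restored_perm) {
        if ((b.attr & lock_attr) != lock_attr) {
            return false;  // was not locked: the state check would reject this
        }
        b.attr &= ~lock_attr;  // mirrors new_attr = old_attr & ~lock_attr
        b.perm = restored_perm;
        return true;
    }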
| 3464 | Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3465 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3466 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3467 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 3468 | KMemoryAttribute lock_attr, const KPageGroup* pg) { | ||
| 3469 | // Validate basic preconditions. | ||
| 3470 | ASSERT((attr_mask & lock_attr) == lock_attr); | ||
| 3471 | ASSERT((attr & lock_attr) == lock_attr); | ||
| 3472 | |||
| 3473 | // Validate the unlock request. | ||
| 3474 | const size_t num_pages = size / PageSize; | ||
| 3475 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 3476 | |||
| 3477 | // Lock the table. | ||
| 3478 | KScopedLightLock lk(m_general_lock); | ||
| 3479 | |||
| 3480 | // Check the state. | ||
| 3481 | KMemoryState old_state{}; | ||
| 3482 | KMemoryPermission old_perm{}; | ||
| 3483 | KMemoryAttribute old_attr{}; | ||
| 3484 | size_t num_allocator_blocks{}; | ||
| 3485 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 3486 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 3487 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 3488 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 3489 | attr_mask, attr)); | ||
| 3490 | |||
| 3491 | // Check the page group. | ||
| 3492 | if (pg != nullptr) { | ||
| 3493 | R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion); | ||
| 3494 | } | ||
| 3495 | |||
| 3496 | // Decide on new perm and attr. | ||
| 3497 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 3498 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); | ||
| 3499 | |||
| 3500 | // Create an update allocator. | ||
| 3501 | Result allocator_result{ResultSuccess}; | ||
| 3502 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3503 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3504 | R_TRY(allocator_result); | ||
| 3505 | |||
| 3506 | // Update permission, if we need to. | ||
| 3507 | if (new_perm != old_perm) { | ||
| 3508 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 3509 | } | ||
| 3510 | |||
| 3511 | // Apply the memory block updates. | ||
| 3512 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 3513 | new_attr, KMemoryBlockDisableMergeAttribute::None, | ||
| 3514 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 3515 | |||
| 3516 | R_SUCCEED(); | ||
| 3517 | } | ||
| 3518 | |||
| 3519 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 66f16faaf..5541bc13f 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -3,548 +3,14 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <memory> | 6 | #include "core/hle/kernel/k_page_table_base.h" |
| 7 | |||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/page_table.h" | ||
| 10 | #include "core/file_sys/program_metadata.h" | ||
| 11 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 12 | #include "core/hle/kernel/k_light_lock.h" | ||
| 13 | #include "core/hle/kernel/k_memory_block.h" | ||
| 14 | #include "core/hle/kernel/k_memory_block_manager.h" | ||
| 15 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 16 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 17 | #include "core/hle/kernel/k_typed_address.h" | ||
| 18 | #include "core/hle/result.h" | ||
| 19 | #include "core/memory.h" | ||
| 20 | |||
| 21 | namespace Core { | ||
| 22 | class System; | ||
| 23 | } | ||
| 24 | 7 | ||
| 25 | namespace Kernel { | 8 | namespace Kernel { |
| 26 | 9 | ||
| 27 | enum class DisableMergeAttribute : u8 { | 10 | class KPageTable final : public KPageTableBase { |
| 28 | None = (0U << 0), | ||
| 29 | DisableHead = (1U << 0), | ||
| 30 | DisableHeadAndBody = (1U << 1), | ||
| 31 | EnableHeadAndBody = (1U << 2), | ||
| 32 | DisableTail = (1U << 3), | ||
| 33 | EnableTail = (1U << 4), | ||
| 34 | EnableAndMergeHeadBodyTail = (1U << 5), | ||
| 35 | EnableHeadBodyTail = EnableHeadAndBody | EnableTail, | ||
| 36 | DisableHeadBodyTail = DisableHeadAndBody | DisableTail, | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct KPageProperties { | ||
| 40 | KMemoryPermission perm; | ||
| 41 | bool io; | ||
| 42 | bool uncached; | ||
| 43 | DisableMergeAttribute disable_merge_attributes; | ||
| 44 | }; | ||
| 45 | static_assert(std::is_trivial_v<KPageProperties>); | ||
| 46 | static_assert(sizeof(KPageProperties) == sizeof(u32)); | ||
| 47 | |||
| 48 | class KBlockInfoManager; | ||
| 49 | class KMemoryBlockManager; | ||
| 50 | class KResourceLimit; | ||
| 51 | class KSystemResource; | ||
| 52 | |||
| 53 | class KPageTable final { | ||
| 54 | protected: | ||
| 55 | struct PageLinkedList; | ||
| 56 | |||
| 57 | public: | ||
| 58 | enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll }; | ||
| 59 | |||
| 60 | YUZU_NON_COPYABLE(KPageTable); | ||
| 61 | YUZU_NON_MOVEABLE(KPageTable); | ||
| 62 | |||
| 63 | explicit KPageTable(Core::System& system_); | ||
| 64 | ~KPageTable(); | ||
| 65 | |||
| 66 | Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 67 | bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, | ||
| 68 | KProcessAddress code_addr, size_t code_size, | ||
| 69 | KSystemResource* system_resource, KResourceLimit* resource_limit, | ||
| 70 | Core::Memory::Memory& memory); | ||
| 71 | |||
| 72 | void Finalize(); | ||
| 73 | |||
| 74 | Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state, | ||
| 75 | KMemoryPermission perm); | ||
| 76 | Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 77 | Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 78 | ICacheInvalidationStrategy icache_invalidation_strategy); | ||
| 79 | Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table, | ||
| 80 | KProcessAddress src_addr); | ||
| 81 | Result MapPhysicalMemory(KProcessAddress addr, size_t size); | ||
| 82 | Result UnmapPhysicalMemory(KProcessAddress addr, size_t size); | ||
| 83 | Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size); | ||
| 84 | Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size); | ||
| 85 | Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 86 | Svc::MemoryPermission svc_perm); | ||
| 87 | KMemoryInfo QueryInfo(KProcessAddress addr); | ||
| 88 | Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm); | ||
| 89 | Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr); | ||
| 90 | Result SetMaxHeapSize(size_t size); | ||
| 91 | Result SetHeapSize(u64* out, size_t size); | ||
| 92 | Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, | ||
| 93 | KMemoryPermission perm, bool is_aligned, bool check_heap); | ||
| 94 | Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap); | ||
| 95 | |||
| 96 | Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size); | ||
| 97 | |||
| 98 | Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size); | ||
| 99 | Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); | ||
| 100 | |||
| 101 | Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 102 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 103 | KMemoryState dst_state, bool send); | ||
| 104 | Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 105 | Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 106 | |||
| 107 | Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 108 | KMemoryPermission perm); | ||
| 109 | Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg); | ||
| 110 | Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size); | ||
| 111 | Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg); | ||
| 112 | Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 113 | KMemoryState state_mask, KMemoryState state, | ||
| 114 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 115 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | ||
| 116 | |||
| 117 | Common::PageTable& PageTableImpl() { | ||
| 118 | return *m_page_table_impl; | ||
| 119 | } | ||
| 120 | |||
| 121 | const Common::PageTable& PageTableImpl() const { | ||
| 122 | return *m_page_table_impl; | ||
| 123 | } | ||
| 124 | |||
| 125 | KBlockInfoManager* GetBlockInfoManager() { | ||
| 126 | return m_block_info_manager; | ||
| 127 | } | ||
| 128 | |||
| 129 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 130 | KPhysicalAddress phys_addr, KProcessAddress region_start, | ||
| 131 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { | ||
| 132 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, | ||
| 133 | region_num_pages, state, perm)); | ||
| 134 | } | ||
| 135 | |||
| 136 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 137 | KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 138 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||
| 139 | this->GetRegionAddress(state), | ||
| 140 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 141 | } | ||
| 142 | |||
| 143 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||
| 144 | KMemoryPermission perm) { | ||
| 145 | R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, | ||
| 146 | this->GetRegionAddress(state), | ||
| 147 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 148 | } | ||
| 149 | |||
| 150 | Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 151 | KMemoryPermission perm); | ||
| 152 | Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); | ||
| 153 | |||
| 154 | Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 155 | KProcessAddress region_start, size_t region_num_pages, KMemoryState state, | ||
| 156 | KMemoryPermission perm); | ||
| 157 | Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, | ||
| 158 | KMemoryPermission perm); | ||
| 159 | Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); | ||
| 160 | void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 161 | const KPageGroup& pg); | ||
| 162 | |||
| 163 | KProcessAddress GetRegionAddress(Svc::MemoryState state) const; | ||
| 164 | size_t GetRegionSize(Svc::MemoryState state) const; | ||
| 165 | bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; | ||
| 166 | |||
| 167 | KProcessAddress GetRegionAddress(KMemoryState state) const { | ||
| 168 | return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 169 | } | ||
| 170 | size_t GetRegionSize(KMemoryState state) const { | ||
| 171 | return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 172 | } | ||
| 173 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 174 | return this->CanContain(addr, size, | ||
| 175 | static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 176 | } | ||
| 177 | |||
| 178 | protected: | ||
| 179 | struct PageLinkedList { | ||
| 180 | private: | ||
| 181 | struct Node { | ||
| 182 | Node* m_next; | ||
| 183 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 184 | }; | ||
| 185 | |||
| 186 | public: | ||
| 187 | constexpr PageLinkedList() = default; | ||
| 188 | |||
| 189 | void Push(Node* n) { | ||
| 190 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 191 | n->m_next = m_root; | ||
| 192 | m_root = n; | ||
| 193 | } | ||
| 194 | |||
| 195 | void Push(Core::Memory::Memory& memory, KVirtualAddress addr) { | ||
| 196 | this->Push(memory.GetPointer<Node>(GetInteger(addr))); | ||
| 197 | } | ||
| 198 | |||
| 199 | Node* Peek() const { | ||
| 200 | return m_root; | ||
| 201 | } | ||
| 202 | |||
| 203 | Node* Pop() { | ||
| 204 | Node* const r = m_root; | ||
| 205 | |||
| 206 | m_root = r->m_next; | ||
| 207 | r->m_next = nullptr; | ||
| 208 | |||
| 209 | return r; | ||
| 210 | } | ||
| 211 | |||
| 212 | private: | ||
| 213 | Node* m_root{}; | ||
| 214 | }; | ||
| 215 | static_assert(std::is_trivially_destructible<PageLinkedList>::value); | ||
| 216 | |||
| 217 | private: | ||
| 218 | enum class OperationType : u32 { | ||
| 219 | Map = 0, | ||
| 220 | MapGroup = 1, | ||
| 221 | MapFirstGroup = 2, | ||
| 222 | Unmap = 3, | ||
| 223 | ChangePermissions = 4, | ||
| 224 | ChangePermissionsAndRefresh = 5, | ||
| 225 | ChangePermissionsAndRefreshAndFlush = 6, | ||
| 226 | Separate = 7, | ||
| 227 | }; | ||
| 228 | |||
| 229 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | ||
| 230 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | ||
| 231 | |||
| 232 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 233 | KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, | ||
| 234 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm); | ||
| 235 | bool IsRegionContiguous(KProcessAddress addr, u64 size) const; | ||
| 236 | void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list); | ||
| 237 | KMemoryInfo QueryInfoImpl(KProcessAddress addr); | ||
| 238 | KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages, | ||
| 239 | u64 needed_num_pages, size_t align); | ||
| 240 | Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group, | ||
| 241 | OperationType operation); | ||
| 242 | Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, | ||
| 243 | OperationType operation, KPhysicalAddress map_addr = 0); | ||
| 244 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 245 | |||
| 246 | KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 247 | size_t num_pages, size_t alignment, size_t offset, | ||
| 248 | size_t guard_pages); | ||
| 249 | |||
| 250 | Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 251 | KMemoryState state_mask, KMemoryState state, | ||
| 252 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 253 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 254 | Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 255 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 256 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 257 | KMemoryAttribute attr) const { | ||
| 258 | R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, | ||
| 259 | perm, attr_mask, attr)); | ||
| 260 | } | ||
| 261 | |||
| 262 | Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, | ||
| 263 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 264 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 265 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 266 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 267 | KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, | ||
| 268 | KMemoryState state_mask, KMemoryState state, | ||
| 269 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 270 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 271 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 272 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 273 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 274 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 275 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 276 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 277 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 278 | Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 279 | KMemoryState state_mask, KMemoryState state, | ||
| 280 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 281 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 282 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 283 | R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | ||
| 284 | state_mask, state, perm_mask, perm, attr_mask, attr, | ||
| 285 | ignore_attr)); | ||
| 286 | } | ||
| 287 | Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 288 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 289 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 290 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 291 | R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, | ||
| 292 | attr_mask, attr, ignore_attr)); | ||
| 293 | } | ||
| 294 | |||
| 295 | Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress, | ||
| 296 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 297 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 298 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 299 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 300 | KMemoryAttribute lock_attr); | ||
| 301 | Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 302 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 303 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 304 | KMemoryPermission new_perm, KMemoryAttribute lock_attr, | ||
| 305 | const KPageGroup* pg); | ||
| 306 | |||
| 307 | Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 308 | bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 309 | |||
| 310 | bool IsLockedByCurrentThread() const { | ||
| 311 | return m_general_lock.IsLockedByCurrentThread(); | ||
| 312 | } | ||
| 313 | |||
| 314 | bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) { | ||
| 315 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 316 | |||
| 317 | return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); | ||
| 318 | } | ||
| 319 | |||
| 320 | bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const { | ||
| 321 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 322 | |||
| 323 | *out = GetPhysicalAddr(virt_addr); | ||
| 324 | |||
| 325 | return *out != 0; | ||
| 326 | } | ||
| 327 | |||
| 328 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 329 | KProcessAddress address, size_t size, KMemoryPermission test_perm, | ||
| 330 | KMemoryState dst_state); | ||
| 331 | Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr, | ||
| 332 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 333 | KPageTable& src_page_table, bool send); | ||
| 334 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address, | ||
| 335 | size_t size, KMemoryPermission prot_perm); | ||
| 336 | |||
| 337 | Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 338 | size_t num_pages, KMemoryPermission perm); | ||
| 339 | Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 340 | const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); | ||
| 341 | |||
| 342 | mutable KLightLock m_general_lock; | ||
| 343 | mutable KLightLock m_map_physical_memory_lock; | ||
| 344 | |||
| 345 | public: | ||
| 346 | constexpr KProcessAddress GetAddressSpaceStart() const { | ||
| 347 | return m_address_space_start; | ||
| 348 | } | ||
| 349 | constexpr KProcessAddress GetAddressSpaceEnd() const { | ||
| 350 | return m_address_space_end; | ||
| 351 | } | ||
| 352 | constexpr size_t GetAddressSpaceSize() const { | ||
| 353 | return m_address_space_end - m_address_space_start; | ||
| 354 | } | ||
| 355 | constexpr KProcessAddress GetHeapRegionStart() const { | ||
| 356 | return m_heap_region_start; | ||
| 357 | } | ||
| 358 | constexpr KProcessAddress GetHeapRegionEnd() const { | ||
| 359 | return m_heap_region_end; | ||
| 360 | } | ||
| 361 | constexpr size_t GetHeapRegionSize() const { | ||
| 362 | return m_heap_region_end - m_heap_region_start; | ||
| 363 | } | ||
| 364 | constexpr KProcessAddress GetAliasRegionStart() const { | ||
| 365 | return m_alias_region_start; | ||
| 366 | } | ||
| 367 | constexpr KProcessAddress GetAliasRegionEnd() const { | ||
| 368 | return m_alias_region_end; | ||
| 369 | } | ||
| 370 | constexpr size_t GetAliasRegionSize() const { | ||
| 371 | return m_alias_region_end - m_alias_region_start; | ||
| 372 | } | ||
| 373 | constexpr KProcessAddress GetStackRegionStart() const { | ||
| 374 | return m_stack_region_start; | ||
| 375 | } | ||
| 376 | constexpr KProcessAddress GetStackRegionEnd() const { | ||
| 377 | return m_stack_region_end; | ||
| 378 | } | ||
| 379 | constexpr size_t GetStackRegionSize() const { | ||
| 380 | return m_stack_region_end - m_stack_region_start; | ||
| 381 | } | ||
| 382 | constexpr KProcessAddress GetKernelMapRegionStart() const { | ||
| 383 | return m_kernel_map_region_start; | ||
| 384 | } | ||
| 385 | constexpr KProcessAddress GetKernelMapRegionEnd() const { | ||
| 386 | return m_kernel_map_region_end; | ||
| 387 | } | ||
| 388 | constexpr KProcessAddress GetCodeRegionStart() const { | ||
| 389 | return m_code_region_start; | ||
| 390 | } | ||
| 391 | constexpr KProcessAddress GetCodeRegionEnd() const { | ||
| 392 | return m_code_region_end; | ||
| 393 | } | ||
| 394 | constexpr KProcessAddress GetAliasCodeRegionStart() const { | ||
| 395 | return m_alias_code_region_start; | ||
| 396 | } | ||
| 397 | constexpr KProcessAddress GetAliasCodeRegionEnd() const { | ||
| 398 | return m_alias_code_region_end; | ||
| 399 | } | ||
| 400 | constexpr size_t GetAliasCodeRegionSize() const { | ||
| 401 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 402 | } | ||
| 403 | size_t GetNormalMemorySize() const { | ||
| 404 | KScopedLightLock lk(m_general_lock); | ||
| 405 | return GetHeapSize() + m_mapped_physical_memory_size; | ||
| 406 | } | ||
| 407 | constexpr size_t GetAddressSpaceWidth() const { | ||
| 408 | return m_address_space_width; | ||
| 409 | } | ||
| 410 | constexpr size_t GetHeapSize() const { | ||
| 411 | return m_current_heap_end - m_heap_region_start; | ||
| 412 | } | ||
| 413 | constexpr size_t GetNumGuardPages() const { | ||
| 414 | return IsKernel() ? 1 : 4; | ||
| 415 | } | ||
| 416 | KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const { | ||
| 417 | const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; | ||
| 418 | ASSERT(backing_addr); | ||
| 419 | return backing_addr + GetInteger(addr); | ||
| 420 | } | ||
| 421 | constexpr bool Contains(KProcessAddress addr) const { | ||
| 422 | return m_address_space_start <= addr && addr <= m_address_space_end - 1; | ||
| 423 | } | ||
| 424 | constexpr bool Contains(KProcessAddress addr, size_t size) const { | ||
| 425 | return m_address_space_start <= addr && addr < addr + size && | ||
| 426 | addr + size - 1 <= m_address_space_end - 1; | ||
| 427 | } | ||
| 428 | constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 429 | return this->Contains(addr, size) && m_alias_region_start <= addr && | ||
| 430 | addr + size - 1 <= m_alias_region_end - 1; | ||
| 431 | } | ||
| 432 | constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const { | ||
| 433 | return this->Contains(addr, size) && m_heap_region_start <= addr && | ||
| 434 | addr + size - 1 <= m_heap_region_end - 1; | ||
| 435 | } | ||
| 436 | |||
| 437 | public: | 11 | public: |
| 438 | static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout, | 12 | explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {} |
| 439 | KPhysicalAddress addr) { | 13 | ~KPageTable() = default; |
| 440 | return layout.GetLinearVirtualAddress(addr); | ||
| 441 | } | ||
| 442 | |||
| 443 | static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, | ||
| 444 | KVirtualAddress addr) { | ||
| 445 | return layout.GetLinearPhysicalAddress(addr); | ||
| 446 | } | ||
| 447 | |||
| 448 | static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout, | ||
| 449 | KPhysicalAddress addr) { | ||
| 450 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 451 | } | ||
| 452 | |||
| 453 | static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout, | ||
| 454 | KVirtualAddress addr) { | ||
| 455 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 456 | } | ||
| 457 | |||
| 458 | static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout, | ||
| 459 | KPhysicalAddress addr) { | ||
| 460 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 461 | } | ||
| 462 | |||
| 463 | static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout, | ||
| 464 | KVirtualAddress addr) { | ||
| 465 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 466 | } | ||
| 467 | |||
| 468 | private: | ||
| 469 | constexpr bool IsKernel() const { | ||
| 470 | return m_is_kernel; | ||
| 471 | } | ||
| 472 | constexpr bool IsAslrEnabled() const { | ||
| 473 | return m_enable_aslr; | ||
| 474 | } | ||
| 475 | |||
| 476 | constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const { | ||
| 477 | return (m_address_space_start <= addr) && | ||
| 478 | (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && | ||
| 479 | (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); | ||
| 480 | } | ||
| 481 | |||
| 482 | private: | ||
| 483 | class KScopedPageTableUpdater { | ||
| 484 | private: | ||
| 485 | KPageTable* m_pt{}; | ||
| 486 | PageLinkedList m_ll; | ||
| 487 | |||
| 488 | public: | ||
| 489 | explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} | ||
| 490 | explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} | ||
| 491 | ~KScopedPageTableUpdater() { | ||
| 492 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 493 | } | ||
| 494 | |||
| 495 | PageLinkedList* GetPageList() { | ||
| 496 | return std::addressof(m_ll); | ||
| 497 | } | ||
| 498 | }; | ||
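`KScopedPageTableUpdater` is a small RAII helper: operations append freed pages to the list while an update runs, and `FinalizeUpdate` is invoked exactly once when the scope unwinds, on success and early `R_TRY` exits alike. A generic sketch of the pattern, with stand-in types for `KPageTable` and `PageLinkedList`:

```cpp
#include <cstdio>
#include <vector>

struct Table {
    void FinalizeUpdate(std::vector<int>* freed_pages) {
        std::printf("releasing %zu pages\n", freed_pages->size());
        freed_pages->clear();
    }
};

class ScopedUpdater {
public:
    explicit ScopedUpdater(Table* t) : m_table(t) {}
    ~ScopedUpdater() { m_table->FinalizeUpdate(&m_pages); } // runs on any exit path
    std::vector<int>* GetPageList() { return &m_pages; }

private:
    Table* m_table;
    std::vector<int> m_pages; // pages freed mid-update, released at the end
};

int main() {
    Table table;
    ScopedUpdater updater(&table);
    updater.GetPageList()->push_back(42); // an operation frees a page
} // destructor finalizes even if an early return bailed out of the update
```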
| 499 | |||
| 500 | private: | ||
| 501 | KProcessAddress m_address_space_start{}; | ||
| 502 | KProcessAddress m_address_space_end{}; | ||
| 503 | KProcessAddress m_heap_region_start{}; | ||
| 504 | KProcessAddress m_heap_region_end{}; | ||
| 505 | KProcessAddress m_current_heap_end{}; | ||
| 506 | KProcessAddress m_alias_region_start{}; | ||
| 507 | KProcessAddress m_alias_region_end{}; | ||
| 508 | KProcessAddress m_stack_region_start{}; | ||
| 509 | KProcessAddress m_stack_region_end{}; | ||
| 510 | KProcessAddress m_kernel_map_region_start{}; | ||
| 511 | KProcessAddress m_kernel_map_region_end{}; | ||
| 512 | KProcessAddress m_code_region_start{}; | ||
| 513 | KProcessAddress m_code_region_end{}; | ||
| 514 | KProcessAddress m_alias_code_region_start{}; | ||
| 515 | KProcessAddress m_alias_code_region_end{}; | ||
| 516 | |||
| 517 | size_t m_max_heap_size{}; | ||
| 518 | size_t m_mapped_physical_memory_size{}; | ||
| 519 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 520 | size_t m_mapped_insecure_memory{}; | ||
| 521 | size_t m_mapped_ipc_server_memory{}; | ||
| 522 | size_t m_address_space_width{}; | ||
| 523 | |||
| 524 | KMemoryBlockManager m_memory_block_manager; | ||
| 525 | u32 m_allocate_option{}; | ||
| 526 | |||
| 527 | bool m_is_kernel{}; | ||
| 528 | bool m_enable_aslr{}; | ||
| 529 | bool m_enable_device_address_space_merge{}; | ||
| 530 | |||
| 531 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | ||
| 532 | KBlockInfoManager* m_block_info_manager{}; | ||
| 533 | KResourceLimit* m_resource_limit{}; | ||
| 534 | |||
| 535 | u32 m_heap_fill_value{}; | ||
| 536 | u32 m_ipc_fill_value{}; | ||
| 537 | u32 m_stack_fill_value{}; | ||
| 538 | const KMemoryRegion* m_cached_physical_heap_region{}; | ||
| 539 | |||
| 540 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; | ||
| 541 | KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; | ||
| 542 | |||
| 543 | std::unique_ptr<Common::PageTable> m_page_table_impl; | ||
| 544 | |||
| 545 | Core::System& m_system; | ||
| 546 | KernelCore& m_kernel; | ||
| 547 | Core::Memory::Memory* m_memory{}; | ||
| 548 | }; | 14 | }; |
| 549 | 15 | ||
| 550 | } // namespace Kernel | 16 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp new file mode 100644 index 000000000..6a57ad55c --- /dev/null +++ b/src/core/hle/kernel/k_page_table_base.cpp | |||
| @@ -0,0 +1,5716 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/scope_exit.h" | ||
| 5 | #include "common/settings.h" | ||
| 6 | #include "core/core.h" | ||
| 7 | #include "core/hle/kernel/k_address_space_info.h" | ||
| 8 | #include "core/hle/kernel/k_page_table_base.h" | ||
| 9 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | ||
| 10 | #include "core/hle/kernel/k_system_resource.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | namespace { | ||
| 15 | |||
| 16 | class KScopedLightLockPair { | ||
| 17 | YUZU_NON_COPYABLE(KScopedLightLockPair); | ||
| 18 | YUZU_NON_MOVEABLE(KScopedLightLockPair); | ||
| 19 | |||
| 20 | private: | ||
| 21 | KLightLock* m_lower; | ||
| 22 | KLightLock* m_upper; | ||
| 23 | |||
| 24 | public: | ||
| 25 | KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { | ||
| 26 | // Ensure our locks are in a consistent order. | ||
| 27 | if (std::addressof(lhs) <= std::addressof(rhs)) { | ||
| 28 | m_lower = std::addressof(lhs); | ||
| 29 | m_upper = std::addressof(rhs); | ||
| 30 | } else { | ||
| 31 | m_lower = std::addressof(rhs); | ||
| 32 | m_upper = std::addressof(lhs); | ||
| 33 | } | ||
| 34 | |||
| 35 | // Acquire both locks. | ||
| 36 | m_lower->Lock(); | ||
| 37 | if (m_lower != m_upper) { | ||
| 38 | m_upper->Lock(); | ||
| 39 | } | ||
| 40 | } | ||
| 41 | |||
| 42 | ~KScopedLightLockPair() { | ||
| 43 | // Unlock the upper lock. | ||
| 44 | if (m_upper != nullptr && m_upper != m_lower) { | ||
| 45 | m_upper->Unlock(); | ||
| 46 | } | ||
| 47 | |||
| 48 | // Unlock the lower lock. | ||
| 49 | if (m_lower != nullptr) { | ||
| 50 | m_lower->Unlock(); | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | public: | ||
| 55 | // Utility. | ||
| 56 | void TryUnlockHalf(KLightLock& lock) { | ||
| 57 | // Only allow unlocking if the lock is half the pair. | ||
| 58 | if (m_lower != m_upper) { | ||
| 59 | // We want to be sure the lock is one we own. | ||
| 60 | if (m_lower == std::addressof(lock)) { | ||
| 61 | lock.Unlock(); | ||
| 62 | m_lower = nullptr; | ||
| 63 | } else if (m_upper == std::addressof(lock)) { | ||
| 64 | lock.Unlock(); | ||
| 65 | m_upper = nullptr; | ||
| 66 | } | ||
| 67 | } | ||
| 68 | } | ||
| 69 | }; | ||
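`KScopedLightLockPair` avoids the classic AB/BA deadlock by always acquiring the lock at the lower address first, so any two threads locking the same pair agree on the order; the aliased case, where both references name one lock, is locked once. A standalone sketch of address-ordered locking using `std::mutex`:

```cpp
#include <memory>
#include <mutex>

// Address-ordered pair locking: every call site acquires the same two
// mutexes lowest-address-first, giving a globally consistent lock order.
class ScopedLockPair {
public:
    ScopedLockPair(std::mutex& a, std::mutex& b) {
        const bool a_first = std::addressof(a) <= std::addressof(b);
        m_lower = a_first ? &a : &b;
        m_upper = a_first ? &b : &a;
        m_lower->lock();
        if (m_lower != m_upper) { // degenerate case: both args alias one mutex
            m_upper->lock();
        }
    }
    ~ScopedLockPair() {
        if (m_lower != m_upper) {
            m_upper->unlock();
        }
        m_lower->unlock();
    }

private:
    std::mutex* m_lower;
    std::mutex* m_upper;
};

int main() {
    std::mutex a, b;
    ScopedLockPair lk(a, b); // safe regardless of argument order at any call site
}
```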
| 70 | |||
| 71 | template <typename AddressType> | ||
| 72 | void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) { | ||
| 73 | system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size); | ||
| 74 | } | ||
| 75 | |||
| 76 | template <typename AddressType> | ||
| 77 | Result InvalidateDataCache(AddressType addr, u64 size) { | ||
| 78 | R_SUCCEED(); | ||
| 79 | } | ||
| 80 | |||
| 81 | template <typename AddressType> | ||
| 82 | Result StoreDataCache(AddressType addr, u64 size) { | ||
| 83 | R_SUCCEED(); | ||
| 84 | } | ||
| 85 | |||
| 86 | template <typename AddressType> | ||
| 87 | Result FlushDataCache(AddressType addr, u64 size) { | ||
| 88 | R_SUCCEED(); | ||
| 89 | } | ||
| 90 | |||
| 91 | } // namespace | ||
| 92 | |||
| 93 | void KPageTableBase::MemoryRange::Open() { | ||
| 94 | // If the range contains heap pages, open them. | ||
| 95 | if (this->IsHeap()) { | ||
| 96 | m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize); | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | void KPageTableBase::MemoryRange::Close() { | ||
| 101 | // If the range contains heap pages, close them. | ||
| 102 | if (this->IsHeap()) { | ||
| 103 | m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize); | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | KPageTableBase::KPageTableBase(KernelCore& kernel) | ||
| 108 | : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel), | ||
| 109 | m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {} | ||
| 110 | KPageTableBase::~KPageTableBase() = default; | ||
| 111 | |||
| 112 | Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start, | ||
| 113 | KVirtualAddress end, Core::Memory::Memory& memory) { | ||
| 114 | // Initialize our members. | ||
| 115 | m_address_space_width = | ||
| 116 | static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>()); | ||
| 117 | m_address_space_start = KProcessAddress(GetInteger(start)); | ||
| 118 | m_address_space_end = KProcessAddress(GetInteger(end)); | ||
| 119 | m_is_kernel = true; | ||
| 120 | m_enable_aslr = true; | ||
| 121 | m_enable_device_address_space_merge = false; | ||
| 122 | |||
| 123 | m_heap_region_start = 0; | ||
| 124 | m_heap_region_end = 0; | ||
| 125 | m_current_heap_end = 0; | ||
| 126 | m_alias_region_start = 0; | ||
| 127 | m_alias_region_end = 0; | ||
| 128 | m_stack_region_start = 0; | ||
| 129 | m_stack_region_end = 0; | ||
| 130 | m_kernel_map_region_start = 0; | ||
| 131 | m_kernel_map_region_end = 0; | ||
| 132 | m_alias_code_region_start = 0; | ||
| 133 | m_alias_code_region_end = 0; | ||
| 134 | m_code_region_start = 0; | ||
| 135 | m_code_region_end = 0; | ||
| 136 | m_max_heap_size = 0; | ||
| 137 | m_mapped_physical_memory_size = 0; | ||
| 138 | m_mapped_unsafe_physical_memory = 0; | ||
| 139 | m_mapped_insecure_memory = 0; | ||
| 140 | m_mapped_ipc_server_memory = 0; | ||
| 141 | |||
| 142 | m_memory_block_slab_manager = | ||
| 143 | m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer(); | ||
| 144 | m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer(); | ||
| 145 | m_resource_limit = m_kernel.GetSystemResourceLimit(); | ||
| 146 | |||
| 147 | m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System, | ||
| 148 | KMemoryManager::Direction::FromFront); | ||
| 149 | m_heap_fill_value = MemoryFillValue_Zero; | ||
| 150 | m_ipc_fill_value = MemoryFillValue_Zero; | ||
| 151 | m_stack_fill_value = MemoryFillValue_Zero; | ||
| 152 | |||
| 153 | m_cached_physical_linear_region = nullptr; | ||
| 154 | m_cached_physical_heap_region = nullptr; | ||
| 155 | |||
| 156 | // Initialize our implementation. | ||
| 157 | m_impl = std::make_unique<Common::PageTable>(); | ||
| 158 | m_impl->Resize(m_address_space_width, PageBits); | ||
| 159 | |||
| 160 | // Set the tracking memory. | ||
| 161 | m_memory = std::addressof(memory); | ||
| 162 | |||
| 163 | // Initialize our memory block manager. | ||
| 164 | R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, | ||
| 165 | m_memory_block_slab_manager)); | ||
| 166 | } | ||
| 167 | |||
| 168 | Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 169 | bool enable_das_merge, bool from_back, | ||
| 170 | KMemoryManager::Pool pool, KProcessAddress code_address, | ||
| 171 | size_t code_size, KSystemResource* system_resource, | ||
| 172 | KResourceLimit* resource_limit, | ||
| 173 | Core::Memory::Memory& memory) { | ||
| 174 | // Calculate region extents. | ||
| 175 | const size_t as_width = GetAddressSpaceWidth(as_type); | ||
| 176 | const KProcessAddress start = 0; | ||
| 177 | const KProcessAddress end = (1ULL << as_width); | ||
| 178 | |||
| 179 | // Validate the region. | ||
| 180 | ASSERT(start <= code_address); | ||
| 181 | ASSERT(code_address < code_address + code_size); | ||
| 182 | ASSERT(code_address + code_size - 1 <= end - 1); | ||
| 183 | |||
| 184 | // Define helpers. | ||
| 185 | auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) { | ||
| 186 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); | ||
| 187 | }; | ||
| 188 | auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) { | ||
| 189 | return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); | ||
| 190 | }; | ||
| 191 | |||
| 192 | // Set our bit width and heap/alias sizes. | ||
| 193 | m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type)); | ||
| 194 | size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | ||
| 195 | size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | ||
| 196 | |||
| 197 | // Adjust heap/alias size if we don't have an alias region. | ||
| 198 | if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) == | ||
| 199 | Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) { | ||
| 200 | heap_region_size += alias_region_size; | ||
| 201 | alias_region_size = 0; | ||
| 202 | } | ||
| 203 | |||
| 204 | // Set code regions and determine remaining sizes. | ||
| 205 | KProcessAddress process_code_start; | ||
| 206 | KProcessAddress process_code_end; | ||
| 207 | size_t stack_region_size; | ||
| 208 | size_t kernel_map_region_size; | ||
| 209 | if (m_address_space_width == 39) { | ||
| 210 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | ||
| 211 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | ||
| 212 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); | ||
| 213 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | ||
| 214 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); | ||
| 215 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); | ||
| 216 | m_alias_code_region_start = m_code_region_start; | ||
| 217 | m_alias_code_region_end = m_code_region_end; | ||
| 218 | process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment); | ||
| 219 | process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment); | ||
| 220 | } else { | ||
| 221 | stack_region_size = 0; | ||
| 222 | kernel_map_region_size = 0; | ||
| 223 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); | ||
| 224 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | ||
| 225 | m_stack_region_start = m_code_region_start; | ||
| 226 | m_alias_code_region_start = m_code_region_start; | ||
| 227 | m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + | ||
| 228 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); | ||
| 229 | m_stack_region_end = m_code_region_end; | ||
| 230 | m_kernel_map_region_start = m_code_region_start; | ||
| 231 | m_kernel_map_region_end = m_code_region_end; | ||
| 232 | process_code_start = m_code_region_start; | ||
| 233 | process_code_end = m_code_region_end; | ||
| 234 | } | ||
| 235 | |||
| 236 | // Set other basic fields. | ||
| 237 | m_enable_aslr = enable_aslr; | ||
| 238 | m_enable_device_address_space_merge = enable_das_merge; | ||
| 239 | m_address_space_start = start; | ||
| 240 | m_address_space_end = end; | ||
| 241 | m_is_kernel = false; | ||
| 242 | m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); | ||
| 243 | m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); | ||
| 244 | m_resource_limit = resource_limit; | ||
| 245 | |||
| 246 | // Determine the region in which we can place our as-yet-undetermined regions. | ||
| 247 | KProcessAddress alloc_start; | ||
| 248 | size_t alloc_size; | ||
| 249 | if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= | ||
| 250 | (GetInteger(end) - GetInteger(process_code_end))) { | ||
| 251 | alloc_start = m_code_region_start; | ||
| 252 | alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start); | ||
| 253 | } else { | ||
| 254 | alloc_start = process_code_end; | ||
| 255 | alloc_size = GetInteger(end) - GetInteger(process_code_end); | ||
| 256 | } | ||
| 257 | const size_t needed_size = | ||
| 258 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); | ||
| 259 | R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); | ||
| 260 | |||
| 261 | const size_t remaining_size = alloc_size - needed_size; | ||
| 262 | |||
| 263 | // Determine random placements for each region. | ||
| 264 | size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0; | ||
| 265 | if (enable_aslr) { | ||
| 266 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 267 | RegionAlignment; | ||
| 268 | heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 269 | RegionAlignment; | ||
| 270 | stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 271 | RegionAlignment; | ||
| 272 | kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 273 | RegionAlignment; | ||
| 274 | } | ||
| 275 | |||
| 276 | // Setup heap and alias regions. | ||
| 277 | m_alias_region_start = alloc_start + alias_rnd; | ||
| 278 | m_alias_region_end = m_alias_region_start + alias_region_size; | ||
| 279 | m_heap_region_start = alloc_start + heap_rnd; | ||
| 280 | m_heap_region_end = m_heap_region_start + heap_region_size; | ||
| 281 | |||
| 282 | if (alias_rnd <= heap_rnd) { | ||
| 283 | m_heap_region_start += alias_region_size; | ||
| 284 | m_heap_region_end += alias_region_size; | ||
| 285 | } else { | ||
| 286 | m_alias_region_start += heap_region_size; | ||
| 287 | m_alias_region_end += heap_region_size; | ||
| 288 | } | ||
| 289 | |||
| 290 | // Setup stack region. | ||
| 291 | if (stack_region_size) { | ||
| 292 | m_stack_region_start = alloc_start + stack_rnd; | ||
| 293 | m_stack_region_end = m_stack_region_start + stack_region_size; | ||
| 294 | |||
| 295 | if (alias_rnd < stack_rnd) { | ||
| 296 | m_stack_region_start += alias_region_size; | ||
| 297 | m_stack_region_end += alias_region_size; | ||
| 298 | } else { | ||
| 299 | m_alias_region_start += stack_region_size; | ||
| 300 | m_alias_region_end += stack_region_size; | ||
| 301 | } | ||
| 302 | |||
| 303 | if (heap_rnd < stack_rnd) { | ||
| 304 | m_stack_region_start += heap_region_size; | ||
| 305 | m_stack_region_end += heap_region_size; | ||
| 306 | } else { | ||
| 307 | m_heap_region_start += stack_region_size; | ||
| 308 | m_heap_region_end += stack_region_size; | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | // Setup kernel map region. | ||
| 313 | if (kernel_map_region_size) { | ||
| 314 | m_kernel_map_region_start = alloc_start + kmap_rnd; | ||
| 315 | m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; | ||
| 316 | |||
| 317 | if (alias_rnd < kmap_rnd) { | ||
| 318 | m_kernel_map_region_start += alias_region_size; | ||
| 319 | m_kernel_map_region_end += alias_region_size; | ||
| 320 | } else { | ||
| 321 | m_alias_region_start += kernel_map_region_size; | ||
| 322 | m_alias_region_end += kernel_map_region_size; | ||
| 323 | } | ||
| 324 | |||
| 325 | if (heap_rnd < kmap_rnd) { | ||
| 326 | m_kernel_map_region_start += heap_region_size; | ||
| 327 | m_kernel_map_region_end += heap_region_size; | ||
| 328 | } else { | ||
| 329 | m_heap_region_start += kernel_map_region_size; | ||
| 330 | m_heap_region_end += kernel_map_region_size; | ||
| 331 | } | ||
| 332 | |||
| 333 | if (stack_region_size) { | ||
| 334 | if (stack_rnd < kmap_rnd) { | ||
| 335 | m_kernel_map_region_start += stack_region_size; | ||
| 336 | m_kernel_map_region_end += stack_region_size; | ||
| 337 | } else { | ||
| 338 | m_stack_region_start += kernel_map_region_size; | ||
| 339 | m_stack_region_end += kernel_map_region_size; | ||
| 340 | } | ||
| 341 | } | ||
| 342 | } | ||
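The randomized layout above relies on a displacement argument: each region draws an independent offset in `[0, remaining]`, where `remaining` already excludes the combined region sizes, and whichever region drew the larger offset is pushed up by the other's size. That both prevents overlap and keeps every region inside the allocatable window. A two-region demonstration of the invariant; sizes and the seed are arbitrary:

```cpp
#include <cassert>
#include <cstdint>
#include <random>

int main() {
    std::mt19937_64 rng{12345};
    const std::uint64_t window = 1ULL << 20;
    const std::uint64_t size_a = 0x3000, size_b = 0x5000;
    const std::uint64_t remaining = window - (size_a + size_b);
    std::uniform_int_distribution<std::uint64_t> dist(0, remaining);

    for (int i = 0; i < 100000; ++i) {
        std::uint64_t a = dist(rng), b = dist(rng);
        // The region with the larger offset is displaced past the other.
        if (a <= b) {
            b += size_a;
        } else {
            a += size_b;
        }
        // Regions [a, a+size_a) and [b, b+size_b) never overlap...
        assert(a + size_a <= b || b + size_b <= a);
        // ...and both still fit inside [0, window).
        assert(a + size_a <= window && b + size_b <= window);
    }
}
```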
| 343 | |||
| 344 | // Set heap and fill members. | ||
| 345 | m_current_heap_end = m_heap_region_start; | ||
| 346 | m_max_heap_size = 0; | ||
| 347 | m_mapped_physical_memory_size = 0; | ||
| 348 | m_mapped_unsafe_physical_memory = 0; | ||
| 349 | m_mapped_insecure_memory = 0; | ||
| 350 | m_mapped_ipc_server_memory = 0; | ||
| 351 | |||
| 352 | // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled(); | ||
| 353 | const bool fill_memory = false; | ||
| 354 | m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero; | ||
| 355 | m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero; | ||
| 356 | m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero; | ||
| 357 | |||
| 358 | // Set allocation option. | ||
| 359 | m_allocate_option = | ||
| 360 | KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack | ||
| 361 | : KMemoryManager::Direction::FromFront); | ||
| 362 | |||
| 363 | // Ensure that our regions are inside our address space. | ||
| 364 | auto IsInAddressSpace = [&](KProcessAddress addr) { | ||
| 365 | return m_address_space_start <= addr && addr <= m_address_space_end; | ||
| 366 | }; | ||
| 367 | ASSERT(IsInAddressSpace(m_alias_region_start)); | ||
| 368 | ASSERT(IsInAddressSpace(m_alias_region_end)); | ||
| 369 | ASSERT(IsInAddressSpace(m_heap_region_start)); | ||
| 370 | ASSERT(IsInAddressSpace(m_heap_region_end)); | ||
| 371 | ASSERT(IsInAddressSpace(m_stack_region_start)); | ||
| 372 | ASSERT(IsInAddressSpace(m_stack_region_end)); | ||
| 373 | ASSERT(IsInAddressSpace(m_kernel_map_region_start)); | ||
| 374 | ASSERT(IsInAddressSpace(m_kernel_map_region_end)); | ||
| 375 | |||
| 376 | // Ensure that we selected regions that don't overlap. | ||
| 377 | const KProcessAddress alias_start = m_alias_region_start; | ||
| 378 | const KProcessAddress alias_last = m_alias_region_end - 1; | ||
| 379 | const KProcessAddress heap_start = m_heap_region_start; | ||
| 380 | const KProcessAddress heap_last = m_heap_region_end - 1; | ||
| 381 | const KProcessAddress stack_start = m_stack_region_start; | ||
| 382 | const KProcessAddress stack_last = m_stack_region_end - 1; | ||
| 383 | const KProcessAddress kmap_start = m_kernel_map_region_start; | ||
| 384 | const KProcessAddress kmap_last = m_kernel_map_region_end - 1; | ||
| 385 | ASSERT(alias_last < heap_start || heap_last < alias_start); | ||
| 386 | ASSERT(alias_last < stack_start || stack_last < alias_start); | ||
| 387 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); | ||
| 388 | ASSERT(heap_last < stack_start || stack_last < heap_start); | ||
| 389 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); | ||
| 390 | |||
| 391 | // Initialize our implementation. | ||
| 392 | m_impl = std::make_unique<Common::PageTable>(); | ||
| 393 | m_impl->Resize(m_address_space_width, PageBits); | ||
| 394 | |||
| 395 | // Set the tracking memory. | ||
| 396 | m_memory = std::addressof(memory); | ||
| 397 | |||
| 398 | // Initialize our memory block manager. | ||
| 399 | R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, | ||
| 400 | m_memory_block_slab_manager)); | ||
| 401 | } | ||
| 402 | |||
| 403 | void KPageTableBase::Finalize() { | ||
| 404 | auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { | ||
| 405 | if (Settings::IsFastmemEnabled()) { | ||
| 406 | m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); | ||
| 407 | } | ||
| 408 | }; | ||
| 409 | |||
| 410 | // Finalize memory blocks. | ||
| 411 | m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback)); | ||
| 412 | |||
| 413 | // Free any unsafe mapped memory. | ||
| 414 | if (m_mapped_unsafe_physical_memory) { | ||
| 415 | UNIMPLEMENTED(); | ||
| 416 | } | ||
| 417 | |||
| 418 | // Release any insecure mapped memory. | ||
| 419 | if (m_mapped_insecure_memory) { | ||
| 420 | if (auto* const insecure_resource_limit = | ||
| 421 | KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); | ||
| 422 | insecure_resource_limit != nullptr) { | ||
| 423 | insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 424 | m_mapped_insecure_memory); | ||
| 425 | } | ||
| 426 | } | ||
| 427 | |||
| 428 | // Release any ipc server memory. | ||
| 429 | if (m_mapped_ipc_server_memory) { | ||
| 430 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 431 | m_mapped_ipc_server_memory); | ||
| 432 | } | ||
| 433 | |||
| 434 | // Close the backing page table, as the destructor is not called for guest objects. | ||
| 435 | m_impl.reset(); | ||
| 436 | } | ||
| 437 | |||
| 438 | KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const { | ||
| 439 | switch (state) { | ||
| 440 | case Svc::MemoryState::Free: | ||
| 441 | case Svc::MemoryState::Kernel: | ||
| 442 | return m_address_space_start; | ||
| 443 | case Svc::MemoryState::Normal: | ||
| 444 | return m_heap_region_start; | ||
| 445 | case Svc::MemoryState::Ipc: | ||
| 446 | case Svc::MemoryState::NonSecureIpc: | ||
| 447 | case Svc::MemoryState::NonDeviceIpc: | ||
| 448 | return m_alias_region_start; | ||
| 449 | case Svc::MemoryState::Stack: | ||
| 450 | return m_stack_region_start; | ||
| 451 | case Svc::MemoryState::Static: | ||
| 452 | case Svc::MemoryState::ThreadLocal: | ||
| 453 | return m_kernel_map_region_start; | ||
| 454 | case Svc::MemoryState::Io: | ||
| 455 | case Svc::MemoryState::Shared: | ||
| 456 | case Svc::MemoryState::AliasCode: | ||
| 457 | case Svc::MemoryState::AliasCodeData: | ||
| 458 | case Svc::MemoryState::Transfered: | ||
| 459 | case Svc::MemoryState::SharedTransfered: | ||
| 460 | case Svc::MemoryState::SharedCode: | ||
| 461 | case Svc::MemoryState::GeneratedCode: | ||
| 462 | case Svc::MemoryState::CodeOut: | ||
| 463 | case Svc::MemoryState::Coverage: | ||
| 464 | case Svc::MemoryState::Insecure: | ||
| 465 | return m_alias_code_region_start; | ||
| 466 | case Svc::MemoryState::Code: | ||
| 467 | case Svc::MemoryState::CodeData: | ||
| 468 | return m_code_region_start; | ||
| 469 | default: | ||
| 470 | UNREACHABLE(); | ||
| 471 | } | ||
| 472 | } | ||
| 473 | |||
| 474 | size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const { | ||
| 475 | switch (state) { | ||
| 476 | case Svc::MemoryState::Free: | ||
| 477 | case Svc::MemoryState::Kernel: | ||
| 478 | return m_address_space_end - m_address_space_start; | ||
| 479 | case Svc::MemoryState::Normal: | ||
| 480 | return m_heap_region_end - m_heap_region_start; | ||
| 481 | case Svc::MemoryState::Ipc: | ||
| 482 | case Svc::MemoryState::NonSecureIpc: | ||
| 483 | case Svc::MemoryState::NonDeviceIpc: | ||
| 484 | return m_alias_region_end - m_alias_region_start; | ||
| 485 | case Svc::MemoryState::Stack: | ||
| 486 | return m_stack_region_end - m_stack_region_start; | ||
| 487 | case Svc::MemoryState::Static: | ||
| 488 | case Svc::MemoryState::ThreadLocal: | ||
| 489 | return m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 490 | case Svc::MemoryState::Io: | ||
| 491 | case Svc::MemoryState::Shared: | ||
| 492 | case Svc::MemoryState::AliasCode: | ||
| 493 | case Svc::MemoryState::AliasCodeData: | ||
| 494 | case Svc::MemoryState::Transfered: | ||
| 495 | case Svc::MemoryState::SharedTransfered: | ||
| 496 | case Svc::MemoryState::SharedCode: | ||
| 497 | case Svc::MemoryState::GeneratedCode: | ||
| 498 | case Svc::MemoryState::CodeOut: | ||
| 499 | case Svc::MemoryState::Coverage: | ||
| 500 | case Svc::MemoryState::Insecure: | ||
| 501 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 502 | case Svc::MemoryState::Code: | ||
| 503 | case Svc::MemoryState::CodeData: | ||
| 504 | return m_code_region_end - m_code_region_start; | ||
| 505 | default: | ||
| 506 | UNREACHABLE(); | ||
| 507 | } | ||
| 508 | } | ||
| 509 | |||
| 510 | bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const { | ||
| 511 | const KProcessAddress end = addr + size; | ||
| 512 | const KProcessAddress last = end - 1; | ||
| 513 | |||
| 514 | const KProcessAddress region_start = this->GetRegionAddress(state); | ||
| 515 | const size_t region_size = this->GetRegionSize(state); | ||
| 516 | |||
| 517 | const bool is_in_region = | ||
| 518 | region_start <= addr && addr < end && last <= region_start + region_size - 1; | ||
| 519 | const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || | ||
| 520 | m_heap_region_start == m_heap_region_end); | ||
| 521 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || | ||
| 522 | m_alias_region_start == m_alias_region_end); | ||
| 523 | switch (state) { | ||
| 524 | case Svc::MemoryState::Free: | ||
| 525 | case Svc::MemoryState::Kernel: | ||
| 526 | return is_in_region; | ||
| 527 | case Svc::MemoryState::Io: | ||
| 528 | case Svc::MemoryState::Static: | ||
| 529 | case Svc::MemoryState::Code: | ||
| 530 | case Svc::MemoryState::CodeData: | ||
| 531 | case Svc::MemoryState::Shared: | ||
| 532 | case Svc::MemoryState::AliasCode: | ||
| 533 | case Svc::MemoryState::AliasCodeData: | ||
| 534 | case Svc::MemoryState::Stack: | ||
| 535 | case Svc::MemoryState::ThreadLocal: | ||
| 536 | case Svc::MemoryState::Transfered: | ||
| 537 | case Svc::MemoryState::SharedTransfered: | ||
| 538 | case Svc::MemoryState::SharedCode: | ||
| 539 | case Svc::MemoryState::GeneratedCode: | ||
| 540 | case Svc::MemoryState::CodeOut: | ||
| 541 | case Svc::MemoryState::Coverage: | ||
| 542 | case Svc::MemoryState::Insecure: | ||
| 543 | return is_in_region && !is_in_heap && !is_in_alias; | ||
| 544 | case Svc::MemoryState::Normal: | ||
| 545 | ASSERT(is_in_heap); | ||
| 546 | return is_in_region && !is_in_alias; | ||
| 547 | case Svc::MemoryState::Ipc: | ||
| 548 | case Svc::MemoryState::NonSecureIpc: | ||
| 549 | case Svc::MemoryState::NonDeviceIpc: | ||
| 550 | ASSERT(is_in_alias); | ||
| 551 | return is_in_region && !is_in_heap; | ||
| 552 | default: | ||
| 553 | return false; | ||
| 554 | } | ||
| 555 | } | ||
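Note that `CanContain`, like `Contains` in the removed header, compares inclusive last addresses (`end - 1`, `region_start + region_size - 1`) rather than one-past-the-end values, so a region running up to the very top of the address space does not wrap to zero; `addr < end` also doubles as a zero-size/overflow guard. A small sketch of why the inclusive form matters, with illustrative values:

```cpp
#include <cassert>
#include <cstdint>

// Inclusive-last containment in the style of CanContain above. The region's
// exclusive end may be exactly 2^64; computing the inclusive last address
// wraps benignly and still compares correctly.
bool InRegion(std::uint64_t region_start, std::uint64_t region_size,
              std::uint64_t addr, std::uint64_t size) {
    const std::uint64_t last = addr + size - 1;
    const std::uint64_t region_last = region_start + region_size - 1;
    return region_start <= addr && addr < addr + size && last <= region_last;
}

int main() {
    const std::uint64_t start = 0xFFFF'FFFF'0000'0000ULL;
    const std::uint64_t size = 0x1'0000'0000ULL; // region ends exactly at 2^64
    // Inclusive-last arithmetic accepts a range near the very top...
    assert(InRegion(start, size, 0xFFFF'FFFF'8000'0000ULL, 0x1000));
    // ...where a naive exclusive-end comparison would wrap: start + size == 0.
    assert(start + size == 0);
}
```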
| 556 | |||
| 557 | Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, | ||
| 558 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 559 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 560 | KMemoryAttribute attr) const { | ||
| 561 | // Validate the states match expectation. | ||
| 562 | R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); | ||
| 563 | R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); | ||
| 564 | R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); | ||
| 565 | |||
| 566 | R_SUCCEED(); | ||
| 567 | } | ||
| 568 | |||
| 569 | Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, | ||
| 570 | size_t size, KMemoryState state_mask, | ||
| 571 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 572 | KMemoryPermission perm, | ||
| 573 | KMemoryAttribute attr_mask, | ||
| 574 | KMemoryAttribute attr) const { | ||
| 575 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 576 | |||
| 577 | // Get information about the first block. | ||
| 578 | const KProcessAddress last_addr = addr + size - 1; | ||
| 579 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 580 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 581 | |||
| 582 | // If the start address isn't aligned, we need a block. | ||
| 583 | const size_t blocks_for_start_align = | ||
| 584 | (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; | ||
| 585 | |||
| 586 | while (true) { | ||
| 587 | // Validate against the provided masks. | ||
| 588 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 589 | |||
| 590 | // Break once we're done. | ||
| 591 | if (last_addr <= info.GetLastAddress()) { | ||
| 592 | break; | ||
| 593 | } | ||
| 594 | |||
| 595 | // Advance our iterator. | ||
| 596 | it++; | ||
| 597 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 598 | info = it->GetMemoryInfo(); | ||
| 599 | } | ||
| 600 | |||
| 601 | // If the end address isn't aligned, we need a block. | ||
| 602 | const size_t blocks_for_end_align = | ||
| 603 | (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; | ||
| 604 | |||
| 605 | if (out_blocks_needed != nullptr) { | ||
| 606 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | ||
| 607 | } | ||
| 608 | |||
| 609 | R_SUCCEED(); | ||
| 610 | } | ||
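The block counting here reflects how `KMemoryBlockManager::Update` will later split existing blocks: an update range that starts mid-block splits the first block, and one that ends mid-block splits the last, so at most two extra descriptors are reserved. A reduced sketch of that accounting, with hypothetical block extents:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Updating [addr, addr+size) costs one extra block descriptor per unaligned
// edge: a head split if addr is not the first block's start, and a tail
// split if addr+size is not the last block's end.
std::size_t BlocksNeeded(std::uint64_t addr, std::uint64_t size,
                         std::uint64_t first_block_start,
                         std::uint64_t last_block_end) {
    std::size_t needed = 0;
    if (addr != first_block_start) {
        ++needed; // head split
    }
    if (addr + size != last_block_end) {
        ++needed; // tail split
    }
    return needed;
}

int main() {
    // Range starts mid-block and ends exactly on a block boundary: one split.
    assert(BlocksNeeded(0x3000, 0x2000, 0x2000, 0x5000) == 1);
    // Range covers whole blocks exactly: no splits needed.
    assert(BlocksNeeded(0x2000, 0x3000, 0x2000, 0x5000) == 0);
}
```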
| 611 | |||
| 612 | Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 613 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 614 | KMemoryBlockManager::const_iterator it, | ||
| 615 | KProcessAddress last_addr, KMemoryState state_mask, | ||
| 616 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 617 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 618 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 619 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 620 | |||
| 621 | // Get information about the first block. | ||
| 622 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 623 | |||
| 624 | // Validate all blocks in the range have correct state. | ||
| 625 | const KMemoryState first_state = info.m_state; | ||
| 626 | const KMemoryPermission first_perm = info.m_permission; | ||
| 627 | const KMemoryAttribute first_attr = info.m_attribute; | ||
| 628 | while (true) { | ||
| 629 | // Validate the current block. | ||
| 630 | R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); | ||
| 631 | R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); | ||
| 632 | R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), | ||
| 633 | ResultInvalidCurrentMemory); | ||
| 634 | |||
| 635 | // Validate against the provided masks. | ||
| 636 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 637 | |||
| 638 | // Break once we're done. | ||
| 639 | if (last_addr <= info.GetLastAddress()) { | ||
| 640 | break; | ||
| 641 | } | ||
| 642 | |||
| 643 | // Advance our iterator. | ||
| 644 | it++; | ||
| 645 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 646 | info = it->GetMemoryInfo(); | ||
| 647 | } | ||
| 648 | |||
| 649 | // Write output state. | ||
| 650 | if (out_state != nullptr) { | ||
| 651 | *out_state = first_state; | ||
| 652 | } | ||
| 653 | if (out_perm != nullptr) { | ||
| 654 | *out_perm = first_perm; | ||
| 655 | } | ||
| 656 | if (out_attr != nullptr) { | ||
| 657 | *out_attr = first_attr & ~ignore_attr; | ||
| 658 | } | ||
| 659 | |||
| 660 | // If the end address isn't aligned, we need a block. | ||
| 661 | if (out_blocks_needed != nullptr) { | ||
| 662 | const size_t blocks_for_end_align = | ||
| 663 | (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) | ||
| 664 | ? 1 | ||
| 665 | : 0; | ||
| 666 | *out_blocks_needed = blocks_for_end_align; | ||
| 667 | } | ||
| 668 | |||
| 669 | R_SUCCEED(); | ||
| 670 | } | ||
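The `ignore_attr` handling is a bitmask trick: OR-ing the ignored bits into both operands before comparing makes those bits compare equal regardless of their actual values, and the reported attribute masks them back out. In miniature, with made-up flag values:

```cpp
#include <cassert>
#include <cstdint>

int main() {
    constexpr std::uint32_t Locked = 1u << 0;
    constexpr std::uint32_t IpcLocked = 1u << 1;
    constexpr std::uint32_t ignore = IpcLocked;

    const std::uint32_t a = Locked;             // block 1: locked
    const std::uint32_t b = Locked | IpcLocked; // block 2: locked + ipc-locked
    // The blocks match when IpcLocked is ignored...
    assert((a | ignore) == (b | ignore));
    // ...and the reported attribute excludes the ignored bits.
    assert((b & ~ignore) == Locked);
}
```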
| 671 | |||
| 672 | Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 673 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 674 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 675 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 676 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 677 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 678 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 679 | |||
| 680 | // Check memory state. | ||
| 681 | const KProcessAddress last_addr = addr + size - 1; | ||
| 682 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 683 | R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, | ||
| 684 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); | ||
| 685 | |||
| 686 | // If the start address isn't aligned, we need a block. | ||
| 687 | if (out_blocks_needed != nullptr && | ||
| 688 | Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) { | ||
| 689 | ++(*out_blocks_needed); | ||
| 690 | } | ||
| 691 | |||
| 692 | R_SUCCEED(); | ||
| 693 | } | ||
| 694 | |||
| 695 | Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, | ||
| 696 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 697 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 698 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 699 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 700 | KMemoryAttribute lock_attr) { | ||
| 701 | // Validate basic preconditions. | ||
| 702 | ASSERT(False(lock_attr & attr)); | ||
| 703 | ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); | ||
| 704 | |||
| 705 | // Validate the lock request. | ||
| 706 | const size_t num_pages = size / PageSize; | ||
| 707 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 708 | |||
| 709 | // Lock the table. | ||
| 710 | KScopedLightLock lk(m_general_lock); | ||
| 711 | |||
| 712 | // Check that the output page group is empty, if it exists. | ||
| 713 | if (out_pg) { | ||
| 714 | ASSERT(out_pg->GetNumPages() == 0); | ||
| 715 | } | ||
| 716 | |||
| 717 | // Check the state. | ||
| 718 | KMemoryState old_state; | ||
| 719 | KMemoryPermission old_perm; | ||
| 720 | KMemoryAttribute old_attr; | ||
| 721 | size_t num_allocator_blocks; | ||
| 722 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 723 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 724 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 725 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 726 | attr_mask, attr)); | ||
| 727 | |||
| 728 | // Get the physical address, if we're supposed to. | ||
| 729 | if (out_paddr != nullptr) { | ||
| 730 | ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr)); | ||
| 731 | } | ||
| 732 | |||
| 733 | // Make the page group, if we're supposed to. | ||
| 734 | if (out_pg != nullptr) { | ||
| 735 | R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); | ||
| 736 | } | ||
| 737 | |||
| 738 | // Create an update allocator. | ||
| 739 | Result allocator_result; | ||
| 740 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 741 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 742 | R_TRY(allocator_result); | ||
| 743 | |||
| 744 | // Decide on new perm and attr. | ||
| 745 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 746 | KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr); | ||
| 747 | |||
| 748 | // Update permission, if we need to. | ||
| 749 | if (new_perm != old_perm) { | ||
| 750 | // We're going to perform an update, so create a helper. | ||
| 751 | KScopedPageTableUpdater updater(this); | ||
| 752 | |||
| 753 | const KPageProperties properties = {new_perm, false, | ||
| 754 | True(old_attr & KMemoryAttribute::Uncached), | ||
| 755 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 756 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 757 | OperationType::ChangePermissions, false)); | ||
| 758 | } | ||
| 759 | |||
| 760 | // Apply the memory block updates. | ||
| 761 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 762 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, | ||
| 763 | KMemoryBlockDisableMergeAttribute::None); | ||
| 764 | |||
| 765 | // If we have an output group, open. | ||
| 766 | if (out_pg) { | ||
| 767 | out_pg->Open(); | ||
| 768 | } | ||
| 769 | |||
| 770 | R_SUCCEED(); | ||
| 771 | } | ||
| 772 | |||
| 773 | Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 774 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 775 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 776 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 777 | KMemoryAttribute lock_attr, const KPageGroup* pg) { | ||
| 778 | // Validate basic preconditions. | ||
| 779 | ASSERT((attr_mask & lock_attr) == lock_attr); | ||
| 780 | ASSERT((attr & lock_attr) == lock_attr); | ||
| 781 | |||
| 782 | // Validate the unlock request. | ||
| 783 | const size_t num_pages = size / PageSize; | ||
| 784 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 785 | |||
| 786 | // Lock the table. | ||
| 787 | KScopedLightLock lk(m_general_lock); | ||
| 788 | |||
| 789 | // Check the state. | ||
| 790 | KMemoryState old_state; | ||
| 791 | KMemoryPermission old_perm; | ||
| 792 | KMemoryAttribute old_attr; | ||
| 793 | size_t num_allocator_blocks; | ||
| 794 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 795 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 796 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 797 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 798 | attr_mask, attr)); | ||
| 799 | |||
| 800 | // Check the page group. | ||
| 801 | if (pg != nullptr) { | ||
| 802 | R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion); | ||
| 803 | } | ||
| 804 | |||
| 805 | // Decide on new perm and attr. | ||
| 806 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 807 | KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr); | ||
| 808 | |||
| 809 | // Create an update allocator. | ||
| 810 | Result allocator_result; | ||
| 811 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 812 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 813 | R_TRY(allocator_result); | ||
| 814 | |||
| 815 | // Update permission, if we need to. | ||
| 816 | if (new_perm != old_perm) { | ||
| 817 | // We're going to perform an update, so create a helper. | ||
| 818 | KScopedPageTableUpdater updater(this); | ||
| 819 | |||
| 820 | const KPageProperties properties = {new_perm, false, | ||
| 821 | True(old_attr & KMemoryAttribute::Uncached), | ||
| 822 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 823 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 824 | OperationType::ChangePermissions, false)); | ||
| 825 | } | ||
| 826 | |||
| 827 | // Apply the memory block updates. | ||
| 828 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 829 | new_attr, KMemoryBlockDisableMergeAttribute::None, | ||
| 830 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 831 | |||
| 832 | R_SUCCEED(); | ||
| 833 | } | ||
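`LockMemoryAndOpen` and `UnlockMemory` are deliberately symmetric: locking verifies via the caller-supplied masks that the lock attribute is clear, ORs it in, and may drop permissions; unlocking requires it present, clears it, and restores permissions. A toy state machine capturing that shape, with stand-in values for the kernel's enums:

```cpp
#include <cassert>
#include <cstdint>

struct Block {
    std::uint32_t perm;
    std::uint32_t attr;
};

constexpr std::uint32_t PermUserRW = 0b11;
constexpr std::uint32_t PermKernelRead = 0b100;
constexpr std::uint32_t AttrLocked = 1u << 0;

bool Lock(Block& b) {
    if ((b.attr & AttrLocked) != 0) {
        return false; // state check fails: already locked
    }
    b.attr |= AttrLocked;
    b.perm = PermKernelRead; // e.g. reprotect as kernel-read/not user-mapped
    return true;
}

bool Unlock(Block& b) {
    if ((b.attr & AttrLocked) == 0) {
        return false; // state check fails: the lock attribute must be set
    }
    b.attr &= ~AttrLocked;
    b.perm = PermUserRW; // restore the caller-visible permission
    return true;
}

int main() {
    Block b{PermUserRW, 0};
    assert(Lock(b) && !Lock(b)); // double lock rejected
    assert(Unlock(b) && b.perm == PermUserRW && b.attr == 0);
}
```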
| 834 | |||
| 835 | Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page, | ||
| 836 | KProcessAddress address) const { | ||
| 837 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 838 | ASSERT(out_info != nullptr); | ||
| 839 | ASSERT(out_page != nullptr); | ||
| 840 | |||
| 841 | const KMemoryBlock* block = m_memory_block_manager.FindBlock(address); | ||
| 842 | R_UNLESS(block != nullptr, ResultInvalidCurrentMemory); | ||
| 843 | |||
| 844 | *out_info = block->GetMemoryInfo(); | ||
| 845 | out_page->flags = 0; | ||
| 846 | R_SUCCEED(); | ||
| 847 | } | ||
| 848 | |||
| 849 | Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size, | ||
| 850 | Svc::MemoryState state) const { | ||
| 851 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 852 | ASSERT(out != nullptr); | ||
| 853 | |||
| 854 | const KProcessAddress region_start = this->GetRegionAddress(state); | ||
| 855 | const size_t region_size = this->GetRegionSize(state); | ||
| 856 | |||
| 857 | // Check that the address/size are potentially valid. | ||
| 858 | R_UNLESS((address < address + size), ResultNotFound); | ||
| 859 | |||
| 860 | // Lock the table. | ||
| 861 | KScopedLightLock lk(m_general_lock); | ||
| 862 | |||
| 863 | auto& impl = this->GetImpl(); | ||
| 864 | |||
| 865 | // Begin traversal. | ||
| 866 | TraversalContext context; | ||
| 867 | TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 868 | bool cur_valid = false; | ||
| 869 | TraversalEntry next_entry; | ||
| 870 | bool next_valid; | ||
| 871 | size_t tot_size = 0; | ||
| 872 | |||
| 873 | next_valid = | ||
| 874 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start); | ||
| 875 | next_entry.block_size = | ||
| 876 | (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1))); | ||
| 877 | |||
| 878 | // Iterate, looking for entry. | ||
| 879 | while (true) { | ||
| 880 | if ((!next_valid && !cur_valid) || | ||
| 881 | (next_valid && cur_valid && | ||
| 882 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 883 | cur_entry.block_size += next_entry.block_size; | ||
| 884 | } else { | ||
| 885 | if (cur_valid && cur_entry.phys_addr <= address && | ||
| 886 | address + size <= cur_entry.phys_addr + cur_entry.block_size) { | ||
| 887 | // Check if this region is valid. | ||
| 888 | const KProcessAddress mapped_address = | ||
| 889 | (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr); | ||
| 890 | if (R_SUCCEEDED(this->CheckMemoryState( | ||
| 891 | mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state), | ||
| 892 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, | ||
| 893 | KMemoryAttribute::None, KMemoryAttribute::None))) { | ||
| 894 | // It is! | ||
| 895 | *out = mapped_address; | ||
| 896 | R_SUCCEED(); | ||
| 897 | } | ||
| 898 | } | ||
| 899 | |||
| 900 | // Update tracking variables. | ||
| 901 | tot_size += cur_entry.block_size; | ||
| 902 | cur_entry = next_entry; | ||
| 903 | cur_valid = next_valid; | ||
| 904 | } | ||
| 905 | |||
| 906 | if (cur_entry.block_size + tot_size >= region_size) { | ||
| 907 | break; | ||
| 908 | } | ||
| 909 | |||
| 910 | next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 911 | } | ||
| 912 | |||
| 913 | // Check the last entry. | ||
| 914 | R_UNLESS(cur_valid, ResultNotFound); | ||
| 915 | R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound); | ||
| 916 | R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound); | ||
| 917 | |||
| 918 | // Check if the last region is valid. | ||
| 919 | const KProcessAddress mapped_address = | ||
| 920 | (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr); | ||
| 921 | R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All, | ||
| 922 | static_cast<KMemoryState>(state), | ||
| 923 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, | ||
| 924 | KMemoryAttribute::None, KMemoryAttribute::None)) { | ||
| 925 | R_CONVERT_ALL(ResultNotFound); | ||
| 926 | } | ||
| 927 | R_END_TRY_CATCH; | ||
| 928 | |||
| 929 | // We found the region. | ||
| 930 | *out = mapped_address; | ||
| 931 | R_SUCCEED(); | ||
| 932 | } | ||
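`QueryMappingImpl` answers the reverse question, which virtual address maps a given physical range, by walking the region's traversal entries and coalescing physically contiguous neighbors into runs; when a run covers the queried range, the virtual address falls out of the accumulated size plus the offset into the run. A standalone sketch of the merge-and-search loop over hypothetical entries:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct Entry {
    std::uint64_t phys;
    std::uint64_t size;
};

// Walk per-block physical entries for a virtual region, merge physically
// contiguous neighbors into runs, and translate [phys, phys+size) back to a
// virtual address once a run covers it.
std::optional<std::uint64_t> FindMapping(std::uint64_t region_start,
                                         const std::vector<Entry>& entries,
                                         std::uint64_t phys, std::uint64_t size) {
    std::uint64_t run_phys = 0, run_size = 0, virt_off = 0;
    for (const Entry& e : entries) {
        if (run_size != 0 && e.phys == run_phys + run_size) {
            run_size += e.size; // physically contiguous: extend the run
            continue;
        }
        if (run_phys <= phys && phys + size <= run_phys + run_size) {
            return region_start + virt_off + (phys - run_phys);
        }
        virt_off += run_size; // start a new run
        run_phys = e.phys;
        run_size = e.size;
    }
    if (run_phys <= phys && phys + size <= run_phys + run_size) {
        return region_start + virt_off + (phys - run_phys);
    }
    return std::nullopt;
}

int main() {
    // Virtual pages at region_start map phys 0x10000, 0x11000 (contiguous),
    // then 0x30000.
    const std::vector<Entry> entries{{0x10000, 0x1000}, {0x11000, 0x1000},
                                     {0x30000, 0x1000}};
    auto va = FindMapping(0x4000000, entries, 0x10800, 0x1000);
    assert(va && *va == 0x4000800); // straddles the two merged pages
    assert(!FindMapping(0x4000000, entries, 0x20000, 0x1000));
}
```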
| 933 | |||
| 934 | Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 935 | size_t size) { | ||
| 936 | // Lock the table. | ||
| 937 | KScopedLightLock lk(m_general_lock); | ||
| 938 | |||
| 939 | // Validate that the source address's state is valid. | ||
| 940 | KMemoryState src_state; | ||
| 941 | size_t num_src_allocator_blocks; | ||
| 942 | R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, | ||
| 943 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 944 | KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 945 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 946 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 947 | |||
| 948 | // Validate that the dst address's state is valid. | ||
| 949 | size_t num_dst_allocator_blocks; | ||
| 950 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, | ||
| 951 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 952 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 953 | KMemoryAttribute::None)); | ||
| 954 | |||
| 955 | // Create an update allocator for the source. | ||
| 956 | Result src_allocator_result; | ||
| 957 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 958 | m_memory_block_slab_manager, | ||
| 959 | num_src_allocator_blocks); | ||
| 960 | R_TRY(src_allocator_result); | ||
| 961 | |||
| 962 | // Create an update allocator for the destination. | ||
| 963 | Result dst_allocator_result; | ||
| 964 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 965 | m_memory_block_slab_manager, | ||
| 966 | num_dst_allocator_blocks); | ||
| 967 | R_TRY(dst_allocator_result); | ||
| 968 | |||
| 969 | // Map the memory. | ||
| 970 | { | ||
| 971 | // Determine the number of pages being operated on. | ||
| 972 | const size_t num_pages = size / PageSize; | ||
| 973 | |||
| 974 | // Create a page group for the memory being mapped. | ||
| 975 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 976 | |||
| 977 | // Create the page group representing the source. | ||
| 978 | R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||
| 979 | |||
| 980 | // We're going to perform an update, so create a helper. | ||
| 981 | KScopedPageTableUpdater updater(this); | ||
| 982 | |||
| 983 | // Reprotect the source as kernel-read/not mapped. | ||
| 984 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | ||
| 985 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||
| 986 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||
| 987 | const KPageProperties src_properties = {new_src_perm, false, false, | ||
| 988 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 989 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 990 | OperationType::ChangePermissions, false)); | ||
| 991 | |||
| 992 | // Ensure that we unprotect the source pages on failure. | ||
| 993 | ON_RESULT_FAILURE { | ||
| 994 | const KPageProperties unprotect_properties = { | ||
| 995 | KMemoryPermission::UserReadWrite, false, false, | ||
| 996 | DisableMergeAttribute::EnableHeadBodyTail}; | ||
| 997 | R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, | ||
| 998 | unprotect_properties, OperationType::ChangePermissions, true)); | ||
| 999 | }; | ||
| 1000 | |||
| 1001 | // Map the alias pages. | ||
| 1002 | const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1003 | DisableMergeAttribute::DisableHead}; | ||
| 1004 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, | ||
| 1005 | false)); | ||
| 1006 | |||
| 1007 | // Apply the memory block updates. | ||
| 1008 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 1009 | src_state, new_src_perm, new_src_attr, | ||
| 1010 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 1011 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1012 | m_memory_block_manager.Update( | ||
| 1013 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack, | ||
| 1014 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1015 | KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | R_SUCCEED(); | ||
| 1019 | } | ||
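The `ON_RESULT_FAILURE` blocks give `MapMemory` transactional behavior: once the source is reprotected, any later failure (for instance while mapping the alias pages) runs the handler that restores the original permissions. A generic scope-guard sketch of the idiom; yuzu's macro keys off the surrounding `Result` rather than an explicit dismissal, so this shows the shape, not the implementation:

```cpp
#include <cstdio>
#include <utility>

// Register an undo action after each completed step; dismiss it once the
// whole operation succeeds, so a failure midway unwinds the earlier steps.
template <typename F>
class ScopeFailureGuard {
public:
    explicit ScopeFailureGuard(F f) : m_f(std::move(f)) {}
    ~ScopeFailureGuard() {
        if (m_armed) {
            m_f(); // roll back the completed step
        }
    }
    void Dismiss() { m_armed = false; }

private:
    F m_f;
    bool m_armed = true;
};

bool MapMemoryTransactional(bool alias_map_fails) {
    std::puts("reprotect source as kernel-read");
    ScopeFailureGuard undo([] { std::puts("rollback: restore source perms"); });
    if (alias_map_fails) {
        return false; // guard fires, source permissions are restored
    }
    std::puts("map alias pages");
    undo.Dismiss(); // commit: both halves of the update succeeded
    return true;
}

int main() {
    MapMemoryTransactional(true);  // prints the rollback message
    MapMemoryTransactional(false); // commits without rollback
}
```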
| 1020 | |||
| 1021 | Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1022 | size_t size) { | ||
| 1023 | // Lock the table. | ||
| 1024 | KScopedLightLock lk(m_general_lock); | ||
| 1025 | |||
| 1026 | // Validate that the source address's state is valid. | ||
| 1027 | KMemoryState src_state; | ||
| 1028 | size_t num_src_allocator_blocks; | ||
| 1029 | R_TRY(this->CheckMemoryState( | ||
| 1030 | std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), | ||
| 1031 | src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 1032 | KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, | ||
| 1033 | KMemoryAttribute::All, KMemoryAttribute::Locked)); | ||
| 1034 | |||
| 1035 | // Validate that the dst address's state is valid. | ||
| 1036 | KMemoryPermission dst_perm; | ||
| 1037 | size_t num_dst_allocator_blocks; | ||
| 1038 | R_TRY(this->CheckMemoryState( | ||
| 1039 | nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), | ||
| 1040 | dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, | ||
| 1041 | KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1042 | |||
| 1043 | // Create an update allocator for the source. | ||
| 1044 | Result src_allocator_result; | ||
| 1045 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1046 | m_memory_block_slab_manager, | ||
| 1047 | num_src_allocator_blocks); | ||
| 1048 | R_TRY(src_allocator_result); | ||
| 1049 | |||
| 1050 | // Create an update allocator for the destination. | ||
| 1051 | Result dst_allocator_result; | ||
| 1052 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1053 | m_memory_block_slab_manager, | ||
| 1054 | num_dst_allocator_blocks); | ||
| 1055 | R_TRY(dst_allocator_result); | ||
| 1056 | |||
| 1057 | // Unmap the memory. | ||
| 1058 | { | ||
| 1059 | // Determine the number of pages being operated on. | ||
| 1060 | const size_t num_pages = size / PageSize; | ||
| 1061 | |||
| 1062 | // Create a page group for the memory being unmapped. | ||
| 1063 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1064 | |||
| 1065 | // Create the page group representing the destination. | ||
| 1066 | R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||
| 1067 | |||
| 1068 | // Ensure the page group is valid for the source. | ||
| 1069 | R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||
| 1070 | |||
| 1071 | // We're going to perform an update, so create a helper. | ||
| 1072 | KScopedPageTableUpdater updater(this); | ||
| 1073 | |||
| 1074 | // Unmap the aliased copy of the pages. | ||
| 1075 | const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1076 | DisableMergeAttribute::None}; | ||
| 1077 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, | ||
| 1078 | dst_unmap_properties, OperationType::Unmap, false)); | ||
| 1079 | |||
| 1080 | // Ensure that we re-map the aliased pages on failure. | ||
| 1081 | ON_RESULT_FAILURE { | ||
| 1082 | this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||
| 1083 | }; | ||
| 1084 | |||
| 1085 | // Try to set the permissions for the source pages back to what they should be. | ||
| 1086 | const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1087 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 1088 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 1089 | OperationType::ChangePermissions, false)); | ||
| 1090 | |||
| 1091 | // Apply the memory block updates. | ||
| 1092 | m_memory_block_manager.Update( | ||
| 1093 | std::addressof(src_allocator), src_address, num_pages, src_state, | ||
| 1094 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1095 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 1096 | m_memory_block_manager.Update( | ||
| 1097 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 1098 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1099 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | R_SUCCEED(); | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1106 | size_t size) { | ||
| 1107 | // Validate the mapping request. | ||
| 1108 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 1109 | ResultInvalidMemoryRegion); | ||
| 1110 | |||
| 1111 | // Lock the table. | ||
| 1112 | KScopedLightLock lk(m_general_lock); | ||
| 1113 | |||
| 1114 | // Verify that the source memory is normal heap. | ||
| 1115 | KMemoryState src_state; | ||
| 1116 | KMemoryPermission src_perm; | ||
| 1117 | size_t num_src_allocator_blocks; | ||
| 1118 | R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, | ||
| 1119 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1120 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, | ||
| 1121 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 1122 | KMemoryAttribute::None)); | ||
| 1123 | |||
| 1124 | // Verify that the destination memory is unmapped. | ||
| 1125 | size_t num_dst_allocator_blocks; | ||
| 1126 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, | ||
| 1127 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1128 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1129 | KMemoryAttribute::None)); | ||
| 1130 | |||
| 1131 | // Create an update allocator for the source. | ||
| 1132 | Result src_allocator_result; | ||
| 1133 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1134 | m_memory_block_slab_manager, | ||
| 1135 | num_src_allocator_blocks); | ||
| 1136 | R_TRY(src_allocator_result); | ||
| 1137 | |||
| 1138 | // Create an update allocator for the destination. | ||
| 1139 | Result dst_allocator_result; | ||
| 1140 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1141 | m_memory_block_slab_manager, | ||
| 1142 | num_dst_allocator_blocks); | ||
| 1143 | R_TRY(dst_allocator_result); | ||
| 1144 | |||
| 1145 | // Map the code memory. | ||
| 1146 | { | ||
| 1147 | // Determine the number of pages being operated on. | ||
| 1148 | const size_t num_pages = size / PageSize; | ||
| 1149 | |||
| 1150 | // Create page groups for the memory being mapped. | ||
| 1151 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1152 | |||
| 1153 | // Create the page group representing the source. | ||
| 1154 | R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||
| 1155 | |||
| 1156 | // We're going to perform an update, so create a helper. | ||
| 1157 | KScopedPageTableUpdater updater(this); | ||
| 1158 | |||
| 1159 | // Reprotect the source as kernel-read/not mapped. | ||
| 1160 | const KMemoryPermission new_perm = static_cast<KMemoryPermission>( | ||
| 1161 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||
| 1162 | const KPageProperties src_properties = {new_perm, false, false, | ||
| 1163 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 1164 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 1165 | OperationType::ChangePermissions, false)); | ||
| 1166 | |||
| 1167 | // Ensure that we unprotect the source pages on failure. | ||
| 1168 | ON_RESULT_FAILURE { | ||
| 1169 | const KPageProperties unprotect_properties = { | ||
| 1170 | src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail}; | ||
| 1171 | R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, | ||
| 1172 | unprotect_properties, OperationType::ChangePermissions, true)); | ||
| 1173 | }; | ||
| 1174 | |||
| 1175 | // Map the alias pages. | ||
| 1176 | const KPageProperties dst_properties = {new_perm, false, false, | ||
| 1177 | DisableMergeAttribute::DisableHead}; | ||
| 1178 | R_TRY( | ||
| 1179 | this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); | ||
| 1180 | |||
| 1181 | // Apply the memory block updates. | ||
| 1182 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 1183 | src_state, new_perm, KMemoryAttribute::Locked, | ||
| 1184 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 1185 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1186 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||
| 1187 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, | ||
| 1188 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1189 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | R_SUCCEED(); | ||
| 1193 | } | ||
| 1194 | |||
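MapCodeMemory above demonstrates the ON_RESULT_FAILURE idiom: once the source has been reprotected, any later failure must undo that reprotection before the error propagates. A self-contained sketch of the same control flow, assuming nothing beyond standard C++ (FailureGuard and the step functions are hypothetical stand-ins for the kernel's macros):

    #include <cstdio>
    #include <functional>
    #include <utility>

    // Hypothetical stand-in for ON_RESULT_FAILURE: the rollback runs at
    // scope exit unless the operation completed and dismissed it.
    class FailureGuard {
    public:
        explicit FailureGuard(std::function<void()> rollback)
            : m_rollback(std::move(rollback)) {}
        ~FailureGuard() {
            if (m_armed) {
                m_rollback();
            }
        }
        void Dismiss() { m_armed = false; } // analogous to reaching R_SUCCEED()
    private:
        std::function<void()> m_rollback;
        bool m_armed = true;
    };

    bool ReprotectSource() { return true; }    // placeholder step
    bool MapAlias(bool fail) { return !fail; } // placeholder step
    void RestoreSourcePermissions() { std::puts("rolled back"); }

    bool MapCodeMemorySketch(bool fail_mapping) {
        if (!ReprotectSource()) {
            return false;
        }
        // From here on, failure must undo the reprotect.
        FailureGuard guard(RestoreSourcePermissions);
        if (!MapAlias(fail_mapping)) {
            return false; // guard fires on this path
        }
        guard.Dismiss(); // success: keep the new state
        return true;
    }

    int main() {
        MapCodeMemorySketch(true);  // prints "rolled back"
        MapCodeMemorySketch(false); // completes without rollback
    }
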
| 1195 | Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1196 | size_t size) { | ||
| 1197 | // Validate the mapping request. | ||
| 1198 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 1199 | ResultInvalidMemoryRegion); | ||
| 1200 | |||
| 1201 | // Lock the table. | ||
| 1202 | KScopedLightLock lk(m_general_lock); | ||
| 1203 | |||
| 1204 | // Verify that the source memory is locked normal heap. | ||
| 1205 | size_t num_src_allocator_blocks; | ||
| 1206 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1207 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, | ||
| 1208 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 1209 | KMemoryAttribute::Locked)); | ||
| 1210 | |||
| 1211 | // Verify that the destination memory is aliasable code. | ||
| 1212 | size_t num_dst_allocator_blocks; | ||
| 1213 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 1214 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, | ||
| 1215 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, | ||
| 1216 | KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None)); | ||
| 1217 | |||
| 1218 | // Determine whether any pages being unmapped are code. | ||
| 1219 | bool any_code_pages = false; | ||
| 1220 | { | ||
| 1221 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); | ||
| 1222 | while (true) { | ||
| 1223 | // Get the memory info. | ||
| 1224 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1225 | |||
| 1226 | // Check if the memory has code flag. | ||
| 1227 | if (True(info.GetState() & KMemoryState::FlagCode)) { | ||
| 1228 | any_code_pages = true; | ||
| 1229 | break; | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | // Check if we're done. | ||
| 1233 | if (dst_address + size - 1 <= info.GetLastAddress()) { | ||
| 1234 | break; | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | // Advance. | ||
| 1238 | ++it; | ||
| 1239 | } | ||
| 1240 | } | ||
| 1241 | |||
| 1242 | // Ensure that we maintain the instruction cache. | ||
| 1243 | bool reprotected_pages = false; | ||
| 1244 | SCOPE_EXIT({ | ||
| 1245 | if (reprotected_pages && any_code_pages) { | ||
| 1246 | InvalidateInstructionCache(m_system, dst_address, size); | ||
| 1247 | } | ||
| 1248 | }); | ||
| 1249 | |||
| 1250 | // Unmap. | ||
| 1251 | { | ||
| 1252 | // Determine the number of pages being operated on. | ||
| 1253 | const size_t num_pages = size / PageSize; | ||
| 1254 | |||
| 1255 | // Create page groups for the memory being unmapped. | ||
| 1256 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1257 | |||
| 1258 | // Create the page group representing the destination. | ||
| 1259 | R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||
| 1260 | |||
| 1261 | // Verify that the page group contains the same pages as the source. | ||
| 1262 | R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||
| 1263 | |||
| 1264 | // Create an update allocator for the source. | ||
| 1265 | Result src_allocator_result; | ||
| 1266 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1267 | m_memory_block_slab_manager, | ||
| 1268 | num_src_allocator_blocks); | ||
| 1269 | R_TRY(src_allocator_result); | ||
| 1270 | |||
| 1271 | // Create an update allocator for the destination. | ||
| 1272 | Result dst_allocator_result; | ||
| 1273 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1274 | m_memory_block_slab_manager, | ||
| 1275 | num_dst_allocator_blocks); | ||
| 1276 | R_TRY(dst_allocator_result); | ||
| 1277 | |||
| 1278 | // We're going to perform an update, so create a helper. | ||
| 1279 | KScopedPageTableUpdater updater(this); | ||
| 1280 | |||
| 1281 | // Unmap the aliased copy of the pages. | ||
| 1282 | const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1283 | DisableMergeAttribute::None}; | ||
| 1284 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, | ||
| 1285 | dst_unmap_properties, OperationType::Unmap, false)); | ||
| 1286 | |||
| 1287 | // Ensure that we re-map the aliased pages on failure. | ||
| 1288 | ON_RESULT_FAILURE { | ||
| 1289 | this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||
| 1290 | }; | ||
| 1291 | |||
| 1292 | // Try to set the permissions for the source pages back to what they should be. | ||
| 1293 | const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1294 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 1295 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 1296 | OperationType::ChangePermissions, false)); | ||
| 1297 | |||
| 1298 | // Apply the memory block updates. | ||
| 1299 | m_memory_block_manager.Update( | ||
| 1300 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 1301 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1302 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1303 | m_memory_block_manager.Update( | ||
| 1304 | std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, | ||
| 1305 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1306 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 1307 | |||
| 1308 | // Note that we reprotected pages. | ||
| 1309 | reprotected_pages = true; | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | R_SUCCEED(); | ||
| 1313 | } | ||
| 1314 | |||
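UnmapCodeMemory couples its cache maintenance to a SCOPE_EXIT guard: the callback always runs when the scope ends, and the two flags decide whether an instruction-cache flush is actually needed. A small sketch of that shape (ScopeExit and the stub are illustrative):

    #include <cstdio>
    #include <utility>

    // Hypothetical SCOPE_EXIT analog: the callback runs unconditionally at
    // scope end; captured flags gate the actual work.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F f) : m_f(std::move(f)) {}
        ~ScopeExit() { m_f(); }
    private:
        F m_f;
    };

    void InvalidateInstructionCacheStub() { std::puts("icache invalidated"); }

    void UnmapSketch(bool any_code_pages) {
        bool reprotected_pages = false;
        ScopeExit guard([&] {
            // Mirrors the condition above: flush only if mappings changed
            // and the range contained executable pages.
            if (reprotected_pages && any_code_pages) {
                InvalidateInstructionCacheStub();
            }
        });
        // ... unmap and permission restore would go here ...
        reprotected_pages = true; // set only after the updates succeed
    }

    int main() {
        UnmapSketch(true);  // flushes
        UnmapSketch(false); // no executable pages, so no flush
    }
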
| 1315 | Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 1316 | // Get the insecure memory resource limit and pool. | ||
| 1317 | auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); | ||
| 1318 | const auto insecure_pool = | ||
| 1319 | static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool()); | ||
| 1320 | |||
| 1321 | // Reserve the insecure memory. | ||
| 1322 | // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached. | ||
| 1323 | KScopedResourceReservation memory_reservation(insecure_resource_limit, | ||
| 1324 | Svc::LimitableResource::PhysicalMemoryMax, size); | ||
| 1325 | R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory); | ||
| 1326 | |||
| 1327 | // Allocate pages for the insecure memory. | ||
| 1328 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1329 | R_TRY(m_kernel.MemoryManager().AllocateAndOpen( | ||
| 1330 | std::addressof(pg), size / PageSize, | ||
| 1331 | KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront))); | ||
| 1332 | |||
| 1333 | // Close the opened pages when we're done with them. | ||
| 1334 | // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed | ||
| 1335 | // automatically. | ||
| 1336 | SCOPE_EXIT({ pg.Close(); }); | ||
| 1337 | |||
| 1338 | // Clear all the newly allocated pages. | ||
| 1339 | for (const auto& it : pg) { | ||
| 1340 | std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), | ||
| 1341 | static_cast<u32>(m_heap_fill_value), it.GetSize()); | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | // Lock the table. | ||
| 1345 | KScopedLightLock lk(m_general_lock); | ||
| 1346 | |||
| 1347 | // Validate that the address's state is valid. | ||
| 1348 | size_t num_allocator_blocks; | ||
| 1349 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1350 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1351 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1352 | KMemoryAttribute::None)); | ||
| 1353 | |||
| 1354 | // Create an update allocator. | ||
| 1355 | Result allocator_result; | ||
| 1356 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1357 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1358 | R_TRY(allocator_result); | ||
| 1359 | |||
| 1360 | // We're going to perform an update, so create a helper. | ||
| 1361 | KScopedPageTableUpdater updater(this); | ||
| 1362 | |||
| 1363 | // Map the pages. | ||
| 1364 | const size_t num_pages = size / PageSize; | ||
| 1365 | const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1366 | DisableMergeAttribute::DisableHead}; | ||
| 1367 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, | ||
| 1368 | OperationType::MapGroup, false)); | ||
| 1369 | |||
| 1370 | // Apply the memory block update. | ||
| 1371 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, | ||
| 1372 | KMemoryState::Insecure, KMemoryPermission::UserReadWrite, | ||
| 1373 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1374 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1375 | |||
| 1376 | // Update our mapped insecure size. | ||
| 1377 | m_mapped_insecure_memory += size; | ||
| 1378 | |||
| 1379 | // Commit the memory reservation. | ||
| 1380 | memory_reservation.Commit(); | ||
| 1381 | |||
| 1382 | // We succeeded. | ||
| 1383 | R_SUCCEED(); | ||
| 1384 | } | ||
| 1385 | |||
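MapInsecureMemory reserves against the resource limit before doing any work, and the reservation is released automatically on every failure path; only the explicit Commit() at the end makes it permanent. A compact sketch of that commit-or-release pattern (ResourceLimit and ScopedReservation are simplified stand-ins):

    #include <cassert>
    #include <cstddef>

    // Toy limit; the real KResourceLimit tracks several resource kinds.
    struct ResourceLimit {
        std::size_t used = 0;
        std::size_t cap = 0;
        bool Reserve(std::size_t n) {
            if (used + n > cap) {
                return false;
            }
            used += n;
            return true;
        }
        void Release(std::size_t n) { used -= n; }
    };

    // Released in the destructor unless Commit() was called first.
    class ScopedReservation {
    public:
        ScopedReservation(ResourceLimit& limit, std::size_t n)
            : m_limit(limit), m_size(n), m_ok(limit.Reserve(n)) {}
        ~ScopedReservation() {
            if (m_ok && !m_committed) {
                m_limit.Release(m_size);
            }
        }
        bool Succeeded() const { return m_ok; }
        void Commit() { m_committed = true; }
    private:
        ResourceLimit& m_limit;
        std::size_t m_size;
        bool m_ok;
        bool m_committed = false;
    };

    int main() {
        ResourceLimit limit{0, 0x4000};
        {
            ScopedReservation r(limit, 0x1000);
            assert(r.Succeeded());
            // An early return here would release automatically.
            r.Commit(); // mapping succeeded; keep the reservation
        }
        assert(limit.used == 0x1000);
    }
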
| 1386 | Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 1387 | // Lock the table. | ||
| 1388 | KScopedLightLock lk(m_general_lock); | ||
| 1389 | |||
| 1390 | // Check the memory state. | ||
| 1391 | size_t num_allocator_blocks; | ||
| 1392 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1393 | KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All, | ||
| 1394 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 1395 | KMemoryAttribute::None)); | ||
| 1396 | |||
| 1397 | // Create an update allocator. | ||
| 1398 | Result allocator_result; | ||
| 1399 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1400 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1401 | R_TRY(allocator_result); | ||
| 1402 | |||
| 1403 | // We're going to perform an update, so create a helper. | ||
| 1404 | KScopedPageTableUpdater updater(this); | ||
| 1405 | |||
| 1406 | // Unmap the memory. | ||
| 1407 | const size_t num_pages = size / PageSize; | ||
| 1408 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1409 | DisableMergeAttribute::None}; | ||
| 1410 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties, | ||
| 1411 | OperationType::Unmap, false)); | ||
| 1412 | |||
| 1413 | // Apply the memory block update. | ||
| 1414 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 1415 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1416 | KMemoryBlockDisableMergeAttribute::None, | ||
| 1417 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1418 | |||
| 1419 | // Update our mapped insecure size. | ||
| 1420 | m_mapped_insecure_memory -= size; | ||
| 1421 | |||
| 1422 | // Release the insecure memory from the insecure limit. | ||
| 1423 | if (auto* const insecure_resource_limit = | ||
| 1424 | KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); | ||
| 1425 | insecure_resource_limit != nullptr) { | ||
| 1426 | insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size); | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | R_SUCCEED(); | ||
| 1430 | } | ||
| 1431 | |||
| 1432 | KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 1433 | size_t num_pages, size_t alignment, size_t offset, | ||
| 1434 | size_t guard_pages) const { | ||
| 1435 | KProcessAddress address = 0; | ||
| 1436 | |||
| 1437 | if (num_pages <= region_num_pages) { | ||
| 1438 | if (this->IsAslrEnabled()) { | ||
| 1439 | // Try to directly find a free area up to 8 times. | ||
| 1440 | for (size_t i = 0; i < 8; i++) { | ||
| 1441 | const size_t random_offset = | ||
| 1442 | KSystemControl::GenerateRandomRange( | ||
| 1443 | 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * | ||
| 1444 | alignment; | ||
| 1445 | const KProcessAddress candidate = | ||
| 1446 | Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset; | ||
| 1447 | |||
| 1448 | KMemoryInfo info; | ||
| 1449 | Svc::PageInfo page_info; | ||
| 1450 | R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), | ||
| 1451 | candidate)); | ||
| 1452 | |||
| 1453 | if (info.m_state != KMemoryState::Free) { | ||
| 1454 | continue; | ||
| 1455 | } | ||
| 1456 | if (!(region_start <= candidate)) { | ||
| 1457 | continue; | ||
| 1458 | } | ||
| 1459 | if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { | ||
| 1460 | continue; | ||
| 1461 | } | ||
| 1462 | if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= | ||
| 1463 | info.GetLastAddress())) { | ||
| 1464 | continue; | ||
| 1465 | } | ||
| 1466 | if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= | ||
| 1467 | region_start + region_num_pages * PageSize - 1)) { | ||
| 1468 | continue; | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | address = candidate; | ||
| 1472 | break; | ||
| 1473 | } | ||
| 1474 | // Fall back to finding the first free area with a random offset. | ||
| 1475 | if (address == 0) { | ||
| 1476 | // NOTE: Nintendo does not account for guard pages here. | ||
| 1477 | // This may theoretically cause an offset to be chosen that cannot be mapped. | ||
| 1478 | // We will account for guard pages. | ||
| 1479 | const size_t offset_pages = KSystemControl::GenerateRandomRange( | ||
| 1480 | 0, region_num_pages - num_pages - guard_pages); | ||
| 1481 | address = m_memory_block_manager.FindFreeArea( | ||
| 1482 | region_start + offset_pages * PageSize, region_num_pages - offset_pages, | ||
| 1483 | num_pages, alignment, offset, guard_pages); | ||
| 1484 | } | ||
| 1485 | } | ||
| 1486 | // Find the first free area. | ||
| 1487 | if (address == 0) { | ||
| 1488 | address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, | ||
| 1489 | alignment, offset, guard_pages); | ||
| 1490 | } | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | return address; | ||
| 1494 | } | ||
| 1495 | |||
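The ASLR path in FindFreeArea derives each candidate from a random multiple of the alignment, then aligns down and re-applies the requested offset. A sketch of just that arithmetic, assuming a power-of-two alignment and using <random> in place of KSystemControl::GenerateRandomRange:

    #include <cstdint>
    #include <cstdio>
    #include <random>

    constexpr std::uint64_t PageSize = 0x1000;

    std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
        return value & ~(align - 1); // align must be a power of two
    }

    std::uint64_t PickCandidate(std::uint64_t region_start,
                                std::uint64_t region_num_pages,
                                std::uint64_t num_pages, std::uint64_t alignment,
                                std::uint64_t offset, std::uint64_t guard_pages,
                                std::mt19937_64& rng) {
        // Number of alignment-sized slots the request could start at.
        const std::uint64_t slots =
            (region_num_pages - num_pages - guard_pages) * PageSize / alignment;
        std::uniform_int_distribution<std::uint64_t> dist(0, slots);
        const std::uint64_t random_offset = dist(rng) * alignment;
        return AlignDown(region_start + random_offset, alignment) + offset;
    }

    int main() {
        std::mt19937_64 rng(0);
        const std::uint64_t candidate = PickCandidate(
            /*region_start=*/0x8000000, /*region_num_pages=*/0x100,
            /*num_pages=*/4, /*alignment=*/0x1000, /*offset=*/0,
            /*guard_pages=*/1, rng);
        std::printf("candidate = %#llx\n",
                    static_cast<unsigned long long>(candidate));
    }

As in the kernel code, a candidate produced by this formula still has to pass the containment and guard-page checks before it is accepted.
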
| 1496 | size_t KPageTableBase::GetSize(KMemoryState state) const { | ||
| 1497 | // Lock the table. | ||
| 1498 | KScopedLightLock lk(m_general_lock); | ||
| 1499 | |||
| 1500 | // Iterate, counting blocks with the desired state. | ||
| 1501 | size_t total_size = 0; | ||
| 1502 | for (KMemoryBlockManager::const_iterator it = | ||
| 1503 | m_memory_block_manager.FindIterator(m_address_space_start); | ||
| 1504 | it != m_memory_block_manager.end(); ++it) { | ||
| 1505 | // Get the memory info. | ||
| 1506 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1507 | if (info.GetState() == state) { | ||
| 1508 | total_size += info.GetSize(); | ||
| 1509 | } | ||
| 1510 | } | ||
| 1511 | |||
| 1512 | return total_size; | ||
| 1513 | } | ||
| 1514 | |||
| 1515 | size_t KPageTableBase::GetCodeSize() const { | ||
| 1516 | return this->GetSize(KMemoryState::Code); | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | size_t KPageTableBase::GetCodeDataSize() const { | ||
| 1520 | return this->GetSize(KMemoryState::CodeData); | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | size_t KPageTableBase::GetAliasCodeSize() const { | ||
| 1524 | return this->GetSize(KMemoryState::AliasCode); | ||
| 1525 | } | ||
| 1526 | |||
| 1527 | size_t KPageTableBase::GetAliasCodeDataSize() const { | ||
| 1528 | return this->GetSize(KMemoryState::AliasCodeData); | ||
| 1529 | } | ||
| 1530 | |||
| 1531 | Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 1532 | size_t num_pages, KMemoryPermission perm) { | ||
| 1533 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1534 | |||
| 1535 | // Create a page group to hold the pages we allocate. | ||
| 1536 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1537 | |||
| 1538 | // Allocate the pages. | ||
| 1539 | R_TRY( | ||
| 1540 | m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); | ||
| 1541 | |||
| 1542 | // Ensure that the page group is closed when we're done working with it. | ||
| 1543 | SCOPE_EXIT({ pg.Close(); }); | ||
| 1544 | |||
| 1545 | // Clear all pages. | ||
| 1546 | for (const auto& it : pg) { | ||
| 1547 | std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), | ||
| 1548 | static_cast<u32>(m_heap_fill_value), it.GetSize()); | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | // Map the pages. | ||
| 1552 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None}; | ||
| 1553 | R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup, | ||
| 1554 | false)); | ||
| 1555 | } | ||
| 1556 | |||
| 1557 | Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 1558 | const KPageGroup& pg, const KPageProperties properties, | ||
| 1559 | bool reuse_ll) { | ||
| 1560 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1561 | |||
| 1562 | // Note the current address, so that we can iterate. | ||
| 1563 | const KProcessAddress start_address = address; | ||
| 1564 | KProcessAddress cur_address = address; | ||
| 1565 | |||
| 1566 | // Ensure that we clean up on failure. | ||
| 1567 | ON_RESULT_FAILURE { | ||
| 1568 | ASSERT(!reuse_ll); | ||
| 1569 | if (cur_address != start_address) { | ||
| 1570 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1571 | DisableMergeAttribute::None}; | ||
| 1572 | R_ASSERT(this->Operate(page_list, start_address, | ||
| 1573 | (cur_address - start_address) / PageSize, 0, false, | ||
| 1574 | unmap_properties, OperationType::Unmap, true)); | ||
| 1575 | } | ||
| 1576 | }; | ||
| 1577 | |||
| 1578 | // Iterate, mapping all pages in the group. | ||
| 1579 | for (const auto& block : pg) { | ||
| 1580 | // Map and advance. | ||
| 1581 | const KPageProperties cur_properties = | ||
| 1582 | (cur_address == start_address) | ||
| 1583 | ? properties | ||
| 1584 | : KPageProperties{properties.perm, properties.io, properties.uncached, | ||
| 1585 | DisableMergeAttribute::None}; | ||
| 1586 | R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, | ||
| 1587 | cur_properties, OperationType::Map, reuse_ll)); | ||
| 1588 | cur_address += block.GetSize(); | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | // We succeeded! | ||
| 1592 | R_SUCCEED(); | ||
| 1593 | } | ||
| 1594 | |||
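A detail worth noting in MapPageGroupImpl: only the first block mapped carries the caller's DisableMergeAttribute; every later block is mapped with DisableMergeAttribute::None so the blocks of one group can coalesce with each other. A toy sketch of that per-block property selection (types and fields are illustrative):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class Merge { DisableHead, None };
    struct Props {
        bool writable;
        Merge merge;
    };

    // Only the head block keeps the caller's merge attribute.
    void MapGroup(const std::vector<unsigned>& block_pages, Props head_props) {
        unsigned long cur = 0;
        for (std::size_t i = 0; i < block_pages.size(); ++i) {
            const Props p =
                (i == 0) ? head_props : Props{head_props.writable, Merge::None};
            std::printf("map %u pages at +%lu (writable=%d, merge=%s)\n",
                        block_pages[i], cur, p.writable ? 1 : 0,
                        p.merge == Merge::DisableHead ? "DisableHead" : "None");
            cur += block_pages[i];
        }
    }

    int main() {
        MapGroup({4, 2, 8}, {true, Merge::DisableHead});
    }
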
| 1595 | void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 1596 | const KPageGroup& pg) { | ||
| 1597 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1598 | |||
| 1599 | // Note the current address, so that we can iterate. | ||
| 1600 | const KProcessAddress start_address = address; | ||
| 1601 | const KProcessAddress last_address = start_address + size - 1; | ||
| 1602 | const KProcessAddress end_address = last_address + 1; | ||
| 1603 | |||
| 1604 | // Iterate over the memory. | ||
| 1605 | auto pg_it = pg.begin(); | ||
| 1606 | ASSERT(pg_it != pg.end()); | ||
| 1607 | |||
| 1608 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 1609 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1610 | |||
| 1611 | auto it = m_memory_block_manager.FindIterator(start_address); | ||
| 1612 | while (true) { | ||
| 1613 | // Check that the iterator is valid. | ||
| 1614 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1615 | |||
| 1616 | // Get the memory info. | ||
| 1617 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1618 | |||
| 1619 | // Determine the range to map. | ||
| 1620 | KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address)); | ||
| 1621 | const KProcessAddress map_end_address = | ||
| 1622 | std::min(info.GetEndAddress(), GetInteger(end_address)); | ||
| 1623 | ASSERT(map_end_address != map_address); | ||
| 1624 | |||
| 1625 | // Determine if we should disable head merge. | ||
| 1626 | const bool disable_head_merge = | ||
| 1627 | info.GetAddress() >= GetInteger(start_address) && | ||
| 1628 | True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1629 | const KPageProperties map_properties = { | ||
| 1630 | info.GetPermission(), false, false, | ||
| 1631 | disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None}; | ||
| 1632 | |||
| 1633 | // While we have pages to map, map them. | ||
| 1634 | size_t map_pages = (map_end_address - map_address) / PageSize; | ||
| 1635 | while (map_pages > 0) { | ||
| 1636 | // Check if we're at the end of the physical block. | ||
| 1637 | if (pg_pages == 0) { | ||
| 1638 | // Ensure there are more pages to map. | ||
| 1639 | ASSERT(pg_it != pg.end()); | ||
| 1640 | |||
| 1641 | // Advance our physical block. | ||
| 1642 | ++pg_it; | ||
| 1643 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1644 | pg_pages = pg_it->GetNumPages(); | ||
| 1645 | } | ||
| 1646 | |||
| 1647 | // Map whatever we can. | ||
| 1648 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 1649 | R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true, | ||
| 1650 | map_properties, OperationType::Map, true)); | ||
| 1651 | |||
| 1652 | // Advance. | ||
| 1653 | map_address += cur_pages * PageSize; | ||
| 1654 | map_pages -= cur_pages; | ||
| 1655 | |||
| 1656 | pg_phys_addr += cur_pages * PageSize; | ||
| 1657 | pg_pages -= cur_pages; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | // Check if we're done. | ||
| 1661 | if (last_address <= info.GetLastAddress()) { | ||
| 1662 | break; | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | // Advance. | ||
| 1666 | ++it; | ||
| 1667 | } | ||
| 1668 | |||
| 1669 | // Check that we re-mapped precisely the page group. | ||
| 1670 | ASSERT((++pg_it) == pg.end()); | ||
| 1671 | } | ||
| 1672 | |||
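RemapPageGroup is a two-cursor walk: one cursor over the virtual ranges recorded in the block manager, one over the physical blocks of the page group, mapping min(pg_pages, map_pages) pages per step — which is why the Operate call above is given cur_pages — and advancing whichever cursor runs dry. A stand-alone sketch of the walk, with everything measured in pages for brevity:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Run {
        unsigned long addr;
        unsigned long pages;
    };

    void Remap(const std::vector<Run>& virt_ranges,
               const std::vector<Run>& phys_blocks) {
        std::size_t pi = 0;
        unsigned long pg_addr = phys_blocks[0].addr;
        unsigned long pg_pages = phys_blocks[0].pages;

        for (const Run& v : virt_ranges) {
            unsigned long map_addr = v.addr;
            unsigned long map_pages = v.pages;
            while (map_pages > 0) {
                if (pg_pages == 0) { // advance to the next physical block
                    ++pi;
                    pg_addr = phys_blocks[pi].addr;
                    pg_pages = phys_blocks[pi].pages;
                }
                const unsigned long cur = std::min(pg_pages, map_pages);
                std::printf("map %lu pages: va %#lx -> pa %#lx\n", cur,
                            map_addr, pg_addr);
                map_addr += cur;
                map_pages -= cur;
                pg_addr += cur;
                pg_pages -= cur;
            }
        }
    }

    int main() {
        // One 6-page virtual range backed by 4-page and 2-page blocks.
        Remap({{0x10, 6}}, {{0x100, 4}, {0x200, 2}});
    }
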
| 1673 | Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) { | ||
| 1674 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1675 | |||
| 1676 | const size_t size = num_pages * PageSize; | ||
| 1677 | |||
| 1678 | // We're making a new group, not adding to an existing one. | ||
| 1679 | R_UNLESS(pg.empty(), ResultInvalidCurrentMemory); | ||
| 1680 | |||
| 1681 | auto& impl = this->GetImpl(); | ||
| 1682 | |||
| 1683 | // Begin traversal. | ||
| 1684 | TraversalContext context; | ||
| 1685 | TraversalEntry next_entry; | ||
| 1686 | R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr), | ||
| 1687 | ResultInvalidCurrentMemory); | ||
| 1688 | |||
| 1689 | // Prepare tracking variables. | ||
| 1690 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 1691 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 1692 | size_t tot_size = cur_size; | ||
| 1693 | |||
| 1694 | // Iterate, adding to group as we go. | ||
| 1695 | while (tot_size < size) { | ||
| 1696 | R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)), | ||
| 1697 | ResultInvalidCurrentMemory); | ||
| 1698 | |||
| 1699 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 1700 | const size_t cur_pages = cur_size / PageSize; | ||
| 1701 | |||
| 1702 | R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 1703 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 1704 | |||
| 1705 | cur_addr = next_entry.phys_addr; | ||
| 1706 | cur_size = next_entry.block_size; | ||
| 1707 | } else { | ||
| 1708 | cur_size += next_entry.block_size; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | tot_size += next_entry.block_size; | ||
| 1712 | } | ||
| 1713 | |||
| 1714 | // Ensure we add the right amount for the last block. | ||
| 1715 | if (tot_size > size) { | ||
| 1716 | cur_size -= (tot_size - size); | ||
| 1717 | } | ||
| 1718 | |||
| 1719 | // Add the last block. | ||
| 1720 | const size_t cur_pages = cur_size / PageSize; | ||
| 1721 | R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 1722 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 1723 | |||
| 1724 | R_SUCCEED(); | ||
| 1725 | } | ||
| 1726 | |||
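MakePageGroup folds the per-entry traversal into runs: the first entry may begin mid-block (hence the block_size - (addr & (block_size - 1)) remainder), physically contiguous entries extend the current run, and the final run is trimmed so the group covers exactly the requested size. A sketch of that coalescing pass, assuming the entries cover at least size bytes:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr std::uint64_t PageSize = 0x1000;

    struct Entry {
        std::uint64_t phys_addr;
        std::uint64_t block_size;
    };
    struct Block {
        std::uint64_t addr;
        std::uint64_t pages;
    };

    std::vector<Block> Coalesce(const std::vector<Entry>& entries,
                                std::uint64_t size) {
        std::vector<Block> group;
        std::uint64_t cur_addr = entries[0].phys_addr;
        // The first entry may begin mid-block; keep only its remainder.
        std::uint64_t cur_size =
            entries[0].block_size - (cur_addr & (entries[0].block_size - 1));
        std::uint64_t tot_size = cur_size;

        for (std::size_t i = 1; tot_size < size; ++i) {
            const Entry& next = entries[i];
            if (next.phys_addr != cur_addr + cur_size) {
                group.push_back({cur_addr, cur_size / PageSize}); // flush run
                cur_addr = next.phys_addr;
                cur_size = next.block_size;
            } else {
                cur_size += next.block_size; // contiguous: extend the run
            }
            tot_size += next.block_size;
        }
        if (tot_size > size) {
            cur_size -= (tot_size - size); // trim the tail
        }
        group.push_back({cur_addr, cur_size / PageSize});
        return group;
    }

    int main() {
        // Two contiguous pages followed by a discontiguous one.
        const auto group = Coalesce({{0x10000, PageSize},
                                     {0x11000, PageSize},
                                     {0x40000, PageSize}},
                                    3 * PageSize);
        for (const Block& b : group) {
            std::printf("block %#llx x %llu pages\n",
                        static_cast<unsigned long long>(b.addr),
                        static_cast<unsigned long long>(b.pages));
        }
    }
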
| 1727 | bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, | ||
| 1728 | size_t num_pages) { | ||
| 1729 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1730 | |||
| 1731 | const size_t size = num_pages * PageSize; | ||
| 1732 | |||
| 1733 | // Empty groups are necessarily invalid. | ||
| 1734 | if (pg.empty()) { | ||
| 1735 | return false; | ||
| 1736 | } | ||
| 1737 | |||
| 1738 | auto& impl = this->GetImpl(); | ||
| 1739 | |||
| 1740 | // We're going to validate that the group we'd expect is the group we see. | ||
| 1741 | auto cur_it = pg.begin(); | ||
| 1742 | KPhysicalAddress cur_block_address = cur_it->GetAddress(); | ||
| 1743 | size_t cur_block_pages = cur_it->GetNumPages(); | ||
| 1744 | |||
| 1745 | auto UpdateCurrentIterator = [&]() { | ||
| 1746 | if (cur_block_pages == 0) { | ||
| 1747 | if ((++cur_it) == pg.end()) { | ||
| 1748 | return false; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | cur_block_address = cur_it->GetAddress(); | ||
| 1752 | cur_block_pages = cur_it->GetNumPages(); | ||
| 1753 | } | ||
| 1754 | return true; | ||
| 1755 | }; | ||
| 1756 | |||
| 1757 | // Begin traversal. | ||
| 1758 | TraversalContext context; | ||
| 1759 | TraversalEntry next_entry; | ||
| 1760 | if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) { | ||
| 1761 | return false; | ||
| 1762 | } | ||
| 1763 | |||
| 1764 | // Prepare tracking variables. | ||
| 1765 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 1766 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 1767 | size_t tot_size = cur_size; | ||
| 1768 | |||
| 1769 | // Iterate, comparing expected to actual. | ||
| 1770 | while (tot_size < size) { | ||
| 1771 | if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) { | ||
| 1772 | return false; | ||
| 1773 | } | ||
| 1774 | |||
| 1775 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 1776 | const size_t cur_pages = cur_size / PageSize; | ||
| 1777 | |||
| 1778 | if (!IsHeapPhysicalAddress(cur_addr)) { | ||
| 1779 | return false; | ||
| 1780 | } | ||
| 1781 | |||
| 1782 | if (!UpdateCurrentIterator()) { | ||
| 1783 | return false; | ||
| 1784 | } | ||
| 1785 | |||
| 1786 | if (cur_block_address != cur_addr || cur_block_pages < cur_pages) { | ||
| 1787 | return false; | ||
| 1788 | } | ||
| 1789 | |||
| 1790 | cur_block_address += cur_size; | ||
| 1791 | cur_block_pages -= cur_pages; | ||
| 1792 | cur_addr = next_entry.phys_addr; | ||
| 1793 | cur_size = next_entry.block_size; | ||
| 1794 | } else { | ||
| 1795 | cur_size += next_entry.block_size; | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | tot_size += next_entry.block_size; | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | // Ensure we compare the right amount for the last block. | ||
| 1802 | if (tot_size > size) { | ||
| 1803 | cur_size -= (tot_size - size); | ||
| 1804 | } | ||
| 1805 | |||
| 1806 | if (!IsHeapPhysicalAddress(cur_addr)) { | ||
| 1807 | return false; | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | if (!UpdateCurrentIterator()) { | ||
| 1811 | return false; | ||
| 1812 | } | ||
| 1813 | |||
| 1814 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); | ||
| 1815 | } | ||
| 1816 | |||
| 1817 | Result KPageTableBase::GetContiguousMemoryRangeWithState( | ||
| 1818 | MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask, | ||
| 1819 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 1820 | KMemoryAttribute attr_mask, KMemoryAttribute attr) { | ||
| 1821 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1822 | |||
| 1823 | auto& impl = this->GetImpl(); | ||
| 1824 | |||
| 1825 | // Begin a traversal. | ||
| 1826 | TraversalContext context; | ||
| 1827 | TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 1828 | R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), | ||
| 1829 | ResultInvalidCurrentMemory); | ||
| 1830 | |||
| 1831 | // Traverse until we have enough size or we aren't contiguous any more. | ||
| 1832 | const KPhysicalAddress phys_address = cur_entry.phys_addr; | ||
| 1833 | size_t contig_size; | ||
| 1834 | for (contig_size = | ||
| 1835 | cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); | ||
| 1836 | contig_size < size; contig_size += cur_entry.block_size) { | ||
| 1837 | if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) { | ||
| 1838 | break; | ||
| 1839 | } | ||
| 1840 | if (cur_entry.phys_addr != phys_address + contig_size) { | ||
| 1841 | break; | ||
| 1842 | } | ||
| 1843 | } | ||
| 1844 | |||
| 1845 | // Take the minimum size for our region. | ||
| 1846 | size = std::min(size, contig_size); | ||
| 1847 | |||
| 1848 | // Check that the memory is contiguous (modulo the reference count bit). | ||
| 1849 | const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted; | ||
| 1850 | const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 1851 | address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 1852 | attr_mask, attr)); | ||
| 1853 | if (!is_heap) { | ||
| 1854 | R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask, | ||
| 1855 | perm, attr_mask, attr)); | ||
| 1856 | } | ||
| 1857 | |||
| 1858 | // The memory is contiguous, so set the output range. | ||
| 1859 | out->Set(phys_address, size, is_heap); | ||
| 1860 | R_SUCCEED(); | ||
| 1861 | } | ||
| 1862 | |||
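The expression block_size - (addr & (block_size - 1)), used here and in the traversal helpers above, is the number of bytes left in the naturally aligned block containing addr. Two spot checks, assuming power-of-two block sizes:

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr std::uint64_t block_size = 0x200000; // a 2 MiB block

        // At a block boundary the whole block remains.
        assert(block_size - (0x40000000ULL & (block_size - 1)) == 0x200000);

        // 0x1000 bytes into a block, one page less remains.
        assert(block_size - (0x40001000ULL & (block_size - 1)) == 0x1FF000);
    }
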
| 1863 | Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, | ||
| 1864 | Svc::MemoryPermission svc_perm) { | ||
| 1865 | const size_t num_pages = size / PageSize; | ||
| 1866 | |||
| 1867 | // Lock the table. | ||
| 1868 | KScopedLightLock lk(m_general_lock); | ||
| 1869 | |||
| 1870 | // Verify we can change the memory permission. | ||
| 1871 | KMemoryState old_state; | ||
| 1872 | KMemoryPermission old_perm; | ||
| 1873 | size_t num_allocator_blocks; | ||
| 1874 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 1875 | std::addressof(num_allocator_blocks), addr, size, | ||
| 1876 | KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, | ||
| 1877 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1878 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1879 | |||
| 1880 | // Determine new perm. | ||
| 1881 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 1882 | R_SUCCEED_IF(old_perm == new_perm); | ||
| 1883 | |||
| 1884 | // Create an update allocator. | ||
| 1885 | Result allocator_result; | ||
| 1886 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1887 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1888 | R_TRY(allocator_result); | ||
| 1889 | |||
| 1890 | // We're going to perform an update, so create a helper. | ||
| 1891 | KScopedPageTableUpdater updater(this); | ||
| 1892 | |||
| 1893 | // Perform mapping operation. | ||
| 1894 | const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None}; | ||
| 1895 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 1896 | OperationType::ChangePermissions, false)); | ||
| 1897 | |||
| 1898 | // Update the blocks. | ||
| 1899 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 1900 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1901 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1902 | |||
| 1903 | R_SUCCEED(); | ||
| 1904 | } | ||
| 1905 | |||
| 1906 | Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 1907 | Svc::MemoryPermission svc_perm) { | ||
| 1908 | const size_t num_pages = size / PageSize; | ||
| 1909 | |||
| 1910 | // Lock the table. | ||
| 1911 | KScopedLightLock lk(m_general_lock); | ||
| 1912 | |||
| 1913 | // Verify we can change the memory permission. | ||
| 1914 | KMemoryState old_state; | ||
| 1915 | KMemoryPermission old_perm; | ||
| 1916 | size_t num_allocator_blocks; | ||
| 1917 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 1918 | std::addressof(num_allocator_blocks), addr, size, | ||
| 1919 | KMemoryState::FlagCode, KMemoryState::FlagCode, | ||
| 1920 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1921 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1922 | |||
| 1923 | // Make a new page group for the region. | ||
| 1924 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1925 | |||
| 1926 | // Determine new perm/state. | ||
| 1927 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 1928 | KMemoryState new_state = old_state; | ||
| 1929 | const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite; | ||
| 1930 | const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 1931 | const bool was_x = | ||
| 1932 | (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 1933 | ASSERT(!(is_w && is_x)); | ||
| 1934 | |||
| 1935 | if (is_w) { | ||
| 1936 | switch (old_state) { | ||
| 1937 | case KMemoryState::Code: | ||
| 1938 | new_state = KMemoryState::CodeData; | ||
| 1939 | break; | ||
| 1940 | case KMemoryState::AliasCode: | ||
| 1941 | new_state = KMemoryState::AliasCodeData; | ||
| 1942 | break; | ||
| 1943 | default: | ||
| 1944 | UNREACHABLE(); | ||
| 1945 | } | ||
| 1946 | } | ||
| 1947 | |||
| 1948 | // Create a page group, if we're setting execute permissions. | ||
| 1949 | if (is_x) { | ||
| 1950 | R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages)); | ||
| 1951 | } | ||
| 1952 | |||
| 1953 | // Succeed if there's nothing to do. | ||
| 1954 | R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); | ||
| 1955 | |||
| 1956 | // Create an update allocator. | ||
| 1957 | Result allocator_result; | ||
| 1958 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1959 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1960 | R_TRY(allocator_result); | ||
| 1961 | |||
| 1962 | // We're going to perform an update, so create a helper. | ||
| 1963 | KScopedPageTableUpdater updater(this); | ||
| 1964 | |||
| 1965 | // Perform mapping operation. | ||
| 1966 | const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None}; | ||
| 1967 | const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush | ||
| 1968 | : OperationType::ChangePermissions; | ||
| 1969 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation, | ||
| 1970 | false)); | ||
| 1971 | |||
| 1972 | // Update the blocks. | ||
| 1973 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, | ||
| 1974 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1975 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1976 | |||
| 1977 | // Ensure cache coherency, if we're setting pages as executable. | ||
| 1978 | if (is_x) { | ||
| 1979 | for (const auto& block : pg) { | ||
| 1980 | StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize()); | ||
| 1981 | } | ||
| 1982 | InvalidateInstructionCache(m_system, addr, size); | ||
| 1983 | } | ||
| 1984 | |||
| 1985 | R_SUCCEED(); | ||
| 1986 | } | ||
| 1987 | |||
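SetProcessMemoryPermission enforces W^X and promotes the memory state when write permission is granted: Code becomes CodeData and AliasCode becomes AliasCodeData, while execute-only changes leave the state alone. The transition, extracted into a sketch (the enum is a trimmed stand-in for KMemoryState):

    #include <cassert>

    enum class State { Code, CodeData, AliasCode, AliasCodeData };

    State Transition(State old_state, bool is_w, bool is_x) {
        assert(!(is_w && is_x)); // W^X: never writable and executable
        if (!is_w) {
            return old_state;
        }
        switch (old_state) {
        case State::Code:
            return State::CodeData;
        case State::AliasCode:
            return State::AliasCodeData;
        default:
            assert(false); // mirrors UNREACHABLE() above
            return old_state;
        }
    }

    int main() {
        assert(Transition(State::Code, true, false) == State::CodeData);
        assert(Transition(State::AliasCode, true, false) == State::AliasCodeData);
        assert(Transition(State::Code, false, true) == State::Code);
    }
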
| 1988 | Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, | ||
| 1989 | KMemoryAttribute attr) { | ||
| 1990 | const size_t num_pages = size / PageSize; | ||
| 1991 | ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask); | ||
| 1992 | |||
| 1993 | // Lock the table. | ||
| 1994 | KScopedLightLock lk(m_general_lock); | ||
| 1995 | |||
| 1996 | // Verify we can change the memory attribute. | ||
| 1997 | KMemoryState old_state; | ||
| 1998 | KMemoryPermission old_perm; | ||
| 1999 | KMemoryAttribute old_attr; | ||
| 2000 | size_t num_allocator_blocks; | ||
| 2001 | constexpr KMemoryAttribute AttributeTestMask = | ||
| 2002 | ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared); | ||
| 2003 | const KMemoryState state_test_mask = | ||
| 2004 | (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute | ||
| 2005 | : KMemoryState::None) | | ||
| 2006 | (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock | ||
| 2007 | : KMemoryState::None); | ||
| 2008 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 2009 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 2010 | addr, size, state_test_mask, state_test_mask, | ||
| 2011 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2012 | AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); | ||
| 2013 | |||
| 2014 | // Create an update allocator. | ||
| 2015 | Result allocator_result; | ||
| 2016 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2017 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2018 | R_TRY(allocator_result); | ||
| 2019 | |||
| 2020 | // We're going to perform an update, so create a helper. | ||
| 2021 | KScopedPageTableUpdater updater(this); | ||
| 2022 | |||
| 2023 | // If we need to, perform a change attribute operation. | ||
| 2024 | if (True(mask & KMemoryAttribute::Uncached)) { | ||
| 2025 | // Determine the new attribute. | ||
| 2026 | const KMemoryAttribute new_attr = | ||
| 2027 | static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask))); | ||
| 2028 | |||
| 2029 | // Perform operation. | ||
| 2030 | const KPageProperties properties = {old_perm, false, | ||
| 2031 | True(new_attr & KMemoryAttribute::Uncached), | ||
| 2032 | DisableMergeAttribute::None}; | ||
| 2033 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 2034 | OperationType::ChangePermissionsAndRefreshAndFlush, false)); | ||
| 2035 | } | ||
| 2036 | |||
| 2037 | // Update the blocks. | ||
| 2038 | m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr); | ||
| 2039 | |||
| 2040 | R_SUCCEED(); | ||
| 2041 | } | ||
| 2042 | |||
| 2043 | Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) { | ||
| 2044 | // Lock the physical memory mutex. | ||
| 2045 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | ||
| 2046 | |||
| 2047 | // Try to perform a reduction in heap, instead of an extension. | ||
| 2048 | KProcessAddress cur_address; | ||
| 2049 | size_t allocation_size; | ||
| 2050 | { | ||
| 2051 | // Lock the table. | ||
| 2052 | KScopedLightLock lk(m_general_lock); | ||
| 2053 | |||
| 2054 | // Validate that setting heap size is possible at all. | ||
| 2055 | R_UNLESS(!m_is_kernel, ResultOutOfMemory); | ||
| 2056 | R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), | ||
| 2057 | ResultOutOfMemory); | ||
| 2058 | R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); | ||
| 2059 | |||
| 2060 | if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) { | ||
| 2061 | // The size being requested is less than the current size, so we need to free the end of | ||
| 2062 | // the heap. | ||
| 2063 | |||
| 2064 | // Validate memory state. | ||
| 2065 | size_t num_allocator_blocks; | ||
| 2066 | R_TRY(this->CheckMemoryState( | ||
| 2067 | std::addressof(num_allocator_blocks), m_heap_region_start + size, | ||
| 2068 | (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All, | ||
| 2069 | KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 2070 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2071 | |||
| 2072 | // Create an update allocator. | ||
| 2073 | Result allocator_result; | ||
| 2074 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2075 | m_memory_block_slab_manager, | ||
| 2076 | num_allocator_blocks); | ||
| 2077 | R_TRY(allocator_result); | ||
| 2078 | |||
| 2079 | // We're going to perform an update, so create a helper. | ||
| 2080 | KScopedPageTableUpdater updater(this); | ||
| 2081 | |||
| 2082 | // Unmap the end of the heap. | ||
| 2083 | const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize; | ||
| 2084 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2085 | DisableMergeAttribute::None}; | ||
| 2086 | R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0, | ||
| 2087 | false, unmap_properties, OperationType::Unmap, false)); | ||
| 2088 | |||
| 2089 | // Release the memory from the resource limit. | ||
| 2090 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 2091 | num_pages * PageSize); | ||
| 2092 | |||
| 2093 | // Apply the memory block update. | ||
| 2094 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | ||
| 2095 | num_pages, KMemoryState::Free, KMemoryPermission::None, | ||
| 2096 | KMemoryAttribute::None, | ||
| 2097 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2098 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2099 | : KMemoryBlockDisableMergeAttribute::None); | ||
| 2100 | |||
| 2101 | // Update the current heap end. | ||
| 2102 | m_current_heap_end = m_heap_region_start + size; | ||
| 2103 | |||
| 2104 | // Set the output. | ||
| 2105 | *out = m_heap_region_start; | ||
| 2106 | R_SUCCEED(); | ||
| 2107 | } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) { | ||
| 2108 | // The size requested is exactly the current size. | ||
| 2109 | *out = m_heap_region_start; | ||
| 2110 | R_SUCCEED(); | ||
| 2111 | } else { | ||
| 2112 | // We have to allocate memory. Determine how much to allocate and where while the table | ||
| 2113 | // is locked. | ||
| 2114 | cur_address = m_current_heap_end; | ||
| 2115 | allocation_size = size - (m_current_heap_end - m_heap_region_start); | ||
| 2116 | } | ||
| 2117 | } | ||
| 2118 | |||
| 2119 | // Reserve memory for the heap extension. | ||
| 2120 | KScopedResourceReservation memory_reservation( | ||
| 2121 | m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size); | ||
| 2122 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 2123 | |||
| 2124 | // Allocate pages for the heap extension. | ||
| 2125 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 2126 | R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, | ||
| 2127 | m_allocate_option)); | ||
| 2128 | |||
| 2129 | // Close the opened pages when we're done with them. | ||
| 2130 | // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed | ||
| 2131 | // automatically. | ||
| 2132 | SCOPE_EXIT({ pg.Close(); }); | ||
| 2133 | |||
| 2134 | // Clear all the newly allocated pages. | ||
| 2135 | for (const auto& it : pg) { | ||
| 2136 | std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value, | ||
| 2137 | it.GetSize()); | ||
| 2138 | } | ||
| 2139 | |||
| 2140 | // Map the pages. | ||
| 2141 | { | ||
| 2142 | // Lock the table. | ||
| 2143 | KScopedLightLock lk(m_general_lock); | ||
| 2144 | |||
| 2145 | // Ensure that the heap hasn't changed since we began executing. | ||
| 2146 | ASSERT(cur_address == m_current_heap_end); | ||
| 2147 | |||
| 2148 | // Check the memory state. | ||
| 2149 | size_t num_allocator_blocks; | ||
| 2150 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, | ||
| 2151 | allocation_size, KMemoryState::All, KMemoryState::Free, | ||
| 2152 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2153 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2154 | |||
| 2155 | // Create an update allocator. | ||
| 2156 | Result allocator_result; | ||
| 2157 | KMemoryBlockManagerUpdateAllocator allocator( | ||
| 2158 | std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2159 | R_TRY(allocator_result); | ||
| 2160 | |||
| 2161 | // We're going to perform an update, so create a helper. | ||
| 2162 | KScopedPageTableUpdater updater(this); | ||
| 2163 | |||
| 2164 | // Map the pages. | ||
| 2165 | const size_t num_pages = allocation_size / PageSize; | ||
| 2166 | const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 2167 | (m_current_heap_end == m_heap_region_start) | ||
| 2168 | ? DisableMergeAttribute::DisableHead | ||
| 2169 | : DisableMergeAttribute::None}; | ||
| 2170 | R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, | ||
| 2171 | map_properties, OperationType::MapGroup, false)); | ||
| 2172 | |||
| 2173 | // We succeeded, so commit our memory reservation. | ||
| 2174 | memory_reservation.Commit(); | ||
| 2175 | |||
| 2176 | // Apply the memory block update. | ||
| 2177 | m_memory_block_manager.Update( | ||
| 2178 | std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, | ||
| 2179 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2180 | m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2181 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 2182 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2183 | |||
| 2184 | // Update the current heap end. | ||
| 2185 | m_current_heap_end = m_heap_region_start + size; | ||
| 2186 | |||
| 2187 | // Set the output. | ||
| 2188 | *out = m_heap_region_start; | ||
| 2189 | R_SUCCEED(); | ||
| 2190 | } | ||
| 2191 | } | ||
| 2192 | |||
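SetHeapSize splits into three cases on the requested size versus the current heap extent: shrink (unmap the tail and release the resource), no-op, or extend (reserve, allocate, and map new pages at the current end). The decision arithmetic in isolation (addresses are illustrative):

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t PageSize = 0x1000;

    void SetHeapSizeSketch(std::uint64_t heap_start, std::uint64_t& heap_end,
                           std::uint64_t size) {
        const std::uint64_t cur_size = heap_end - heap_start;
        if (size < cur_size) {
            const std::uint64_t num_pages = (cur_size - size) / PageSize;
            std::printf("shrink: unmap %llu pages at %#llx\n",
                        static_cast<unsigned long long>(num_pages),
                        static_cast<unsigned long long>(heap_start + size));
        } else if (size == cur_size) {
            std::printf("no-op\n");
        } else {
            const std::uint64_t allocation_size = size - cur_size;
            std::printf("extend: allocate %#llx bytes at %#llx\n",
                        static_cast<unsigned long long>(allocation_size),
                        static_cast<unsigned long long>(heap_end));
        }
        heap_end = heap_start + size; // the new current heap end on success
    }

    int main() {
        std::uint64_t heap_end = 0x8000000 + 0x10000;
        SetHeapSizeSketch(0x8000000, heap_end, 0x20000); // extend by 0x10000
        SetHeapSizeSketch(0x8000000, heap_end, 0x8000);  // shrink by 0x18000
    }
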
| 2193 | Result KPageTableBase::SetMaxHeapSize(size_t size) { | ||
| 2194 | // Lock the table. | ||
| 2195 | KScopedLightLock lk(m_general_lock); | ||
| 2196 | |||
| 2197 | // Only process page tables are allowed to set heap size. | ||
| 2198 | ASSERT(!this->IsKernel()); | ||
| 2199 | |||
| 2200 | m_max_heap_size = size; | ||
| 2201 | |||
| 2202 | R_SUCCEED(); | ||
| 2203 | } | ||
| 2204 | |||
| 2205 | Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, | ||
| 2206 | KProcessAddress addr) const { | ||
| 2207 | // If the address is invalid, create a fake block. | ||
| 2208 | if (!this->Contains(addr, 1)) { | ||
| 2209 | *out_info = { | ||
| 2210 | .m_address = GetInteger(m_address_space_end), | ||
| 2211 | .m_size = 0 - GetInteger(m_address_space_end), | ||
| 2212 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), | ||
| 2213 | .m_device_disable_merge_left_count = 0, | ||
| 2214 | .m_device_disable_merge_right_count = 0, | ||
| 2215 | .m_ipc_lock_count = 0, | ||
| 2216 | .m_device_use_count = 0, | ||
| 2217 | .m_ipc_disable_merge_count = 0, | ||
| 2218 | .m_permission = KMemoryPermission::None, | ||
| 2219 | .m_attribute = KMemoryAttribute::None, | ||
| 2220 | .m_original_permission = KMemoryPermission::None, | ||
| 2221 | .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, | ||
| 2222 | }; | ||
| 2223 | out_page_info->flags = 0; | ||
| 2224 | |||
| 2225 | R_SUCCEED(); | ||
| 2226 | } | ||
| 2227 | |||
| 2228 | // Otherwise, lock the table and query. | ||
| 2229 | KScopedLightLock lk(m_general_lock); | ||
| 2230 | R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr)); | ||
| 2231 | } | ||
| 2232 | |||
| 2233 | Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, | ||
| 2234 | KProcessAddress address) const { | ||
| 2235 | // Lock the table. | ||
| 2236 | KScopedLightLock lk(m_general_lock); | ||
| 2237 | |||
| 2238 | // Align the address down to page size. | ||
| 2239 | address = Common::AlignDown(GetInteger(address), PageSize); | ||
| 2240 | |||
| 2241 | // Verify that we can query the address. | ||
| 2242 | KMemoryInfo info; | ||
| 2243 | Svc::PageInfo page_info; | ||
| 2244 | R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address)); | ||
| 2245 | |||
| 2246 | // Check the memory state. | ||
| 2247 | R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical, | ||
| 2248 | KMemoryState::FlagCanQueryPhysical, | ||
| 2249 | KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead, | ||
| 2250 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2251 | |||
| 2252 | // Prepare to traverse. | ||
| 2253 | KPhysicalAddress phys_addr; | ||
| 2254 | size_t phys_size; | ||
| 2255 | |||
| 2256 | KProcessAddress virt_addr = info.GetAddress(); | ||
| 2257 | KProcessAddress end_addr = info.GetEndAddress(); | ||
| 2258 | |||
| 2259 | // Perform traversal. | ||
| 2260 | { | ||
| 2261 | // Begin traversal. | ||
| 2262 | TraversalContext context; | ||
| 2263 | TraversalEntry next_entry; | ||
| 2264 | bool traverse_valid = | ||
| 2265 | m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); | ||
| 2266 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 2267 | |||
| 2268 | // Set tracking variables. | ||
| 2269 | phys_addr = next_entry.phys_addr; | ||
| 2270 | phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); | ||
| 2271 | |||
| 2272 | // Iterate. | ||
| 2273 | while (true) { | ||
| 2274 | // Continue the traversal. | ||
| 2275 | traverse_valid = | ||
| 2276 | m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 2277 | if (!traverse_valid) { | ||
| 2278 | break; | ||
| 2279 | } | ||
| 2280 | |||
| 2281 | if (next_entry.phys_addr != (phys_addr + phys_size)) { | ||
| 2282 | // Check if we're done. | ||
| 2283 | if (virt_addr <= address && address <= virt_addr + phys_size - 1) { | ||
| 2284 | break; | ||
| 2285 | } | ||
| 2286 | |||
| 2287 | // Advance. | ||
| 2288 | phys_addr = next_entry.phys_addr; | ||
| 2289 | virt_addr += next_entry.block_size; | ||
| 2290 | phys_size = | ||
| 2291 | next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); | ||
| 2292 | } else { | ||
| 2293 | phys_size += next_entry.block_size; | ||
| 2294 | } | ||
| 2295 | |||
| 2296 | // Check if we're done. | ||
| 2297 | if (end_addr < virt_addr + phys_size) { | ||
| 2298 | break; | ||
| 2299 | } | ||
| 2300 | } | ||
| 2301 | ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1); | ||
| 2302 | |||
| 2303 | // Ensure we use the right size. | ||
| 2304 | if (end_addr < virt_addr + phys_size) { | ||
| 2305 | phys_size = end_addr - virt_addr; | ||
| 2306 | } | ||
| 2307 | } | ||
| 2308 | |||
| 2309 | // Set the output. | ||
| 2310 | out->physical_address = GetInteger(phys_addr); | ||
| 2311 | out->virtual_address = GetInteger(virt_addr); | ||
| 2312 | out->size = phys_size; | ||
| 2313 | R_SUCCEED(); | ||
| 2314 | } | ||
| 2315 | |||
| 2316 | Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, | ||
| 2317 | KPhysicalAddress phys_addr, size_t size, KMemoryState state, | ||
| 2318 | KMemoryPermission perm) { | ||
| 2319 | // Check pre-conditions. | ||
| 2320 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2321 | ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)); | ||
| 2322 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 2323 | ASSERT(size > 0); | ||
| 2324 | |||
| 2325 | R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress); | ||
| 2326 | const size_t num_pages = size / PageSize; | ||
| 2327 | const KPhysicalAddress last = phys_addr + size - 1; | ||
| 2328 | |||
| 2329 | // Get region extents. | ||
| 2330 | const KProcessAddress region_start = m_kernel_map_region_start; | ||
| 2331 | const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 2332 | const size_t region_num_pages = region_size / PageSize; | ||
| 2333 | |||
| 2334 | ASSERT(this->CanContain(region_start, region_size, state)); | ||
| 2335 | |||
| 2336 | // Locate the memory region. | ||
| 2337 | const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr); | ||
| 2338 | R_UNLESS(region != nullptr, ResultInvalidAddress); | ||
| 2339 | |||
| 2340 | ASSERT(region->Contains(GetInteger(phys_addr))); | ||
| 2341 | |||
| 2342 | // Ensure that the region is mappable. | ||
| 2343 | const bool is_rw = perm == KMemoryPermission::UserReadWrite; | ||
| 2344 | while (true) { | ||
| 2345 | // Check that the region exists. | ||
| 2346 | R_UNLESS(region != nullptr, ResultInvalidAddress); | ||
| 2347 | |||
| 2348 | // Check the region attributes. | ||
| 2349 | R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress); | ||
| 2350 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, | ||
| 2351 | ResultInvalidAddress); | ||
| 2352 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress); | ||
| 2353 | |||
| 2354 | // Check if we're done. | ||
| 2355 | if (GetInteger(last) <= region->GetLastAddress()) { | ||
| 2356 | break; | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | // Advance. | ||
| 2360 | region = region->GetNext(); | ||
| 2361 | } | ||
| 2362 | |||
| 2363 | // Select an address to map at. | ||
| 2364 | KProcessAddress addr = 0; | ||
| 2365 | { | ||
| 2366 | const size_t alignment = 4_KiB; | ||
| 2367 | const KPhysicalAddress aligned_phys = | ||
| 2368 | Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1; | ||
| 2369 | R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress); | ||
| 2370 | |||
| 2371 | const KPhysicalAddress last_aligned_paddr = | ||
| 2372 | Common::AlignDown(GetInteger(last) + 1, alignment) - 1; | ||
| 2373 | R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr), | ||
| 2374 | ResultInvalidAddress); | ||
| 2375 | |||
| 2376 | addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | ||
| 2377 | this->GetNumGuardPages()); | ||
| 2378 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2379 | } | ||
| 2380 | |||
| 2381 | // Check that we can map IO here. | ||
| 2382 | ASSERT(this->CanContain(addr, size, state)); | ||
| 2383 | R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | ||
| 2384 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2385 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2386 | |||
| 2387 | // Perform mapping operation. | ||
| 2388 | const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false, | ||
| 2389 | DisableMergeAttribute::DisableHead}; | ||
| 2390 | R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map, | ||
| 2391 | false)); | ||
| 2392 | |||
| 2393 | // Set the output address. | ||
| 2394 | *out = addr; | ||
| 2395 | |||
| 2396 | R_SUCCEED(); | ||
| 2397 | } | ||
| 2398 | |||
| 2399 | Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 2400 | // Lock the table. | ||
| 2401 | KScopedLightLock lk(m_general_lock); | ||
| 2402 | |||
| 2403 | // Create an update allocator. | ||
| 2404 | Result allocator_result; | ||
| 2405 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2406 | m_memory_block_slab_manager); | ||
| 2407 | R_TRY(allocator_result); | ||
| 2408 | |||
| 2409 | // We're going to perform an update, so create a helper. | ||
| 2410 | KScopedPageTableUpdater updater(this); | ||
| 2411 | |||
| 2412 | // Map the io memory. | ||
| 2413 | KProcessAddress addr; | ||
| 2414 | R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, | ||
| 2415 | KMemoryState::IoRegister, perm)); | ||
| 2416 | |||
| 2417 | // Update the blocks. | ||
| 2418 | m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, | ||
| 2419 | KMemoryState::IoRegister, perm, KMemoryAttribute::Locked, | ||
| 2420 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2421 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2422 | |||
| 2423 | // We successfully mapped the pages. | ||
| 2424 | R_SUCCEED(); | ||
| 2425 | } | ||
| 2426 | |||
| 2427 | Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, | ||
| 2428 | size_t size, Svc::MemoryMapping mapping, | ||
| 2429 | Svc::MemoryPermission svc_perm) { | ||
| 2430 | const size_t num_pages = size / PageSize; | ||
| 2431 | |||
| 2432 | // Lock the table. | ||
| 2433 | KScopedLightLock lk(m_general_lock); | ||
| 2434 | |||
| 2435 | // Validate the memory state. | ||
| 2436 | size_t num_allocator_blocks; | ||
| 2437 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, | ||
| 2438 | KMemoryState::All, KMemoryState::None, KMemoryPermission::None, | ||
| 2439 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2440 | KMemoryAttribute::None)); | ||
| 2441 | |||
| 2442 | // Create an update allocator. | ||
| 2443 | Result allocator_result; | ||
| 2444 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2445 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2446 | R_TRY(allocator_result); | ||
| 2447 | |||
| 2448 | // We're going to perform an update, so create a helper. | ||
| 2449 | KScopedPageTableUpdater updater(this); | ||
| 2450 | |||
| 2451 | // Perform mapping operation. | ||
| 2452 | const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm); | ||
| 2453 | const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister, | ||
| 2454 | mapping == Svc::MemoryMapping::Uncached, | ||
| 2455 | DisableMergeAttribute::DisableHead}; | ||
| 2456 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, | ||
| 2457 | OperationType::Map, false)); | ||
| 2458 | |||
| 2459 | // Update the blocks. | ||
| 2460 | const auto state = | ||
| 2461 | mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister; | ||
| 2462 | m_memory_block_manager.Update( | ||
| 2463 | std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked, | ||
| 2464 | KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||
| 2465 | |||
| 2466 | // We successfully mapped the pages. | ||
| 2467 | R_SUCCEED(); | ||
| 2468 | } | ||
| 2469 | |||
| 2470 | Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, | ||
| 2471 | size_t size, Svc::MemoryMapping mapping) { | ||
| 2472 | const size_t num_pages = size / PageSize; | ||
| 2473 | |||
| 2474 | // Lock the table. | ||
| 2475 | KScopedLightLock lk(m_general_lock); | ||
| 2476 | |||
| 2477 | // Validate the memory state. | ||
| 2478 | KMemoryState old_state; | ||
| 2479 | KMemoryPermission old_perm; | ||
| 2480 | KMemoryAttribute old_attr; | ||
| 2481 | size_t num_allocator_blocks; | ||
| 2482 | R_TRY(this->CheckMemoryState( | ||
| 2483 | std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), | ||
| 2484 | std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All, | ||
| 2485 | mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister, | ||
| 2486 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2487 | KMemoryAttribute::Locked)); | ||
| 2488 | |||
| 2489 | // Validate that the region being unmapped corresponds to the physical range described. | ||
| 2490 | { | ||
| 2491 | // Get the impl. | ||
| 2492 | auto& impl = this->GetImpl(); | ||
| 2493 | |||
| 2494 | // Begin traversal. | ||
| 2495 | TraversalContext context; | ||
| 2496 | TraversalEntry next_entry; | ||
| 2497 | ASSERT( | ||
| 2498 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address)); | ||
| 2499 | |||
| 2500 | // Check that the physical region matches. | ||
| 2501 | R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion); | ||
| 2502 | |||
| 2503 | // Iterate. | ||
| 2504 | for (size_t checked_size = | ||
| 2505 | next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); | ||
| 2506 | checked_size < size; checked_size += next_entry.block_size) { | ||
| 2507 | // Continue the traversal. | ||
| 2508 | ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))); | ||
| 2509 | |||
| 2510 | // Check that the physical region matches. | ||
| 2511 | R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion); | ||
| 2512 | } | ||
| 2513 | } | ||
| 2514 | |||
| 2515 | // Create an update allocator. | ||
| 2516 | Result allocator_result; | ||
| 2517 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2518 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2519 | R_TRY(allocator_result); | ||
| 2520 | |||
| 2521 | // We're going to perform an update, so create a helper. | ||
| 2522 | KScopedPageTableUpdater updater(this); | ||
| 2523 | |||
| 2524 | // If the region being unmapped is Memory, synchronize. | ||
| 2525 | if (mapping == Svc::MemoryMapping::Memory) { | ||
| 2526 | // Change the region to be uncached. | ||
| 2527 | const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None}; | ||
| 2528 | R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties, | ||
| 2529 | OperationType::ChangePermissionsAndRefresh, false)); | ||
| 2530 | |||
| 2531 | // Temporarily unlock ourselves, so that other operations can occur while we flush the | ||
| 2532 | // region. | ||
| 2533 | m_general_lock.Unlock(); | ||
| 2534 | SCOPE_EXIT({ m_general_lock.Lock(); }); | ||
| 2535 | |||
| 2536 | // Flush the region. | ||
| 2537 | R_ASSERT(FlushDataCache(dst_address, size)); | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | // Perform the unmap. | ||
| 2541 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2542 | DisableMergeAttribute::None}; | ||
| 2543 | R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, | ||
| 2544 | unmap_properties, OperationType::Unmap, false)); | ||
| 2545 | |||
| 2546 | // Update the blocks. | ||
| 2547 | m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, | ||
| 2548 | KMemoryState::Free, KMemoryPermission::None, | ||
| 2549 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 2550 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2551 | |||
| 2552 | R_SUCCEED(); | ||
| 2553 | } | ||
| 2554 | |||
| 2555 | Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 2556 | ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)); | ||
| 2557 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 2558 | ASSERT(size > 0); | ||
| 2559 | R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress); | ||
| 2560 | const size_t num_pages = size / PageSize; | ||
| 2561 | const KPhysicalAddress last = phys_addr + size - 1; | ||
| 2562 | |||
| 2563 | // Get region extents. | ||
| 2564 | const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static); | ||
| 2565 | const size_t region_size = this->GetRegionSize(KMemoryState::Static); | ||
| 2566 | const size_t region_num_pages = region_size / PageSize; | ||
| 2567 | |||
| 2568 | // Locate the memory region. | ||
| 2569 | const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr); | ||
| 2570 | R_UNLESS(region != nullptr, ResultInvalidAddress); | ||
| 2571 | |||
| 2572 | ASSERT(region->Contains(GetInteger(phys_addr))); | ||
| 2573 | R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress); | ||
| 2574 | |||
| 2575 | // Check the region attributes. | ||
| 2576 | const bool is_rw = perm == KMemoryPermission::UserReadWrite; | ||
| 2577 | R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress); | ||
| 2578 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress); | ||
| 2579 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, | ||
| 2580 | ResultInvalidAddress); | ||
| 2581 | |||
| 2582 | // Lock the table. | ||
| 2583 | KScopedLightLock lk(m_general_lock); | ||
| 2584 | |||
| 2585 | // Select an address to map at. | ||
| 2586 | KProcessAddress addr = 0; | ||
| 2587 | { | ||
| 2588 | const size_t alignment = 4_KiB; | ||
| 2589 | const KPhysicalAddress aligned_phys = | ||
| 2590 | Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1; | ||
| 2591 | R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress); | ||
| 2592 | |||
| 2593 | const KPhysicalAddress last_aligned_paddr = | ||
| 2594 | Common::AlignDown(GetInteger(last) + 1, alignment) - 1; | ||
| 2595 | R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr), | ||
| 2596 | ResultInvalidAddress); | ||
| 2597 | |||
| 2598 | addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | ||
| 2599 | this->GetNumGuardPages()); | ||
| 2600 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2601 | } | ||
| 2602 | |||
| 2603 | // Check that we can map static here. | ||
| 2604 | ASSERT(this->CanContain(addr, size, KMemoryState::Static)); | ||
| 2605 | R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | ||
| 2606 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2607 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2608 | |||
| 2609 | // Create an update allocator. | ||
| 2610 | Result allocator_result; | ||
| 2611 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2612 | m_memory_block_slab_manager); | ||
| 2613 | R_TRY(allocator_result); | ||
| 2614 | |||
| 2615 | // We're going to perform an update, so create a helper. | ||
| 2616 | KScopedPageTableUpdater updater(this); | ||
| 2617 | |||
| 2618 | // Perform mapping operation. | ||
| 2619 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2620 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, | ||
| 2621 | OperationType::Map, false)); | ||
| 2622 | |||
| 2623 | // Update the blocks. | ||
| 2624 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static, | ||
| 2625 | perm, KMemoryAttribute::None, | ||
| 2626 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2627 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2628 | |||
| 2629 | // We successfully mapped the pages. | ||
| 2630 | R_SUCCEED(); | ||
| 2631 | } | ||
| 2632 | |||
| 2633 | Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { | ||
| 2634 | // Get the memory region. | ||
| 2635 | const KMemoryRegion* region = | ||
| 2636 | m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type); | ||
| 2637 | R_UNLESS(region != nullptr, ResultOutOfRange); | ||
| 2638 | |||
| 2639 | // Check that the region is valid. | ||
| 2640 | ASSERT(region->GetEndAddress() != 0); | ||
| 2641 | |||
| 2642 | // Map the region. | ||
| 2643 | R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) { | ||
| 2644 | R_CONVERT(ResultInvalidAddress, ResultOutOfRange) | ||
| | } R_END_TRY_CATCH; | ||
| 2645 | |||
| 2646 | R_SUCCEED(); | ||
| 2647 | } | ||
| 2648 | |||
| 2649 | Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 2650 | KPhysicalAddress phys_addr, bool is_pa_valid, | ||
| 2651 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2652 | KMemoryState state, KMemoryPermission perm) { | ||
| 2653 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | ||
| 2654 | |||
| 2655 | // Ensure this is a valid map request. | ||
| 2656 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2657 | ResultInvalidCurrentMemory); | ||
| 2658 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2659 | |||
| 2660 | // Lock the table. | ||
| 2661 | KScopedLightLock lk(m_general_lock); | ||
| 2662 | |||
| 2663 | // Find a random address to map at. | ||
| 2664 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, | ||
| 2665 | 0, this->GetNumGuardPages()); | ||
| 2666 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2667 | ASSERT(Common::IsAligned(GetInteger(addr), alignment)); | ||
| 2668 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2669 | R_ASSERT(this->CheckMemoryState( | ||
| 2670 | addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2671 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2672 | |||
| 2673 | // Create an update allocator. | ||
| 2674 | Result allocator_result; | ||
| 2675 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2676 | m_memory_block_slab_manager); | ||
| 2677 | R_TRY(allocator_result); | ||
| 2678 | |||
| 2679 | // We're going to perform an update, so create a helper. | ||
| 2680 | KScopedPageTableUpdater updater(this); | ||
| 2681 | |||
| 2682 | // Perform mapping operation. | ||
| 2683 | if (is_pa_valid) { | ||
| 2684 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2685 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, | ||
| 2686 | OperationType::Map, false)); | ||
| 2687 | } else { | ||
| 2688 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm)); | ||
| 2689 | } | ||
| 2690 | |||
| 2691 | // Update the blocks. | ||
| 2692 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2693 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2694 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2695 | |||
| 2696 | // We successfully mapped the pages. | ||
| 2697 | *out_addr = addr; | ||
| 2698 | R_SUCCEED(); | ||
| 2699 | } | ||
| 2700 | |||
| 2701 | Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 2702 | KMemoryPermission perm) { | ||
| 2703 | // Check that the map is in range. | ||
| 2704 | const size_t size = num_pages * PageSize; | ||
| 2705 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2706 | |||
| 2707 | // Lock the table. | ||
| 2708 | KScopedLightLock lk(m_general_lock); | ||
| 2709 | |||
| 2710 | // Check the memory state. | ||
| 2711 | size_t num_allocator_blocks; | ||
| 2712 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2713 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2714 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2715 | KMemoryAttribute::None)); | ||
| 2716 | |||
| 2717 | // Create an update allocator. | ||
| 2718 | Result allocator_result; | ||
| 2719 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2720 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2721 | R_TRY(allocator_result); | ||
| 2722 | |||
| 2723 | // We're going to perform an update, so create a helper. | ||
| 2724 | KScopedPageTableUpdater updater(this); | ||
| 2725 | |||
| 2726 | // Map the pages. | ||
| 2727 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); | ||
| 2728 | |||
| 2729 | // Update the blocks. | ||
| 2730 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | ||
| 2731 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2732 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2733 | |||
| 2734 | R_SUCCEED(); | ||
| 2735 | } | ||
| 2736 | |||
| 2737 | Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { | ||
| 2738 | // Check that the unmap is in range. | ||
| 2739 | const size_t size = num_pages * PageSize; | ||
| 2740 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2741 | |||
| 2742 | // Lock the table. | ||
| 2743 | KScopedLightLock lk(m_general_lock); | ||
| 2744 | |||
| 2745 | // Check the memory state. | ||
| 2746 | size_t num_allocator_blocks; | ||
| 2747 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2748 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2749 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2750 | KMemoryAttribute::None)); | ||
| 2751 | |||
| 2752 | // Create an update allocator. | ||
| 2753 | Result allocator_result; | ||
| 2754 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2755 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2756 | R_TRY(allocator_result); | ||
| 2757 | |||
| 2758 | // We're going to perform an update, so create a helper. | ||
| 2759 | KScopedPageTableUpdater updater(this); | ||
| 2760 | |||
| 2761 | // Perform the unmap. | ||
| 2762 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2763 | DisableMergeAttribute::None}; | ||
| 2764 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties, | ||
| 2765 | OperationType::Unmap, false)); | ||
| 2766 | |||
| 2767 | // Update the blocks. | ||
| 2768 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2769 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2770 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2771 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2772 | |||
| 2773 | R_SUCCEED(); | ||
| 2774 | } | ||
| 2775 | |||
| 2776 | Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 2777 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2778 | KMemoryState state, KMemoryPermission perm) { | ||
| 2779 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2780 | |||
| 2781 | // Ensure this is a valid map request. | ||
| 2782 | const size_t num_pages = pg.GetNumPages(); | ||
| 2783 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2784 | ResultInvalidCurrentMemory); | ||
| 2785 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2786 | |||
| 2787 | // Lock the table. | ||
| 2788 | KScopedLightLock lk(m_general_lock); | ||
| 2789 | |||
| 2790 | // Find a random address to map at. | ||
| 2791 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, | ||
| 2792 | 0, this->GetNumGuardPages()); | ||
| 2793 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2794 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2795 | R_ASSERT(this->CheckMemoryState( | ||
| 2796 | addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2797 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2798 | |||
| 2799 | // Create an update allocator. | ||
| 2800 | Result allocator_result; | ||
| 2801 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2802 | m_memory_block_slab_manager); | ||
| 2803 | R_TRY(allocator_result); | ||
| 2804 | |||
| 2805 | // We're going to perform an update, so create a helper. | ||
| 2806 | KScopedPageTableUpdater updater(this); | ||
| 2807 | |||
| 2808 | // Perform mapping operation. | ||
| 2809 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2810 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2811 | |||
| 2812 | // Update the blocks. | ||
| 2813 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2814 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2815 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2816 | |||
| 2817 | // We successfully mapped the pages. | ||
| 2818 | *out_addr = addr; | ||
| 2819 | R_SUCCEED(); | ||
| 2820 | } | ||
| 2821 | |||
| 2822 | Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||
| 2823 | KMemoryPermission perm) { | ||
| 2824 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2825 | |||
| 2826 | // Ensure this is a valid map request. | ||
| 2827 | const size_t num_pages = pg.GetNumPages(); | ||
| 2828 | const size_t size = num_pages * PageSize; | ||
| 2829 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||
| 2830 | |||
| 2831 | // Lock the table. | ||
| 2832 | KScopedLightLock lk(m_general_lock); | ||
| 2833 | |||
| 2834 | // Check if state allows us to map. | ||
| 2835 | size_t num_allocator_blocks; | ||
| 2836 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, | ||
| 2837 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2838 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2839 | KMemoryAttribute::None)); | ||
| 2840 | |||
| 2841 | // Create an update allocator. | ||
| 2842 | Result allocator_result; | ||
| 2843 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2844 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2845 | R_TRY(allocator_result); | ||
| 2846 | |||
| 2847 | // We're going to perform an update, so create a helper. | ||
| 2848 | KScopedPageTableUpdater updater(this); | ||
| 2849 | |||
| 2850 | // Perform mapping operation. | ||
| 2851 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2852 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2853 | |||
| 2854 | // Update the blocks. | ||
| 2855 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2856 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2857 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2858 | |||
| 2859 | // We successfully mapped the pages. | ||
| 2860 | R_SUCCEED(); | ||
| 2861 | } | ||
| 2862 | |||
| 2863 | Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, | ||
| 2864 | KMemoryState state) { | ||
| 2865 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2866 | |||
| 2867 | // Ensure this is a valid unmap request. | ||
| 2868 | const size_t num_pages = pg.GetNumPages(); | ||
| 2869 | const size_t size = num_pages * PageSize; | ||
| 2870 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2871 | |||
| 2872 | // Lock the table. | ||
| 2873 | KScopedLightLock lk(m_general_lock); | ||
| 2874 | |||
| 2875 | // Check if state allows us to unmap. | ||
| 2876 | size_t num_allocator_blocks; | ||
| 2877 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2878 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2879 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2880 | KMemoryAttribute::None)); | ||
| 2881 | |||
| 2882 | // Check that the page group is valid. | ||
| 2883 | R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory); | ||
| 2884 | |||
| 2885 | // Create an update allocator. | ||
| 2886 | Result allocator_result; | ||
| 2887 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2888 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2889 | R_TRY(allocator_result); | ||
| 2890 | |||
| 2891 | // We're going to perform an update, so create a helper. | ||
| 2892 | KScopedPageTableUpdater updater(this); | ||
| 2893 | |||
| 2894 | // Perform unmapping operation. | ||
| 2895 | const KPageProperties properties = {KMemoryPermission::None, false, false, | ||
| 2896 | DisableMergeAttribute::None}; | ||
| 2897 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties, | ||
| 2898 | OperationType::Unmap, false)); | ||
| 2899 | |||
| 2900 | // Update the blocks. | ||
| 2901 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2902 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2903 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2904 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2905 | |||
| 2906 | R_SUCCEED(); | ||
| 2907 | } | ||
| 2908 | |||
| 2909 | Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, | ||
| 2910 | size_t num_pages, KMemoryState state_mask, | ||
| 2911 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 2912 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 2913 | KMemoryAttribute attr) { | ||
| 2914 | // Ensure that the page group isn't null. | ||
| 2915 | ASSERT(out != nullptr); | ||
| 2916 | |||
| 2917 | // Make sure that the region we're mapping is valid for the table. | ||
| 2918 | const size_t size = num_pages * PageSize; | ||
| 2919 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2920 | |||
| 2921 | // Lock the table. | ||
| 2922 | KScopedLightLock lk(m_general_lock); | ||
| 2923 | |||
| 2924 | // Check if state allows us to create the group. | ||
| 2925 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 2926 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 2927 | attr_mask, attr)); | ||
| 2928 | |||
| 2929 | // Create a new page group for the region. | ||
| 2930 | R_TRY(this->MakePageGroup(*out, address, num_pages)); | ||
| 2931 | |||
| 2932 | // Open a new reference to the pages in the group. | ||
| 2933 | out->Open(); | ||
| 2934 | |||
| 2935 | R_SUCCEED(); | ||
| 2936 | } | ||
| 2937 | |||
| 2938 | Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) { | ||
| 2939 | // Check that the region is in range. | ||
| 2940 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2941 | |||
| 2942 | // Lock the table. | ||
| 2943 | KScopedLightLock lk(m_general_lock); | ||
| 2944 | |||
| 2945 | // Check the memory state. | ||
| 2946 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 2947 | address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, | ||
| 2948 | KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite, | ||
| 2949 | KMemoryAttribute::Uncached, KMemoryAttribute::None)); | ||
| 2950 | |||
| 2951 | // Get the impl. | ||
| 2952 | auto& impl = this->GetImpl(); | ||
| 2953 | |||
| 2954 | // Begin traversal. | ||
| 2955 | TraversalContext context; | ||
| 2956 | TraversalEntry next_entry; | ||
| 2957 | bool traverse_valid = | ||
| 2958 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address); | ||
| 2959 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 2960 | |||
| 2961 | // Prepare tracking variables. | ||
| 2962 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 2963 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 2964 | size_t tot_size = cur_size; | ||
| 2965 | |||
| 2966 | // Iterate. | ||
| 2967 | while (tot_size < size) { | ||
| 2968 | // Continue the traversal. | ||
| 2969 | traverse_valid = | ||
| 2970 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 2971 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 2972 | |||
| 2973 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 2974 | // Check that the pages are linearly mapped. | ||
| 2975 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 2976 | |||
| 2977 | // Invalidate the block. | ||
| 2978 | if (cur_size > 0) { | ||
| 2979 | // NOTE: Nintendo does not check the result of invalidation. | ||
| 2980 | InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 2981 | } | ||
| 2982 | |||
| 2983 | // Advance. | ||
| 2984 | cur_addr = next_entry.phys_addr; | ||
| 2985 | cur_size = next_entry.block_size; | ||
| 2986 | } else { | ||
| 2987 | cur_size += next_entry.block_size; | ||
| 2988 | } | ||
| 2989 | |||
| 2990 | tot_size += next_entry.block_size; | ||
| 2991 | } | ||
| 2992 | |||
| 2993 | // Ensure we use the right size for the last block. | ||
| 2994 | if (tot_size > size) { | ||
| 2995 | cur_size -= (tot_size - size); | ||
| 2996 | } | ||
| 2997 | |||
| 2998 | // Check that the last block is linearly mapped. | ||
| 2999 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3000 | |||
| 3001 | // Invalidate the last block. | ||
| 3002 | if (cur_size > 0) { | ||
| 3003 | // NOTE: Nintendo does not check the result of invalidation. | ||
| 3004 | InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 3005 | } | ||
| 3006 | |||
| 3007 | R_SUCCEED(); | ||
| 3008 | } | ||
| 3009 | |||
| 3010 | Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) { | ||
| 3011 | // Check pre-condition: this is being called on the current process. | ||
| 3012 | ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable())); | ||
| 3013 | |||
| 3014 | // Check that the region is in range. | ||
| 3015 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3016 | |||
| 3017 | // Lock the table. | ||
| 3018 | KScopedLightLock lk(m_general_lock); | ||
| 3019 | |||
| 3020 | // Check the memory state. | ||
| 3021 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3022 | address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, | ||
| 3023 | KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite, | ||
| 3024 | KMemoryAttribute::Uncached, KMemoryAttribute::None)); | ||
| 3025 | |||
| 3026 | // Invalidate the data cache. | ||
| 3027 | R_RETURN(InvalidateDataCache(address, size)); | ||
| 3028 | } | ||
| 3029 | |||
| 3030 | Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3031 | size_t size) { | ||
| 3032 | // Lightly validate the region is in range. | ||
| 3033 | R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory); | ||
| 3034 | |||
| 3035 | // Lock the table. | ||
| 3036 | KScopedLightLock lk(m_general_lock); | ||
| 3037 | |||
| 3038 | // Require that the memory either be user readable or debuggable. | ||
| 3039 | const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3040 | src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead, | ||
| 3041 | KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3042 | if (!can_read) { | ||
| 3043 | const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3044 | src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug, | ||
| 3045 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, | ||
| 3046 | KMemoryAttribute::None)); | ||
| 3047 | R_UNLESS(can_debug, ResultInvalidCurrentMemory); | ||
| 3048 | } | ||
| 3049 | |||
| 3050 | // Get the impl. | ||
| 3051 | auto& impl = this->GetImpl(); | ||
| 3052 | auto& dst_memory = GetCurrentMemory(m_system.Kernel()); | ||
| 3053 | |||
| 3054 | // Begin traversal. | ||
| 3055 | TraversalContext context; | ||
| 3056 | TraversalEntry next_entry; | ||
| 3057 | bool traverse_valid = | ||
| 3058 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address); | ||
| 3059 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 3060 | |||
| 3061 | // Prepare tracking variables. | ||
| 3062 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3063 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3064 | size_t tot_size = cur_size; | ||
| 3065 | |||
| 3066 | auto PerformCopy = [&]() -> Result { | ||
| 3067 | // Ensure the address is linear mapped. | ||
| 3068 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3069 | |||
| 3070 | // Copy as much aligned data as we can. | ||
| 3071 | if (cur_size >= sizeof(u32)) { | ||
| 3072 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3073 | const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3074 | FlushDataCache(copy_src, copy_size); | ||
| 3075 | R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer); | ||
| 3076 | |||
| 3077 | dst_address += copy_size; | ||
| 3078 | cur_addr += copy_size; | ||
| 3079 | cur_size -= copy_size; | ||
| 3080 | } | ||
| 3081 | |||
| 3082 | // Copy remaining data. | ||
| 3083 | if (cur_size > 0) { | ||
| 3084 | const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3085 | FlushDataCache(copy_src, cur_size); | ||
| 3086 | R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer); | ||
| 3087 | } | ||
| 3088 | |||
| 3089 | R_SUCCEED(); | ||
| 3090 | }; | ||
| 3091 | |||
| 3092 | // Iterate. | ||
| 3093 | while (tot_size < size) { | ||
| 3094 | // Continue the traversal. | ||
| 3095 | traverse_valid = | ||
| 3096 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3097 | ASSERT(traverse_valid); | ||
| 3098 | |||
| 3099 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3100 | // Perform copy. | ||
| 3101 | R_TRY(PerformCopy()); | ||
| 3102 | |||
| 3103 | // Advance. | ||
| 3104 | dst_address += cur_size; | ||
| 3105 | |||
| 3106 | cur_addr = next_entry.phys_addr; | ||
| 3107 | cur_size = next_entry.block_size; | ||
| 3108 | } else { | ||
| 3109 | cur_size += next_entry.block_size; | ||
| 3110 | } | ||
| 3111 | |||
| 3112 | tot_size += next_entry.block_size; | ||
| 3113 | } | ||
| 3114 | |||
| 3115 | // Ensure we use the right size for the last block. | ||
| 3116 | if (tot_size > size) { | ||
| 3117 | cur_size -= (tot_size - size); | ||
| 3118 | } | ||
| 3119 | |||
| 3120 | // Perform copy for the last block. | ||
| 3121 | R_TRY(PerformCopy()); | ||
| 3122 | |||
| 3123 | R_SUCCEED(); | ||
| 3124 | } | ||
| 3125 | |||
| 3126 | Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3127 | size_t size) { | ||
| 3128 | // Lightly validate the region is in range. | ||
| 3129 | R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory); | ||
| 3130 | |||
| 3131 | // Lock the table. | ||
| 3132 | KScopedLightLock lk(m_general_lock); | ||
| 3133 | |||
| 3134 | // Require that the memory either be user writable or debuggable. | ||
| 3135 | const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3136 | dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite, | ||
| 3137 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3138 | if (!can_write) { | ||
| 3139 | const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3140 | dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug, | ||
| 3141 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, | ||
| 3142 | KMemoryAttribute::None)); | ||
| 3143 | R_UNLESS(can_debug, ResultInvalidCurrentMemory); | ||
| 3144 | } | ||
| 3145 | |||
| 3146 | // Get the impl. | ||
| 3147 | auto& impl = this->GetImpl(); | ||
| 3148 | auto& src_memory = GetCurrentMemory(m_system.Kernel()); | ||
| 3149 | |||
| 3150 | // Begin traversal. | ||
| 3151 | TraversalContext context; | ||
| 3152 | TraversalEntry next_entry; | ||
| 3153 | bool traverse_valid = | ||
| 3154 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address); | ||
| 3155 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 3156 | |||
| 3157 | // Prepare tracking variables. | ||
| 3158 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3159 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3160 | size_t tot_size = cur_size; | ||
| 3161 | |||
| 3162 | auto PerformCopy = [&]() -> Result { | ||
| 3163 | // Ensure the address is linear mapped. | ||
| 3164 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3165 | |||
| 3166 | // Copy as much aligned data as we can. | ||
| 3167 | if (cur_size >= sizeof(u32)) { | ||
| 3168 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3169 | void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3170 | R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size), | ||
| 3171 | ResultInvalidCurrentMemory); | ||
| 3172 | |||
| 3173 | StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size); | ||
| 3174 | |||
| 3175 | src_address += copy_size; | ||
| 3176 | cur_addr += copy_size; | ||
| 3177 | cur_size -= copy_size; | ||
| 3178 | } | ||
| 3179 | |||
| 3180 | // Copy remaining data. | ||
| 3181 | if (cur_size > 0) { | ||
| 3182 | void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3183 | R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size), | ||
| 3184 | ResultInvalidCurrentMemory); | ||
| 3185 | |||
| 3186 | StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 3187 | } | ||
| 3188 | |||
| 3189 | R_SUCCEED(); | ||
| 3190 | }; | ||
| 3191 | |||
| 3192 | // Iterate. | ||
| 3193 | while (tot_size < size) { | ||
| 3194 | // Continue the traversal. | ||
| 3195 | traverse_valid = | ||
| 3196 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3197 | ASSERT(traverse_valid); | ||
| 3198 | |||
| 3199 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3200 | // Perform copy. | ||
| 3201 | R_TRY(PerformCopy()); | ||
| 3202 | |||
| 3203 | // Advance. | ||
| 3204 | src_address += cur_size; | ||
| 3205 | |||
| 3206 | cur_addr = next_entry.phys_addr; | ||
| 3207 | cur_size = next_entry.block_size; | ||
| 3208 | } else { | ||
| 3209 | cur_size += next_entry.block_size; | ||
| 3210 | } | ||
| 3211 | |||
| 3212 | tot_size += next_entry.block_size; | ||
| 3213 | } | ||
| 3214 | |||
| 3215 | // Ensure we use the right size for the last block. | ||
| 3216 | if (tot_size > size) { | ||
| 3217 | cur_size -= (tot_size - size); | ||
| 3218 | } | ||
| 3219 | |||
| 3220 | // Perform copy for the last block. | ||
| 3221 | R_TRY(PerformCopy()); | ||
| 3222 | |||
| 3223 | // Invalidate the instruction cache, as this svc allows modifying executable pages. | ||
| 3224 | InvalidateInstructionCache(m_system, dst_address, size); | ||
| 3225 | |||
| 3226 | R_SUCCEED(); | ||
| 3227 | } | ||
| 3228 | |||
| 3229 | Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, | ||
| 3230 | size_t size, KMemoryState state) { | ||
| 3231 | // Check pre-conditions. | ||
| 3232 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3233 | |||
| 3234 | // Determine the mapping extents. | ||
| 3235 | const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize); | ||
| 3236 | const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize); | ||
| 3237 | const size_t map_size = map_end - map_start; | ||
| 3238 | |||
| 3239 | // Get the memory reference to write into. | ||
| 3240 | auto& dst_memory = GetCurrentMemory(m_kernel); | ||
| 3241 | |||
| 3242 | // We're going to perform an update, so create a helper. | ||
| 3243 | KScopedPageTableUpdater updater(this); | ||
| 3244 | |||
| 3245 | // Temporarily map the io memory. | ||
| 3246 | KProcessAddress io_addr; | ||
| 3247 | R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, | ||
| 3248 | state, KMemoryPermission::UserRead)); | ||
| 3249 | |||
| 3250 | // Ensure we unmap the io memory when we're done with it. | ||
| 3251 | const KPageProperties unmap_properties = | ||
| 3252 | KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None}; | ||
| 3253 | SCOPE_EXIT({ | ||
| 3254 | R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false, | ||
| 3255 | unmap_properties, OperationType::Unmap, true)); | ||
| 3256 | }); | ||
| 3257 | |||
| 3258 | // Read the memory. | ||
| 3259 | const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1)); | ||
| 3260 | dst_memory.CopyBlock(dst_addr, read_addr, size); | ||
| 3261 | |||
| 3262 | R_SUCCEED(); | ||
| 3263 | } | ||
| 3264 | |||
| 3265 | Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, | ||
| 3266 | size_t size, KMemoryState state) { | ||
| 3267 | // Check pre-conditions. | ||
| 3268 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3269 | |||
| 3270 | // Determine the mapping extents. | ||
| 3271 | const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize); | ||
| 3272 | const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize); | ||
| 3273 | const size_t map_size = map_end - map_start; | ||
| 3274 | |||
| 3275 | // Get the memory reference to read from. | ||
| 3276 | auto& src_memory = GetCurrentMemory(m_kernel); | ||
| 3277 | |||
| 3278 | // We're going to perform an update, so create a helper. | ||
| 3279 | KScopedPageTableUpdater updater(this); | ||
| 3280 | |||
| 3281 | // Temporarily map the io memory. | ||
| 3282 | KProcessAddress io_addr; | ||
| 3283 | R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, | ||
| 3284 | state, KMemoryPermission::UserReadWrite)); | ||
| 3285 | |||
| 3286 | // Ensure we unmap the io memory when we're done with it. | ||
| 3287 | const KPageProperties unmap_properties = | ||
| 3288 | KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None}; | ||
| 3289 | SCOPE_EXIT({ | ||
| 3290 | R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false, | ||
| 3291 | unmap_properties, OperationType::Unmap, true)); | ||
| 3292 | }); | ||
| 3293 | |||
| 3294 | // Write the memory. | ||
| 3295 | const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1)); | ||
| 3296 | R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer); | ||
| 3297 | |||
| 3298 | R_SUCCEED(); | ||
| 3299 | } | ||
| 3300 | |||
| 3301 | Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3302 | size_t size, KMemoryState state) { | ||
| 3303 | // Lightly validate the range before doing anything else. | ||
| 3304 | R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory); | ||
| 3305 | |||
| 3306 | // We need to lock both this table, and the current process's table, so set up some aliases. | ||
| 3307 | KPageTableBase& src_page_table = *this; | ||
| 3308 | KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable(); | ||
| 3309 | |||
| 3310 | // Acquire the table locks. | ||
| 3311 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 3312 | |||
| 3313 | // Check that the desired range is readable io memory. | ||
| 3314 | R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state, | ||
| 3315 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, | ||
| 3316 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3317 | |||
| 3318 | // Read the memory. | ||
| 3319 | KProcessAddress dst = dst_address; | ||
| 3320 | const KProcessAddress last_address = src_address + size - 1; | ||
| 3321 | while (src_address <= last_address) { | ||
| 3322 | // Get the current physical address. | ||
| 3323 | KPhysicalAddress phys_addr; | ||
| 3324 | ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address)); | ||
| 3325 | |||
| 3326 | // Determine the current read size. | ||
| 3327 | const size_t cur_size = | ||
| 3328 | std::min<size_t>(last_address - src_address + 1, | ||
| 3329 | Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) - | ||
| 3330 | GetInteger(src_address)); | ||
| 3331 | |||
| 3332 | // Read. | ||
| 3333 | R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state)); | ||
| 3334 | |||
| 3335 | // Advance. | ||
| 3336 | src_address += cur_size; | ||
| 3337 | dst += cur_size; | ||
| 3338 | } | ||
| 3339 | |||
| 3340 | R_SUCCEED(); | ||
| 3341 | } | ||
| 3342 | |||
| 3343 | Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3344 | size_t size, KMemoryState state) { | ||
| 3345 | // Lightly validate the range before doing anything else. | ||
| 3346 | R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory); | ||
| 3347 | |||
| 3348 | // We need to lock both this table, and the current process's table, so set up some aliases. | ||
| 3349 | KPageTableBase& src_page_table = *this; | ||
| 3350 | KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable(); | ||
| 3351 | |||
| 3352 | // Acquire the table locks. | ||
| 3353 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 3354 | |||
| 3355 | // Check that the desired range is writable io memory. | ||
| 3356 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3357 | dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite, | ||
| 3358 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3359 | |||
| 3360 | // Write the memory. | ||
| 3361 | KProcessAddress src = src_address; | ||
| 3362 | const KProcessAddress last_address = dst_address + size - 1; | ||
| 3363 | while (dst_address <= last_address) { | ||
| 3364 | // Get the current physical address. | ||
| 3365 | KPhysicalAddress phys_addr; | ||
| 3366 | ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address)); | ||
| 3367 | |||
| 3368 | // Determine the current write size. | ||
| 3369 | const size_t cur_size = | ||
| 3370 | std::min<size_t>(last_address - dst_address + 1, | ||
| 3371 | Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) - | ||
| 3372 | GetInteger(dst_address)); | ||
| 3373 | |||
| 3374 | // Write. | ||
| 3375 | R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state)); | ||
| 3376 | |||
| 3377 | // Advance. | ||
| 3378 | dst_address += cur_size; | ||
| 3379 | src += cur_size; | ||
| 3380 | } | ||
| 3381 | |||
| 3382 | R_SUCCEED(); | ||
| 3383 | } | ||
| 3384 | |||
| 3385 | Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, | ||
| 3386 | size_t size, KMemoryPermission perm, | ||
| 3387 | bool is_aligned, bool check_heap) { | ||
| 3388 | // Lightly validate the range before doing anything else. | ||
| 3389 | const size_t num_pages = size / PageSize; | ||
| 3390 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3391 | |||
| 3392 | // Lock the table. | ||
| 3393 | KScopedLightLock lk(m_general_lock); | ||
| 3394 | |||
| 3395 | // Check the memory state. | ||
| 3396 | const KMemoryState test_state = | ||
| 3397 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | | ||
| 3398 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 3399 | size_t num_allocator_blocks; | ||
| 3400 | KMemoryState old_state; | ||
| 3401 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 3402 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 3403 | test_state, perm, perm, | ||
| 3404 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | ||
| 3405 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | ||
| 3406 | |||
| 3407 | // Create an update allocator. | ||
| 3408 | Result allocator_result; | ||
| 3409 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3410 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3411 | R_TRY(allocator_result); | ||
| 3412 | |||
| 3413 | // Update the memory blocks. | ||
| 3414 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 3415 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | ||
| 3416 | |||
| 3417 | // Set whether the locked memory was io. | ||
| 3418 | *out_is_io = | ||
| 3419 | static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io; | ||
| 3420 | |||
| 3421 | R_SUCCEED(); | ||
| 3422 | } | ||
| 3423 | |||
| 3424 | Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, | ||
| 3425 | bool check_heap) { | ||
| 3426 | // Lightly validate the range before doing anything else. | ||
| 3427 | const size_t num_pages = size / PageSize; | ||
| 3428 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3429 | |||
| 3430 | // Lock the table. | ||
| 3431 | KScopedLightLock lk(m_general_lock); | ||
| 3432 | |||
| 3433 | // Check the memory state. | ||
| 3434 | const KMemoryState test_state = | ||
| 3435 | KMemoryState::FlagCanDeviceMap | | ||
| 3436 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 3437 | size_t num_allocator_blocks; | ||
| 3438 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3439 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, | ||
| 3440 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 3441 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3442 | |||
| 3443 | // Create an update allocator. | ||
| 3444 | Result allocator_result; | ||
| 3445 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3446 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3447 | R_TRY(allocator_result); | ||
| 3448 | |||
| 3449 | // Update the memory blocks. | ||
| 3450 | const KMemoryBlockManager::MemoryBlockLockFunction lock_func = | ||
| 3451 | m_enable_device_address_space_merge | ||
| 3452 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare | ||
| 3453 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; | ||
| 3454 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, | ||
| 3455 | KMemoryPermission::None); | ||
| 3456 | |||
| 3457 | R_SUCCEED(); | ||
| 3458 | } | ||
| 3459 | |||
| 3460 | Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { | ||
| 3461 | // Lightly validate the range before doing anything else. | ||
| 3462 | const size_t num_pages = size / PageSize; | ||
| 3463 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3464 | |||
| 3465 | // Lock the table. | ||
| 3466 | KScopedLightLock lk(m_general_lock); | ||
| 3467 | |||
| 3468 | // Check the memory state. | ||
| 3469 | size_t num_allocator_blocks; | ||
| 3470 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3471 | std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 3472 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 3473 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3474 | |||
| 3475 | // Create an update allocator. | ||
| 3476 | Result allocator_result; | ||
| 3477 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3478 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3479 | R_TRY(allocator_result); | ||
| 3480 | |||
| 3481 | // Update the memory blocks. | ||
| 3482 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 3483 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); | ||
| 3484 | |||
| 3485 | R_SUCCEED(); | ||
| 3486 | } | ||
| 3487 | |||
| 3488 | Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) { | ||
| 3489 | // Lightly validate the range before doing anything else. | ||
| 3490 | const size_t num_pages = size / PageSize; | ||
| 3491 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3492 | |||
| 3493 | // Lock the table. | ||
| 3494 | KScopedLightLock lk(m_general_lock); | ||
| 3495 | |||
| 3496 | // Check memory state. | ||
| 3497 | size_t allocator_num_blocks = 0; | ||
| 3498 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3499 | std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 3500 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 3501 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3502 | |||
| 3503 | // Create an update allocator for the region. | ||
| 3504 | Result allocator_result; | ||
| 3505 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3506 | m_memory_block_slab_manager, allocator_num_blocks); | ||
| 3507 | R_TRY(allocator_result); | ||
| 3508 | |||
| 3509 | // Update the memory blocks. | ||
| 3510 | m_memory_block_manager.UpdateLock( | ||
| 3511 | std::addressof(allocator), address, num_pages, | ||
| 3512 | m_enable_device_address_space_merge | ||
| 3513 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare | ||
| 3514 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, | ||
| 3515 | KMemoryPermission::None); | ||
| 3516 | |||
| 3517 | R_SUCCEED(); | ||
| 3518 | } | ||
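| | |||
| | // Self-contained sketch (assumed names, not part of this commit) of the | ||
| | // two-phase shape shared by the device-map paths above: the state check | ||
| | // first counts how many memory blocks an update could touch or split, an | ||
| | // allocator is sized from that count, and only then is the mutation | ||
| | // applied, so the block update itself cannot fail part-way through. | ||
| | #include <cstddef> | ||
| | #include <vector> | ||
| | |||
| | struct Block { | ||
| |     size_t start; | ||
| |     size_t size; | ||
| |     bool locked; | ||
| | }; | ||
| | |||
| | // Phase 1: count blocks overlapping [start, start + size). | ||
| | inline size_t CountOverlapping(const std::vector<Block>& blocks, size_t start, size_t size) { | ||
| |     size_t n = 0; | ||
| |     for (const auto& b : blocks) { | ||
| |         if (b.start < start + size && start < b.start + b.size) { | ||
| |             ++n; | ||
| |         } | ||
| |     } | ||
| |     return n; | ||
| | } | ||
| | |||
| | // Phase 2: reserve worst-case storage up front, then mutate infallibly. | ||
| | inline void LockOverlapping(std::vector<Block>& blocks, size_t start, size_t size) { | ||
| |     blocks.reserve(blocks.size() + CountOverlapping(blocks, start, size)); | ||
| |     for (auto& b : blocks) { | ||
| |         if (b.start < start + size && start < b.start + b.size) { | ||
| |             b.locked = true; | ||
| |         } | ||
| |     } | ||
| | } | ||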
| 3519 | |||
| 3520 | Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 3521 | KProcessAddress address, size_t size, | ||
| 3522 | KMemoryPermission perm, | ||
| 3523 | bool is_aligned) { | ||
| 3524 | // Lock the table. | ||
| 3525 | KScopedLightLock lk(m_general_lock); | ||
| 3526 | |||
| 3527 | // Get the range. | ||
| 3528 | const KMemoryState test_state = | ||
| 3529 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | ||
| 3530 | R_TRY(this->GetContiguousMemoryRangeWithState( | ||
| 3531 | out, address, size, test_state, test_state, perm, perm, | ||
| 3532 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None)); | ||
| 3533 | |||
| 3534 | // We got the range, so open it. | ||
| 3535 | out->Open(); | ||
| 3536 | |||
| 3537 | R_SUCCEED(); | ||
| 3538 | } | ||
| 3539 | |||
| 3540 | Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, | ||
| 3541 | KProcessAddress address, | ||
| 3542 | size_t size) { | ||
| 3543 | // Lock the table. | ||
| 3544 | KScopedLightLock lk(m_general_lock); | ||
| 3545 | |||
| 3546 | // Get the range. | ||
| 3547 | R_TRY(this->GetContiguousMemoryRangeWithState( | ||
| 3548 | out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap, | ||
| 3549 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 3550 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3551 | |||
| 3552 | // We got the range, so open it. | ||
| 3553 | out->Open(); | ||
| 3554 | |||
| 3555 | R_SUCCEED(); | ||
| 3556 | } | ||
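| | |||
| | // Hedged usage sketch: Open() takes a reference on the pages behind the | ||
| | // range so they cannot be freed while a device is using them, and a | ||
| | // caller is expected to balance it with a matching Close() (pairing | ||
| | // inferred from the Open() calls above; names here are illustrative). | ||
| | // The SCOPE_EXIT macro used elsewhere in this file makes the pairing | ||
| | // robust against early returns: | ||
| | // | ||
| | //   MemoryRange range; // filled in by OpenMemoryRangeFor*DeviceAddressSpace | ||
| | //   R_TRY(pt.OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(range), addr, size)); | ||
| | //   SCOPE_EXIT({ range.Close(); }); // reference dropped on every exit path | ||
| | //   ProgramDevice(range);           // hypothetical device work | ||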
| 3557 | |||
| 3558 | Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, | ||
| 3559 | size_t size) { | ||
| 3560 | R_RETURN(this->LockMemoryAndOpen( | ||
| 3561 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 3562 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 3563 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 3564 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 3565 | KMemoryPermission::KernelReadWrite), | ||
| 3566 | KMemoryAttribute::Locked)); | ||
| 3567 | } | ||
| 3568 | |||
| 3569 | Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { | ||
| 3570 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 3571 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 3572 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3573 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 3574 | KMemoryAttribute::Locked, nullptr)); | ||
| 3575 | } | ||
| 3576 | |||
| 3577 | Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 3578 | KMemoryPermission perm) { | ||
| 3579 | R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer, | ||
| 3580 | KMemoryState::FlagCanTransfer, KMemoryPermission::All, | ||
| 3581 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 3582 | KMemoryAttribute::None, perm, KMemoryAttribute::Locked)); | ||
| 3583 | } | ||
| 3584 | |||
| 3585 | Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size, | ||
| 3586 | const KPageGroup& pg) { | ||
| 3587 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer, | ||
| 3588 | KMemoryState::FlagCanTransfer, KMemoryPermission::None, | ||
| 3589 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3590 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 3591 | KMemoryAttribute::Locked, std::addressof(pg))); | ||
| 3592 | } | ||
| 3593 | |||
| 3594 | Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) { | ||
| 3595 | R_RETURN(this->LockMemoryAndOpen( | ||
| 3596 | out, nullptr, address, size, KMemoryState::FlagCanCodeMemory, | ||
| 3597 | KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 3598 | KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 3599 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 3600 | KMemoryPermission::KernelReadWrite), | ||
| 3601 | KMemoryAttribute::Locked)); | ||
| 3602 | } | ||
| 3603 | |||
| 3604 | Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size, | ||
| 3605 | const KPageGroup& pg) { | ||
| 3606 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory, | ||
| 3607 | KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, | ||
| 3608 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3609 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 3610 | KMemoryAttribute::Locked, std::addressof(pg))); | ||
| 3611 | } | ||
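| | |||
| | // Summary note, inferred from the arguments above: each Lock* requires the | ||
| | // range to be fully UserReadWrite with no attributes set, then re-protects | ||
| | // it (NotMapped plus KernelReadWrite, or the caller-supplied perm for | ||
| | // transfer memory) and sets KMemoryAttribute::Locked. The matching Unlock* | ||
| | // requires Locked to be set, restores UserReadWrite, and the page-group | ||
| | // variants additionally verify that the same KPageGroup returned by Lock* | ||
| | // still backs the range. | ||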
| 3612 | |||
| 3613 | Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, | ||
| 3614 | KProcessAddress address, | ||
| 3615 | size_t size) { | ||
| 3616 | // Lock the table. | ||
| 3617 | KScopedLightLock lk(m_general_lock); | ||
| 3618 | |||
| 3619 | // Get the range. | ||
| 3620 | R_TRY(this->GetContiguousMemoryRangeWithState( | ||
| 3621 | out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, | ||
| 3622 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached, | ||
| 3623 | KMemoryAttribute::None)); | ||
| 3624 | |||
| 3625 | // We got the range, so open it. | ||
| 3626 | out->Open(); | ||
| 3627 | |||
| 3628 | R_SUCCEED(); | ||
| 3629 | } | ||
| 3630 | |||
| 3631 | Result KPageTableBase::CopyMemoryFromLinearToUser( | ||
| 3632 | KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 3633 | KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask, | ||
| 3634 | KMemoryAttribute src_attr) { | ||
| 3635 | // Lightly validate the range before doing anything else. | ||
| 3636 | R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 3637 | |||
| 3638 | // Get the destination memory reference. | ||
| 3639 | auto& dst_memory = GetCurrentMemory(m_kernel); | ||
| 3640 | |||
| 3641 | // Copy the memory. | ||
| 3642 | { | ||
| 3643 | // Lock the table. | ||
| 3644 | KScopedLightLock lk(m_general_lock); | ||
| 3645 | |||
| 3646 | // Check memory state. | ||
| 3647 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3648 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 3649 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 3650 | |||
| 3651 | auto& impl = this->GetImpl(); | ||
| 3652 | |||
| 3653 | // Begin traversal. | ||
| 3654 | TraversalContext context; | ||
| 3655 | TraversalEntry next_entry; | ||
| 3656 | bool traverse_valid = | ||
| 3657 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); | ||
| 3658 | ASSERT(traverse_valid); | ||
| 3659 | |||
| 3660 | // Prepare tracking variables. | ||
| 3661 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3662 | size_t cur_size = | ||
| 3663 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3664 | size_t tot_size = cur_size; | ||
| 3665 | |||
| 3666 | auto PerformCopy = [&]() -> Result { | ||
| 3667 | // Ensure the address is linear mapped. | ||
| 3668 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3669 | |||
| 3670 | // Copy as much aligned data as we can. | ||
| 3671 | if (cur_size >= sizeof(u32)) { | ||
| 3672 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3673 | R_UNLESS(dst_memory.WriteBlock(dst_addr, | ||
| 3674 | GetLinearMappedVirtualPointer(m_kernel, cur_addr), | ||
| 3675 | copy_size), | ||
| 3676 | ResultInvalidCurrentMemory); | ||
| 3677 | |||
| 3678 | dst_addr += copy_size; | ||
| 3679 | cur_addr += copy_size; | ||
| 3680 | cur_size -= copy_size; | ||
| 3681 | } | ||
| 3682 | |||
| 3683 | // Copy remaining data. | ||
| 3684 | if (cur_size > 0) { | ||
| 3685 | R_UNLESS(dst_memory.WriteBlock( | ||
| 3686 | dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size), | ||
| 3687 | ResultInvalidCurrentMemory); | ||
| 3688 | } | ||
| 3689 | |||
| 3690 | R_SUCCEED(); | ||
| 3691 | }; | ||
| 3692 | |||
| 3693 | // Iterate. | ||
| 3694 | while (tot_size < size) { | ||
| 3695 | // Continue the traversal. | ||
| 3696 | traverse_valid = | ||
| 3697 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3698 | ASSERT(traverse_valid); | ||
| 3699 | |||
| 3700 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3701 | // Perform copy. | ||
| 3702 | R_TRY(PerformCopy()); | ||
| 3703 | |||
| 3704 | // Advance. | ||
| 3705 | dst_addr += cur_size; | ||
| 3706 | |||
| 3707 | cur_addr = next_entry.phys_addr; | ||
| 3708 | cur_size = next_entry.block_size; | ||
| 3709 | } else { | ||
| 3710 | cur_size += next_entry.block_size; | ||
| 3711 | } | ||
| 3712 | |||
| 3713 | tot_size += next_entry.block_size; | ||
| 3714 | } | ||
| 3715 | |||
| 3716 | // Ensure we use the right size for the last block. | ||
| 3717 | if (tot_size > size) { | ||
| 3718 | cur_size -= (tot_size - size); | ||
| 3719 | } | ||
| 3720 | |||
| 3721 | // Perform copy for the last block. | ||
| 3722 | R_TRY(PerformCopy()); | ||
| 3723 | } | ||
| 3724 | |||
| 3725 | R_SUCCEED(); | ||
| 3726 | } | ||
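| | |||
| | // Self-contained sketch (assumed names) of the coalescing loop used by | ||
| | // CopyMemoryFromLinearToUser above and its three sibling routines below: | ||
| | // traversal yields physical blocks, adjacent blocks are merged into one | ||
| | // run, and a run is only flushed when the next block is discontiguous or | ||
| | // the request is satisfied. The real routines additionally verify each | ||
| | // run with IsLinearMappedPhysicalAddress, shorten the first run by its | ||
| | // sub-block offset, and split user-memory flushes into an aligned chunk | ||
| | // plus a remainder; those details are simplified away here. | ||
| | #include <cstddef> | ||
| | #include <cstring> | ||
| | #include <vector> | ||
| | |||
| | struct Extent { | ||
| |     const unsigned char* ptr; // start of a mapped physical block | ||
| |     size_t size;              // block size | ||
| | }; | ||
| | |||
| | // Precondition: the extents cover at least `size` bytes. | ||
| | inline void CoalescingCopy(unsigned char* dst, size_t size, const std::vector<Extent>& extents) { | ||
| |     const unsigned char* cur = extents[0].ptr; | ||
| |     size_t cur_size = extents[0].size; | ||
| |     size_t tot_size = cur_size; | ||
| | |||
| |     for (size_t i = 1; tot_size < size; ++i) { | ||
| |         const Extent& next = extents[i]; | ||
| |         if (next.ptr != cur + cur_size) { | ||
| |             std::memcpy(dst, cur, cur_size); // flush the finished run | ||
| |             dst += cur_size; | ||
| |             cur = next.ptr; | ||
| |             cur_size = next.size; | ||
| |         } else { | ||
| |             cur_size += next.size; // contiguous: grow the run instead | ||
| |         } | ||
| |         tot_size += next.size; | ||
| |     } | ||
| | |||
| |     if (tot_size > size) { | ||
| |         cur_size -= tot_size - size; // trim the tail to the request | ||
| |     } | ||
| |     std::memcpy(dst, cur, cur_size); // flush the last run | ||
| | } | ||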
| 3727 | |||
| 3728 | Result KPageTableBase::CopyMemoryFromLinearToKernel( | ||
| 3729 | void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 3730 | KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask, | ||
| 3731 | KMemoryAttribute src_attr) { | ||
| 3732 | // Lightly validate the range before doing anything else. | ||
| 3733 | R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 3734 | |||
| 3735 | // Copy the memory. | ||
| 3736 | { | ||
| 3737 | // Lock the table. | ||
| 3738 | KScopedLightLock lk(m_general_lock); | ||
| 3739 | |||
| 3740 | // Check memory state. | ||
| 3741 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3742 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 3743 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 3744 | |||
| 3745 | auto& impl = this->GetImpl(); | ||
| 3746 | |||
| 3747 | // Begin traversal. | ||
| 3748 | TraversalContext context; | ||
| 3749 | TraversalEntry next_entry; | ||
| 3750 | bool traverse_valid = | ||
| 3751 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); | ||
| 3752 | ASSERT(traverse_valid); | ||
| 3753 | |||
| 3754 | // Prepare tracking variables. | ||
| 3755 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3756 | size_t cur_size = | ||
| 3757 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3758 | size_t tot_size = cur_size; | ||
| 3759 | |||
| 3760 | auto PerformCopy = [&]() -> Result { | ||
| 3761 | // Ensure the address is linear mapped. | ||
| 3762 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3763 | |||
| 3764 | // Copy the data. | ||
| 3765 | std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 3766 | |||
| 3767 | R_SUCCEED(); | ||
| 3768 | }; | ||
| 3769 | |||
| 3770 | // Iterate. | ||
| 3771 | while (tot_size < size) { | ||
| 3772 | // Continue the traversal. | ||
| 3773 | traverse_valid = | ||
| 3774 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3775 | ASSERT(traverse_valid); | ||
| 3776 | |||
| 3777 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3778 | // Perform copy. | ||
| 3779 | R_TRY(PerformCopy()); | ||
| 3780 | |||
| 3781 | // Advance. | ||
| 3782 | buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size); | ||
| 3783 | |||
| 3784 | cur_addr = next_entry.phys_addr; | ||
| 3785 | cur_size = next_entry.block_size; | ||
| 3786 | } else { | ||
| 3787 | cur_size += next_entry.block_size; | ||
| 3788 | } | ||
| 3789 | |||
| 3790 | tot_size += next_entry.block_size; | ||
| 3791 | } | ||
| 3792 | |||
| 3793 | // Ensure we use the right size for the last block. | ||
| 3794 | if (tot_size > size) { | ||
| 3795 | cur_size -= (tot_size - size); | ||
| 3796 | } | ||
| 3797 | |||
| 3798 | // Perform copy for the last block. | ||
| 3799 | R_TRY(PerformCopy()); | ||
| 3800 | } | ||
| 3801 | |||
| 3802 | R_SUCCEED(); | ||
| 3803 | } | ||
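| | |||
| | // Worked example for the initial cur_size computation used in these copy | ||
| | // routines (illustrative numbers): with next_entry.block_size = 0x1000 | ||
| | // and cur_addr = 0x12A00, the address sits 0xA00 bytes into its block, | ||
| | // so cur_size = 0x1000 - (0x12A00 & 0xFFF) = 0x600: only the bytes from | ||
| | // cur_addr to the end of that block belong to the first run. For a | ||
| | // block-aligned cur_addr the mask term is zero and the first run is the | ||
| | // whole block. | ||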
| 3804 | |||
| 3805 | Result KPageTableBase::CopyMemoryFromUserToLinear( | ||
| 3806 | KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 3807 | KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 3808 | KProcessAddress src_addr) { | ||
| 3809 | // Lightly validate the range before doing anything else. | ||
| 3810 | R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 3811 | |||
| 3812 | // Get the source memory reference. | ||
| 3813 | auto& src_memory = GetCurrentMemory(m_kernel); | ||
| 3814 | |||
| 3815 | // Copy the memory. | ||
| 3816 | { | ||
| 3817 | // Lock the table. | ||
| 3818 | KScopedLightLock lk(m_general_lock); | ||
| 3819 | |||
| 3820 | // Check memory state. | ||
| 3821 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3822 | dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, | ||
| 3823 | dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); | ||
| 3824 | |||
| 3825 | auto& impl = this->GetImpl(); | ||
| 3826 | |||
| 3827 | // Begin traversal. | ||
| 3828 | TraversalContext context; | ||
| 3829 | TraversalEntry next_entry; | ||
| 3830 | bool traverse_valid = | ||
| 3831 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr); | ||
| 3832 | ASSERT(traverse_valid); | ||
| 3833 | |||
| 3834 | // Prepare tracking variables. | ||
| 3835 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3836 | size_t cur_size = | ||
| 3837 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3838 | size_t tot_size = cur_size; | ||
| 3839 | |||
| 3840 | auto PerformCopy = [&]() -> Result { | ||
| 3841 | // Ensure the address is linear mapped. | ||
| 3842 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3843 | |||
| 3844 | // Copy as much aligned data as we can. | ||
| 3845 | if (cur_size >= sizeof(u32)) { | ||
| 3846 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3847 | R_UNLESS(src_memory.ReadBlock(src_addr, | ||
| 3848 | GetLinearMappedVirtualPointer(m_kernel, cur_addr), | ||
| 3849 | copy_size), | ||
| 3850 | ResultInvalidCurrentMemory); | ||
| 3851 | src_addr += copy_size; | ||
| 3852 | cur_addr += copy_size; | ||
| 3853 | cur_size -= copy_size; | ||
| 3854 | } | ||
| 3855 | |||
| 3856 | // Copy remaining data. | ||
| 3857 | if (cur_size > 0) { | ||
| 3858 | R_UNLESS(src_memory.ReadBlock( | ||
| 3859 | src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size), | ||
| 3860 | ResultInvalidCurrentMemory); | ||
| 3861 | } | ||
| 3862 | |||
| 3863 | R_SUCCEED(); | ||
| 3864 | }; | ||
| 3865 | |||
| 3866 | // Iterate. | ||
| 3867 | while (tot_size < size) { | ||
| 3868 | // Continue the traversal. | ||
| 3869 | traverse_valid = | ||
| 3870 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3871 | ASSERT(traverse_valid); | ||
| 3872 | |||
| 3873 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3874 | // Perform copy. | ||
| 3875 | R_TRY(PerformCopy()); | ||
| 3876 | |||
| 3877 | // Advance. | ||
| 3878 | src_addr += cur_size; | ||
| 3879 | |||
| 3880 | cur_addr = next_entry.phys_addr; | ||
| 3881 | cur_size = next_entry.block_size; | ||
| 3882 | } else { | ||
| 3883 | cur_size += next_entry.block_size; | ||
| 3884 | } | ||
| 3885 | |||
| 3886 | tot_size += next_entry.block_size; | ||
| 3887 | } | ||
| 3888 | |||
| 3889 | // Ensure we use the right size for the last block. | ||
| 3890 | if (tot_size > size) { | ||
| 3891 | cur_size -= (tot_size - size); | ||
| 3892 | } | ||
| 3893 | |||
| 3894 | // Perform copy for the last block. | ||
| 3895 | R_TRY(PerformCopy()); | ||
| 3896 | } | ||
| 3897 | |||
| 3898 | R_SUCCEED(); | ||
| 3899 | } | ||
| 3900 | |||
| 3901 | Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, | ||
| 3902 | KMemoryState dst_state_mask, | ||
| 3903 | KMemoryState dst_state, | ||
| 3904 | KMemoryPermission dst_test_perm, | ||
| 3905 | KMemoryAttribute dst_attr_mask, | ||
| 3906 | KMemoryAttribute dst_attr, void* buffer) { | ||
| 3907 | // Lightly validate the range before doing anything else. | ||
| 3908 | R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 3909 | |||
| 3910 | // Copy the memory. | ||
| 3911 | { | ||
| 3912 | // Lock the table. | ||
| 3913 | KScopedLightLock lk(m_general_lock); | ||
| 3914 | |||
| 3915 | // Check memory state. | ||
| 3916 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3917 | dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, | ||
| 3918 | dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); | ||
| 3919 | |||
| 3920 | auto& impl = this->GetImpl(); | ||
| 3921 | |||
| 3922 | // Begin traversal. | ||
| 3923 | TraversalContext context; | ||
| 3924 | TraversalEntry next_entry; | ||
| 3925 | bool traverse_valid = | ||
| 3926 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr); | ||
| 3927 | ASSERT(traverse_valid); | ||
| 3928 | |||
| 3929 | // Prepare tracking variables. | ||
| 3930 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3931 | size_t cur_size = | ||
| 3932 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3933 | size_t tot_size = cur_size; | ||
| 3934 | |||
| 3935 | auto PerformCopy = [&]() -> Result { | ||
| 3936 | // Ensure the address is linear mapped. | ||
| 3937 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3938 | |||
| 3939 | // Copy the data. | ||
| 3940 | std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size); | ||
| 3941 | |||
| 3942 | R_SUCCEED(); | ||
| 3943 | }; | ||
| 3944 | |||
| 3945 | // Iterate. | ||
| 3946 | while (tot_size < size) { | ||
| 3947 | // Continue the traversal. | ||
| 3948 | traverse_valid = | ||
| 3949 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3950 | ASSERT(traverse_valid); | ||
| 3951 | |||
| 3952 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3953 | // Perform copy. | ||
| 3954 | R_TRY(PerformCopy()); | ||
| 3955 | |||
| 3956 | // Advance. | ||
| 3957 | buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size); | ||
| 3958 | |||
| 3959 | cur_addr = next_entry.phys_addr; | ||
| 3960 | cur_size = next_entry.block_size; | ||
| 3961 | } else { | ||
| 3962 | cur_size += next_entry.block_size; | ||
| 3963 | } | ||
| 3964 | |||
| 3965 | tot_size += next_entry.block_size; | ||
| 3966 | } | ||
| 3967 | |||
| 3968 | // Ensure we use the right size for the last block. | ||
| 3969 | if (tot_size > size) { | ||
| 3970 | cur_size -= (tot_size - size); | ||
| 3971 | } | ||
| 3972 | |||
| 3973 | // Perform copy for the last block. | ||
| 3974 | R_TRY(PerformCopy()); | ||
| 3975 | } | ||
| 3976 | |||
| 3977 | R_SUCCEED(); | ||
| 3978 | } | ||
| 3979 | |||
| 3980 | Result KPageTableBase::CopyMemoryFromHeapToHeap( | ||
| 3981 | KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 3982 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 3983 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 3984 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 3985 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 3986 | // For convenience, alias this. | ||
| 3987 | KPageTableBase& src_page_table = *this; | ||
| 3988 | |||
| 3989 | // Lightly validate the ranges before doing anything else. | ||
| 3990 | R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 3991 | R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 3992 | |||
| 3993 | // Copy the memory. | ||
| 3994 | { | ||
| 3995 | // Acquire the table locks. | ||
| 3996 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 3997 | |||
| 3998 | // Check memory state. | ||
| 3999 | R_TRY(src_page_table.CheckMemoryStateContiguous( | ||
| 4000 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 4001 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 4002 | R_TRY(dst_page_table.CheckMemoryStateContiguous( | ||
| 4003 | dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, | ||
| 4004 | dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); | ||
| 4005 | |||
| 4006 | // Get implementations. | ||
| 4007 | auto& src_impl = src_page_table.GetImpl(); | ||
| 4008 | auto& dst_impl = dst_page_table.GetImpl(); | ||
| 4009 | |||
| 4010 | // Prepare for traversal. | ||
| 4011 | TraversalContext src_context; | ||
| 4012 | TraversalContext dst_context; | ||
| 4013 | TraversalEntry src_next_entry; | ||
| 4014 | TraversalEntry dst_next_entry; | ||
| 4015 | bool traverse_valid; | ||
| 4016 | |||
| 4017 | // Begin traversal. | ||
| 4018 | traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), | ||
| 4019 | std::addressof(src_context), src_addr); | ||
| 4020 | ASSERT(traverse_valid); | ||
| 4021 | traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), | ||
| 4022 | std::addressof(dst_context), dst_addr); | ||
| 4023 | ASSERT(traverse_valid); | ||
| 4024 | |||
| 4025 | // Prepare tracking variables. | ||
| 4026 | KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4027 | KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4028 | size_t cur_src_size = src_next_entry.block_size - | ||
| 4029 | (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); | ||
| 4030 | size_t cur_dst_size = dst_next_entry.block_size - | ||
| 4031 | (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); | ||
| 4032 | |||
| 4033 | // Adjust the initial block sizes. | ||
| 4034 | src_next_entry.block_size = cur_src_size; | ||
| 4035 | dst_next_entry.block_size = cur_dst_size; | ||
| 4036 | |||
| 4037 | // Before we get any crazier, succeed if there's nothing to do. | ||
| 4038 | R_SUCCEED_IF(size == 0); | ||
| 4039 | |||
| 4040 | // We're going to manage dual traversal via an offset against the total size. | ||
| 4041 | KPhysicalAddress cur_src_addr = cur_src_block_addr; | ||
| 4042 | KPhysicalAddress cur_dst_addr = cur_dst_block_addr; | ||
| 4043 | size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size); | ||
| 4044 | |||
| 4045 | // Iterate. | ||
| 4046 | size_t ofs = 0; | ||
| 4047 | while (ofs < size) { | ||
| 4048 | // Determine how much we can copy this iteration. | ||
| 4049 | const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs); | ||
| 4050 | |||
| 4051 | // If we need to advance the traversals, do so. | ||
| 4052 | bool updated_src = false, updated_dst = false, skip_copy = false; | ||
| 4053 | if (ofs + cur_copy_size != size) { | ||
| 4054 | if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { | ||
| 4055 | // Continue the src traversal. | ||
| 4056 | traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), | ||
| 4057 | std::addressof(src_context)); | ||
| 4058 | ASSERT(traverse_valid); | ||
| 4059 | |||
| 4060 | // Update source. | ||
| 4061 | updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr; | ||
| 4062 | } | ||
| 4063 | |||
| 4064 | if (cur_dst_addr + cur_min_size == | ||
| 4065 | dst_next_entry.phys_addr + dst_next_entry.block_size) { | ||
| 4066 | // Continue the dst traversal. | ||
| 4067 | traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), | ||
| 4068 | std::addressof(dst_context)); | ||
| 4069 | ASSERT(traverse_valid); | ||
| 4070 | |||
| 4071 | // Update destination. | ||
| 4072 | updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr; | ||
| 4073 | } | ||
| 4074 | |||
| 4075 | // If we didn't update either the source or the destination, skip the copy this iteration. | ||
| 4076 | if (!updated_src && !updated_dst) { | ||
| 4077 | skip_copy = true; | ||
| 4078 | |||
| 4079 | // Update the source block address. | ||
| 4080 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4081 | } | ||
| 4082 | } | ||
| 4083 | |||
| 4084 | // Do the copy, unless we're skipping it. | ||
| 4085 | if (!skip_copy) { | ||
| 4086 | // We need both ends of the copy to be heap blocks. | ||
| 4087 | R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory); | ||
| 4088 | R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory); | ||
| 4089 | |||
| 4090 | // Copy the data. | ||
| 4091 | std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr), | ||
| 4092 | GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size); | ||
| 4093 | |||
| 4094 | // Update. | ||
| 4095 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4096 | cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; | ||
| 4097 | cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4098 | cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; | ||
| 4099 | |||
| 4100 | // Advance offset. | ||
| 4101 | ofs += cur_copy_size; | ||
| 4102 | } | ||
| 4103 | |||
| 4104 | // Update min size. | ||
| 4105 | cur_src_size = src_next_entry.block_size; | ||
| 4106 | cur_dst_size = dst_next_entry.block_size; | ||
| 4107 | cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, | ||
| 4108 | cur_dst_block_addr - cur_dst_addr + cur_dst_size); | ||
| 4109 | } | ||
| 4110 | } | ||
| 4111 | |||
| 4112 | R_SUCCEED(); | ||
| 4113 | } | ||
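| | |||
| | // Self-contained sketch (assumed names) of the dual-cursor walk | ||
| | // implemented by the two heap-to-heap routines in this file: each | ||
| | // iteration copies the minimum of what remains in the current source | ||
| | // run, the current destination run, and the overall request, then | ||
| | // advances whichever side was exhausted. The kernel version also | ||
| | // coalesces physically-contiguous runs and requires both cursors to | ||
| | // point at heap memory; both details are simplified away here. | ||
| | #include <algorithm> | ||
| | #include <cstddef> | ||
| | #include <cstring> | ||
| | #include <vector> | ||
| | |||
| | struct Run { | ||
| |     unsigned char* ptr; | ||
| |     size_t size; | ||
| | }; | ||
| | |||
| | // Precondition: both run lists cover at least `size` bytes. | ||
| | inline void DualCursorCopy(const std::vector<Run>& dst, const std::vector<Run>& src, size_t size) { | ||
| |     size_t si = 0, so = 0;   // source run index / offset within it | ||
| |     size_t di = 0, dofs = 0; // destination run index / offset within it | ||
| |     size_t ofs = 0; | ||
| |     while (ofs < size) { | ||
| |         const size_t chunk = std::min({src[si].size - so, dst[di].size - dofs, size - ofs}); | ||
| |         std::memcpy(dst[di].ptr + dofs, src[si].ptr + so, chunk); | ||
| |         so += chunk; | ||
| |         dofs += chunk; | ||
| |         ofs += chunk; | ||
| |         if (so == src[si].size) { ++si; so = 0; }     // source run exhausted | ||
| |         if (dofs == dst[di].size) { ++di; dofs = 0; } // destination run exhausted | ||
| |     } | ||
| | } | ||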
| 4114 | |||
| 4115 | Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 4116 | KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 4117 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 4118 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 4119 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 4120 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 4121 | // For convenience, alias this. | ||
| 4122 | KPageTableBase& src_page_table = *this; | ||
| 4123 | |||
| 4124 | // Lightly validate the ranges before doing anything else. | ||
| 4125 | R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 4126 | R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 4127 | |||
| 4128 | // Copy the memory. | ||
| 4129 | { | ||
| 4130 | // Acquire the table locks. | ||
| 4131 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 4132 | |||
| 4133 | // Check memory state for source. | ||
| 4134 | R_TRY(src_page_table.CheckMemoryStateContiguous( | ||
| 4135 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 4136 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 4137 | |||
| 4138 | // Destination state is intentionally unchecked. | ||
| 4139 | |||
| 4140 | // Get implementations. | ||
| 4141 | auto& src_impl = src_page_table.GetImpl(); | ||
| 4142 | auto& dst_impl = dst_page_table.GetImpl(); | ||
| 4143 | |||
| 4144 | // Prepare for traversal. | ||
| 4145 | TraversalContext src_context; | ||
| 4146 | TraversalContext dst_context; | ||
| 4147 | TraversalEntry src_next_entry; | ||
| 4148 | TraversalEntry dst_next_entry; | ||
| 4149 | bool traverse_valid; | ||
| 4150 | |||
| 4151 | // Begin traversal. | ||
| 4152 | traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), | ||
| 4153 | std::addressof(src_context), src_addr); | ||
| 4154 | ASSERT(traverse_valid); | ||
| 4155 | traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), | ||
| 4156 | std::addressof(dst_context), dst_addr); | ||
| 4157 | ASSERT(traverse_valid); | ||
| 4158 | |||
| 4159 | // Prepare tracking variables. | ||
| 4160 | KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4161 | KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4162 | size_t cur_src_size = src_next_entry.block_size - | ||
| 4163 | (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); | ||
| 4164 | size_t cur_dst_size = dst_next_entry.block_size - | ||
| 4165 | (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); | ||
| 4166 | |||
| 4167 | // Adjust the initial block sizes. | ||
| 4168 | src_next_entry.block_size = cur_src_size; | ||
| 4169 | dst_next_entry.block_size = cur_dst_size; | ||
| 4170 | |||
| 4171 | // Before we get any crazier, succeed if there's nothing to do. | ||
| 4172 | R_SUCCEED_IF(size == 0); | ||
| 4173 | |||
| 4174 | // We're going to manage dual traversal via an offset against the total size. | ||
| 4175 | KPhysicalAddress cur_src_addr = cur_src_block_addr; | ||
| 4176 | KPhysicalAddress cur_dst_addr = cur_dst_block_addr; | ||
| 4177 | size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size); | ||
| 4178 | |||
| 4179 | // Iterate. | ||
| 4180 | size_t ofs = 0; | ||
| 4181 | while (ofs < size) { | ||
| 4182 | // Determine how much we can copy this iteration. | ||
| 4183 | const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs); | ||
| 4184 | |||
| 4185 | // If we need to advance the traversals, do so. | ||
| 4186 | bool updated_src = false, updated_dst = false, skip_copy = false; | ||
| 4187 | if (ofs + cur_copy_size != size) { | ||
| 4188 | if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { | ||
| 4189 | // Continue the src traversal. | ||
| 4190 | traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), | ||
| 4191 | std::addressof(src_context)); | ||
| 4192 | ASSERT(traverse_valid); | ||
| 4193 | |||
| 4194 | // Update source. | ||
| 4195 | updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr; | ||
| 4196 | } | ||
| 4197 | |||
| 4198 | if (cur_dst_addr + cur_min_size == | ||
| 4199 | dst_next_entry.phys_addr + dst_next_entry.block_size) { | ||
| 4200 | // Continue the dst traversal. | ||
| 4201 | traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), | ||
| 4202 | std::addressof(dst_context)); | ||
| 4203 | ASSERT(traverse_valid); | ||
| 4204 | |||
| 4205 | // Update destination. | ||
| 4206 | updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr; | ||
| 4207 | } | ||
| 4208 | |||
| 4209 | // If we didn't update either the source or the destination, skip the copy this iteration. | ||
| 4210 | if (!updated_src && !updated_dst) { | ||
| 4211 | skip_copy = true; | ||
| 4212 | |||
| 4213 | // Update the source block address. | ||
| 4214 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4215 | } | ||
| 4216 | } | ||
| 4217 | |||
| 4218 | // Do the copy, unless we're skipping it. | ||
| 4219 | if (!skip_copy) { | ||
| 4220 | // We need both ends of the copy to be heap blocks. | ||
| 4221 | R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory); | ||
| 4222 | R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory); | ||
| 4223 | |||
| 4224 | // Copy the data. | ||
| 4225 | std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr), | ||
| 4226 | GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size); | ||
| 4227 | |||
| 4228 | // Update. | ||
| 4229 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4230 | cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; | ||
| 4231 | cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4232 | cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; | ||
| 4233 | |||
| 4234 | // Advance offset. | ||
| 4235 | ofs += cur_copy_size; | ||
| 4236 | } | ||
| 4237 | |||
| 4238 | // Update min size. | ||
| 4239 | cur_src_size = src_next_entry.block_size; | ||
| 4240 | cur_dst_size = dst_next_entry.block_size; | ||
| 4241 | cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, | ||
| 4242 | cur_dst_block_addr - cur_dst_addr + cur_dst_size); | ||
| 4243 | } | ||
| 4244 | } | ||
| 4245 | |||
| 4246 | R_SUCCEED(); | ||
| 4247 | } | ||
| 4248 | |||
| 4249 | Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 4250 | KProcessAddress address, size_t size, | ||
| 4251 | KMemoryPermission test_perm, KMemoryState dst_state) { | ||
| 4252 | // Validate pre-conditions. | ||
| 4253 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 4254 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 4255 | test_perm == KMemoryPermission::UserRead); | ||
| 4256 | |||
| 4257 | // Check that the address is in range. | ||
| 4258 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 4259 | |||
| 4260 | // Get the source permission. | ||
| 4261 | const auto src_perm = static_cast<KMemoryPermission>( | ||
| 4262 | (test_perm == KMemoryPermission::UserReadWrite) | ||
| 4263 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 4264 | : KMemoryPermission::UserRead); | ||
| 4265 | |||
| 4266 | // Get aligned extents. | ||
| 4267 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 4268 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 4269 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 4270 | const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 4271 | |||
| 4272 | const auto aligned_src_last = GetInteger(aligned_src_end) - 1; | ||
| 4273 | const auto mapping_src_last = GetInteger(mapping_src_end) - 1; | ||
| 4274 | |||
| 4275 | // Get the test state and attribute mask. | ||
| 4276 | KMemoryState test_state; | ||
| 4277 | KMemoryAttribute test_attr_mask; | ||
| 4278 | switch (dst_state) { | ||
| 4279 | case KMemoryState::Ipc: | ||
| 4280 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 4281 | test_attr_mask = | ||
| 4282 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 4283 | break; | ||
| 4284 | case KMemoryState::NonSecureIpc: | ||
| 4285 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 4286 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4287 | break; | ||
| 4288 | case KMemoryState::NonDeviceIpc: | ||
| 4289 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 4290 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4291 | break; | ||
| 4292 | default: | ||
| 4293 | R_THROW(ResultInvalidCombination); | ||
| 4294 | } | ||
| 4295 | |||
| 4296 | // Ensure that on failure, we roll back appropriately. | ||
| 4297 | size_t mapped_size = 0; | ||
| 4298 | ON_RESULT_FAILURE { | ||
| 4299 | if (mapped_size > 0) { | ||
| 4300 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 4301 | src_perm); | ||
| 4302 | } | ||
| 4303 | }; | ||
| 4304 | |||
| 4305 | size_t blocks_needed = 0; | ||
| 4306 | |||
| 4307 | // Iterate, mapping as needed. | ||
| 4308 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 4309 | while (true) { | ||
| 4310 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 4311 | |||
| 4312 | // Validate the current block. | ||
| 4313 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 4314 | test_attr_mask, KMemoryAttribute::None)); | ||
| 4315 | |||
| 4316 | if (mapping_src_start < mapping_src_end && | ||
| 4317 | GetInteger(mapping_src_start) < info.GetEndAddress() && | ||
| 4318 | info.GetAddress() < GetInteger(mapping_src_end)) { | ||
| 4319 | const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) | ||
| 4320 | ? info.GetAddress() | ||
| 4321 | : GetInteger(mapping_src_start); | ||
| 4322 | const auto cur_end = mapping_src_last >= info.GetLastAddress() | ||
| 4323 | ? info.GetEndAddress() | ||
| 4324 | : GetInteger(mapping_src_end); | ||
| 4325 | const size_t cur_size = cur_end - cur_start; | ||
| 4326 | |||
| 4327 | if (info.GetAddress() < GetInteger(mapping_src_start)) { | ||
| 4328 | ++blocks_needed; | ||
| 4329 | } | ||
| 4330 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 4331 | ++blocks_needed; | ||
| 4332 | } | ||
| 4333 | |||
| 4334 | // Set the permissions on the block, if we need to. | ||
| 4335 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 4336 | const DisableMergeAttribute head_body_attr = | ||
| 4337 | (GetInteger(mapping_src_start) >= info.GetAddress()) | ||
| 4338 | ? DisableMergeAttribute::DisableHeadAndBody | ||
| 4339 | : DisableMergeAttribute::None; | ||
| 4340 | const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) | ||
| 4341 | ? DisableMergeAttribute::DisableTail | ||
| 4342 | : DisableMergeAttribute::None; | ||
| 4343 | const KPageProperties properties = { | ||
| 4344 | src_perm, false, false, | ||
| 4345 | static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)}; | ||
| 4346 | R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties, | ||
| 4347 | OperationType::ChangePermissions, false)); | ||
| 4348 | } | ||
| 4349 | |||
| 4350 | // Note that we mapped this part. | ||
| 4351 | mapped_size += cur_size; | ||
| 4352 | } | ||
| 4353 | |||
| 4354 | // If the block is at the end, we're done. | ||
| 4355 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 4356 | break; | ||
| 4357 | } | ||
| 4358 | |||
| 4359 | // Advance. | ||
| 4360 | ++it; | ||
| 4361 | ASSERT(it != m_memory_block_manager.end()); | ||
| 4362 | } | ||
| 4363 | |||
| 4364 | if (out_blocks_needed != nullptr) { | ||
| 4365 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 4366 | *out_blocks_needed = blocks_needed; | ||
| 4367 | } | ||
| 4368 | |||
| 4369 | R_SUCCEED(); | ||
| 4370 | } | ||
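| | |||
| | // Worked example for the aligned vs. mapping extents above (PageSize | ||
| | // 0x1000, illustrative values): address = 0x100A00 and size = 0x2800 | ||
| | // give aligned_src = [0x100000, 0x104000) but mapping_src = [0x101000, | ||
| | // 0x103000). Only the whole pages in mapping_src can be aliased into | ||
| | // the server directly; the head page [0x100000, 0x101000) and the tail | ||
| | // page [0x103000, 0x104000) also hold unrelated client data, so | ||
| | // SetupForIpcServer below replaces them with freshly allocated partial | ||
| | // pages. | ||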
| 4371 | |||
| 4372 | Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size, | ||
| 4373 | KProcessAddress src_addr, KMemoryPermission test_perm, | ||
| 4374 | KMemoryState dst_state, KPageTableBase& src_page_table, | ||
| 4375 | bool send) { | ||
| 4376 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 4377 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 4378 | |||
| 4379 | // Check that we can theoretically map. | ||
| 4380 | const KProcessAddress region_start = m_alias_region_start; | ||
| 4381 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 4382 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 4383 | |||
| 4384 | // Get aligned source extents. | ||
| 4385 | const KProcessAddress src_start = src_addr; | ||
| 4386 | const KProcessAddress src_end = src_addr + size; | ||
| 4387 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize); | ||
| 4388 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize); | ||
| 4389 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize); | ||
| 4390 | const KProcessAddress mapping_src_end = | ||
| 4391 | Common::AlignDown(GetInteger(src_start) + size, PageSize); | ||
| 4392 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 4393 | const size_t mapping_src_size = | ||
| 4394 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 4395 | |||
| 4396 | // Select a random address to map at. | ||
| 4397 | KProcessAddress dst_addr = 0; | ||
| 4398 | { | ||
| 4399 | const size_t alignment = 4_KiB; | ||
| 4400 | const size_t offset = GetInteger(aligned_src_start) & (alignment - 1); | ||
| 4401 | |||
| 4402 | dst_addr = | ||
| 4403 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 4404 | alignment, offset, this->GetNumGuardPages()); | ||
| 4405 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 4406 | } | ||
| 4407 | |||
| 4408 | // Check that the chosen address can contain the mapping we're about to create. | ||
| 4409 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 4410 | |||
| 4411 | // Create an update allocator. | ||
| 4412 | Result allocator_result; | ||
| 4413 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4414 | m_memory_block_slab_manager); | ||
| 4415 | R_TRY(allocator_result); | ||
| 4416 | |||
| 4417 | // We're going to perform an update, so create a helper. | ||
| 4418 | KScopedPageTableUpdater updater(this); | ||
| 4419 | |||
| 4420 | // Reserve space for any partial pages we allocate. | ||
| 4421 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 4422 | KScopedResourceReservation memory_reservation( | ||
| 4423 | m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size); | ||
| 4424 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 4425 | |||
| 4426 | // Ensure that we manage page references correctly. | ||
| 4427 | KPhysicalAddress start_partial_page = 0; | ||
| 4428 | KPhysicalAddress end_partial_page = 0; | ||
| 4429 | KProcessAddress cur_mapped_addr = dst_addr; | ||
| 4430 | |||
| 4431 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll | ||
| 4432 | // be freed on scope exit. | ||
| 4433 | SCOPE_EXIT({ | ||
| 4434 | if (start_partial_page != 0) { | ||
| 4435 | m_kernel.MemoryManager().Close(start_partial_page, 1); | ||
| 4436 | } | ||
| 4437 | if (end_partial_page != 0) { | ||
| 4438 | m_kernel.MemoryManager().Close(end_partial_page, 1); | ||
| 4439 | } | ||
| 4440 | }); | ||
| 4441 | |||
| 4442 | ON_RESULT_FAILURE { | ||
| 4443 | if (cur_mapped_addr != dst_addr) { | ||
| 4444 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 4445 | DisableMergeAttribute::None}; | ||
| 4446 | R_ASSERT(this->Operate(updater.GetPageList(), dst_addr, | ||
| 4447 | (cur_mapped_addr - dst_addr) / PageSize, 0, false, | ||
| 4448 | unmap_properties, OperationType::Unmap, true)); | ||
| 4449 | } | ||
| 4450 | }; | ||
| 4451 | |||
| 4452 | // Allocate the start page as needed. | ||
| 4453 | if (aligned_src_start < mapping_src_start) { | ||
| 4454 | start_partial_page = | ||
| 4455 | m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 4456 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 4457 | } | ||
| 4458 | |||
| 4459 | // Allocate the end page as needed. | ||
| 4460 | if (mapping_src_end < aligned_src_end && | ||
| 4461 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 4462 | end_partial_page = | ||
| 4463 | m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 4464 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 4465 | } | ||
| 4466 | |||
| 4467 | // Get the implementation. | ||
| 4468 | auto& src_impl = src_page_table.GetImpl(); | ||
| 4469 | |||
| 4470 | // Get the fill value for partial pages. | ||
| 4471 | const auto fill_val = m_ipc_fill_value; | ||
| 4472 | |||
| 4473 | // Begin traversal. | ||
| 4474 | TraversalContext context; | ||
| 4475 | TraversalEntry next_entry; | ||
| 4476 | bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), | ||
| 4477 | std::addressof(context), aligned_src_start); | ||
| 4478 | ASSERT(traverse_valid); | ||
| 4479 | |||
| 4480 | // Prepare tracking variables. | ||
| 4481 | KPhysicalAddress cur_block_addr = next_entry.phys_addr; | ||
| 4482 | size_t cur_block_size = | ||
| 4483 | next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1)); | ||
| 4484 | size_t tot_block_size = cur_block_size; | ||
| 4485 | |||
| 4486 | // Map the start page, if we have one. | ||
| 4487 | if (start_partial_page != 0) { | ||
| 4488 | // Ensure the page holds correct data. | ||
| 4489 | u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page); | ||
| 4490 | if (send) { | ||
| 4491 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 4492 | size_t copy_size, clear_size; | ||
| 4493 | if (src_end < mapping_src_start) { | ||
| 4494 | copy_size = size; | ||
| 4495 | clear_size = mapping_src_start - src_end; | ||
| 4496 | } else { | ||
| 4497 | copy_size = mapping_src_start - src_start; | ||
| 4498 | clear_size = 0; | ||
| 4499 | } | ||
| 4500 | |||
| 4501 | std::memset(start_partial_virt, fill_val, partial_offset); | ||
| 4502 | std::memcpy(start_partial_virt + partial_offset, | ||
| 4503 | GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset, | ||
| 4504 | copy_size); | ||
| 4505 | if (clear_size > 0) { | ||
| 4506 | std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size); | ||
| 4507 | } | ||
| 4508 | } else { | ||
| 4509 | std::memset(start_partial_virt, fill_val, PageSize); | ||
| 4510 | } | ||
| 4511 | |||
| 4512 | // Map the page. | ||
| 4513 | const KPageProperties start_map_properties = {test_perm, false, false, | ||
| 4514 | DisableMergeAttribute::DisableHead}; | ||
| 4515 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, | ||
| 4516 | start_map_properties, OperationType::Map, false)); | ||
| 4517 | |||
| 4518 | // Update tracking extents. | ||
| 4519 | cur_mapped_addr += PageSize; | ||
| 4520 | cur_block_addr += PageSize; | ||
| 4521 | cur_block_size -= PageSize; | ||
| 4522 | |||
| 4523 | // If the block's size was one page, we may need to continue traversal. | ||
| 4524 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 4525 | traverse_valid = | ||
| 4526 | src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 4527 | ASSERT(traverse_valid); | ||
| 4528 | |||
| 4529 | cur_block_addr = next_entry.phys_addr; | ||
| 4530 | cur_block_size = next_entry.block_size; | ||
| 4531 | tot_block_size += next_entry.block_size; | ||
| 4532 | } | ||
| 4533 | } | ||
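| | |||
| | // Worked example for the start-page fill above (PageSize 0x1000, | ||
| | // illustrative numbers): a send buffer at src_start = 0x100A00 with | ||
| | // size = 0x300 has src_end = 0x100D00 < mapping_src_start = 0x101000, | ||
| | // so the whole message lives in the head page: partial_offset = 0xA00, | ||
| | // copy_size = 0x300, clear_size = 0x101000 - 0x100D00 = 0x300. Every | ||
| | // byte outside [0xA00, 0xD00) is set to fill_val, so the server never | ||
| | // observes neighboring client data sharing the page. | ||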
| 4534 | |||
| 4535 | // Map the remaining pages. | ||
| 4536 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 4537 | // Continue the traversal. | ||
| 4538 | traverse_valid = | ||
| 4539 | src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 4540 | ASSERT(traverse_valid); | ||
| 4541 | |||
| 4542 | // Process the block. | ||
| 4543 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 4544 | // Map the block we've been processing so far. | ||
| 4545 | const KPageProperties map_properties = {test_perm, false, false, | ||
| 4546 | (cur_mapped_addr == dst_addr) | ||
| 4547 | ? DisableMergeAttribute::DisableHead | ||
| 4548 | : DisableMergeAttribute::None}; | ||
| 4549 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize, | ||
| 4550 | cur_block_addr, true, map_properties, OperationType::Map, false)); | ||
| 4551 | |||
| 4552 | // Update tracking extents. | ||
| 4553 | cur_mapped_addr += cur_block_size; | ||
| 4554 | cur_block_addr = next_entry.phys_addr; | ||
| 4555 | cur_block_size = next_entry.block_size; | ||
| 4556 | } else { | ||
| 4557 | cur_block_size += next_entry.block_size; | ||
| 4558 | } | ||
| 4559 | tot_block_size += next_entry.block_size; | ||
| 4560 | } | ||
| 4561 | |||
| 4562 | // Handle the last direct-mapped page. | ||
| 4563 | if (const KProcessAddress mapped_block_end = | ||
| 4564 | aligned_src_start + tot_block_size - cur_block_size; | ||
| 4565 | mapped_block_end < mapping_src_end) { | ||
| 4566 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 4567 | |||
| 4568 | // Map the last block. | ||
| 4569 | const KPageProperties map_properties = {test_perm, false, false, | ||
| 4570 | (cur_mapped_addr == dst_addr) | ||
| 4571 | ? DisableMergeAttribute::DisableHead | ||
| 4572 | : DisableMergeAttribute::None}; | ||
| 4573 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize, | ||
| 4574 | cur_block_addr, true, map_properties, OperationType::Map, false)); | ||
| 4575 | |||
| 4576 | // Update tracking extents. | ||
| 4577 | cur_mapped_addr += last_block_size; | ||
| 4578 | cur_block_addr += last_block_size; | ||
| 4579 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 4580 | cur_block_size == last_block_size) { | ||
| 4581 | traverse_valid = | ||
| 4582 | src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 4583 | ASSERT(traverse_valid); | ||
| 4584 | |||
| 4585 | cur_block_addr = next_entry.phys_addr; | ||
| 4586 | } | ||
| 4587 | } | ||
| 4588 | |||
| 4589 | // Map the end page, if we have one. | ||
| 4590 | if (end_partial_page != 0) { | ||
| 4591 | // Ensure the page holds correct data. | ||
| 4592 | u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page); | ||
| 4593 | if (send) { | ||
| 4594 | const size_t copy_size = src_end - mapping_src_end; | ||
| 4595 | std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr), | ||
| 4596 | copy_size); | ||
| 4597 | std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size); | ||
| 4598 | } else { | ||
| 4599 | std::memset(end_partial_virt, fill_val, PageSize); | ||
| 4600 | } | ||
| 4601 | |||
| 4602 | // Map the page. | ||
| 4603 | const KPageProperties map_properties = {test_perm, false, false, | ||
| 4604 | (cur_mapped_addr == dst_addr) | ||
| 4605 | ? DisableMergeAttribute::DisableHead | ||
| 4606 | : DisableMergeAttribute::None}; | ||
| 4607 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true, | ||
| 4608 | map_properties, OperationType::Map, false)); | ||
| 4609 | } | ||
| 4610 | |||
| 4611 | // Update memory blocks to reflect our changes. | ||
| 4612 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 4613 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 4614 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 4615 | KMemoryBlockDisableMergeAttribute::None); | ||
| 4616 | |||
| 4617 | // Set the output address. | ||
| 4618 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 4619 | |||
| 4620 | // We succeeded. | ||
| 4621 | memory_reservation.Commit(); | ||
| 4622 | R_SUCCEED(); | ||
| 4623 | } | ||
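| | |||
| | // Note on the returned address above: the server mapping starts at the | ||
| | // page-aligned dst_addr, so the client's sub-page offset is re-applied; | ||
| | // e.g. for src_start = 0x100A00, *out_addr = dst_addr + 0xA00 points at | ||
| | // the first byte of the message rather than at the head partial page. | ||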
| 4624 | |||
| 4625 | Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, | ||
| 4626 | KProcessAddress src_addr, KPageTableBase& src_page_table, | ||
| 4627 | KMemoryPermission test_perm, KMemoryState dst_state, bool send) { | ||
| 4628 | // For convenience, alias this. | ||
| 4629 | KPageTableBase& dst_page_table = *this; | ||
| 4630 | |||
| 4631 | // Acquire the table locks. | ||
| 4632 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 4633 | |||
| 4634 | // We're going to perform an update, so create a helper. | ||
| 4635 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 4636 | |||
| 4637 | // Perform client setup. | ||
| 4638 | size_t num_allocator_blocks; | ||
| 4639 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 4640 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 4641 | test_perm, dst_state)); | ||
| 4642 | |||
| 4643 | // Create an update allocator. | ||
| 4644 | Result allocator_result; | ||
| 4645 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4646 | src_page_table.m_memory_block_slab_manager, | ||
| 4647 | num_allocator_blocks); | ||
| 4648 | R_TRY(allocator_result); | ||
| 4649 | |||
| 4650 | // Get the mapped extents. | ||
| 4651 | const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize); | ||
| 4652 | const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize); | ||
| 4653 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 4654 | |||
| 4655 | // Ensure that we clean up appropriately if we fail after this. | ||
| 4656 | const auto src_perm = static_cast<KMemoryPermission>( | ||
| 4657 | (test_perm == KMemoryPermission::UserReadWrite) | ||
| 4658 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 4659 | : KMemoryPermission::UserRead); | ||
| 4660 | ON_RESULT_FAILURE { | ||
| 4661 | if (src_map_end > src_map_start) { | ||
| 4662 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 4663 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 4664 | } | ||
| 4665 | }; | ||
| 4666 | |||
| 4667 | // Perform server setup. | ||
| 4668 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 4669 | src_page_table, send)); | ||
| 4670 | |||
| 4671 | // If anything was mapped, ipc-lock the pages. | ||
| 4672 | if (src_map_start < src_map_end) { | ||
| 4673 | // Get the source permission. | ||
| 4674 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 4675 | (src_map_end - src_map_start) / PageSize, | ||
| 4676 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 4677 | } | ||
| 4678 | |||
| 4679 | R_SUCCEED(); | ||
| 4680 | } | ||
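| | |||
| | // Sequencing summary for the function above: the client range is | ||
| | // re-protected first (SetupForIpcClient), the server alias is then | ||
| | // constructed (SetupForIpcServer), and only after both succeed are the | ||
| | // client blocks ipc-locked. If the server step fails, the | ||
| | // ON_RESULT_FAILURE handler restores the client permissions, so a | ||
| | // half-completed transfer never strands the source range with its | ||
| | // IPC-restricted permissions. | ||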
| 4681 | |||
| 4682 | Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, | ||
| 4683 | KMemoryState dst_state) { | ||
| 4684 | // Validate the address. | ||
| 4685 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 4686 | |||
| 4687 | // Lock the table. | ||
| 4688 | KScopedLightLock lk(m_general_lock); | ||
| 4689 | |||
| 4690 | // Validate the memory state. | ||
| 4691 | size_t num_allocator_blocks; | ||
| 4692 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 4693 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 4694 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 4695 | KMemoryAttribute::None)); | ||
| 4696 | |||
| 4697 | // Create an update allocator. | ||
| 4698 | Result allocator_result; | ||
| 4699 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4700 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 4701 | R_TRY(allocator_result); | ||
| 4702 | |||
| 4703 | // We're going to perform an update, so create a helper. | ||
| 4704 | KScopedPageTableUpdater updater(this); | ||
| 4705 | |||
| 4706 | // Get aligned extents. | ||
| 4707 | const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 4708 | const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 4709 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 4710 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 4711 | |||
| 4712 | // Unmap the pages. | ||
| 4713 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 4714 | DisableMergeAttribute::None}; | ||
| 4715 | R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false, | ||
| 4716 | unmap_properties, OperationType::Unmap, false)); | ||
| 4717 | |||
| 4718 | // Update memory blocks. | ||
| 4719 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 4720 | KMemoryState::None, KMemoryPermission::None, | ||
| 4721 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 4722 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 4723 | |||
| 4724 |     // Release the partial (unaligned head/tail) pages from the resource limit. | ||
| 4725 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 4726 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 4727 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 4728 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 4729 | aligned_size - mapping_size); | ||
| 4730 | |||
| 4731 | R_SUCCEED(); | ||
| 4732 | } | ||
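The release amount above hinges on two different roundings of the same buffer: the aligned extents (AlignDown of the start, AlignUp of the end) cover every page the buffer touches, while the mapping extents (AlignUp of the start, AlignDown of the end) cover only pages fully contained in it. A small worked example with hypothetical values, assuming 4 KiB pages (plain bit masks stand in for Common::AlignUp/AlignDown):

#include <cstddef>
#include <cstdio>

int main() {
    constexpr size_t PageSize = 0x1000;
    const size_t address = 0x10000800;  // hypothetical unaligned start
    const size_t size = 0x3000;         // hypothetical length

    // Aligned extents: every page the buffer touches.
    const size_t aligned_start = address & ~(PageSize - 1);
    const size_t aligned_end = (address + size + PageSize - 1) & ~(PageSize - 1);
    const size_t aligned_size = aligned_end - aligned_start;  // 0x4000

    // Mapping extents: only pages fully contained in the buffer.
    const size_t mapping_start = (address + PageSize - 1) & ~(PageSize - 1);
    const size_t mapping_end = (address + size) & ~(PageSize - 1);
    const size_t mapping_size =
        mapping_start < mapping_end ? mapping_end - mapping_start : 0;  // 0x2000

    // The partial head/tail pages were charged at setup and are released here.
    std::printf("released = 0x%zx\n", aligned_size - mapping_size);  // 0x2000
}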
| 4733 | |||
| 4734 | Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, | ||
| 4735 | KMemoryState dst_state) { | ||
| 4736 | // Validate the address. | ||
| 4737 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 4738 | |||
| 4739 | // Get aligned source extents. | ||
| 4740 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 4741 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 4742 | const KProcessAddress mapping_last = mapping_end - 1; | ||
| 4743 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 4744 | |||
| 4745 | // If nothing was mapped, we're actually done immediately. | ||
| 4746 | R_SUCCEED_IF(mapping_size == 0); | ||
| 4747 | |||
| 4748 | // Get the test state and attribute mask. | ||
| 4749 | KMemoryState test_state; | ||
| 4750 | KMemoryAttribute test_attr_mask; | ||
| 4751 | switch (dst_state) { | ||
| 4752 | case KMemoryState::Ipc: | ||
| 4753 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 4754 | test_attr_mask = | ||
| 4755 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 4756 | break; | ||
| 4757 | case KMemoryState::NonSecureIpc: | ||
| 4758 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 4759 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4760 | break; | ||
| 4761 | case KMemoryState::NonDeviceIpc: | ||
| 4762 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 4763 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4764 | break; | ||
| 4765 | default: | ||
| 4766 | R_THROW(ResultInvalidCombination); | ||
| 4767 | } | ||
| 4768 | |||
| 4769 | // Lock the table. | ||
| 4770 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 4771 | // convention elsewhere in KPageTableBase. | ||
| 4772 | KScopedLightLock lk(m_general_lock); | ||
| 4773 | |||
| 4774 | // We're going to perform an update, so create a helper. | ||
| 4775 | KScopedPageTableUpdater updater(this); | ||
| 4776 | |||
| 4777 | // Ensure that on failure, we roll back appropriately. | ||
| 4778 | size_t mapped_size = 0; | ||
| 4779 | ON_RESULT_FAILURE { | ||
| 4780 | if (mapped_size > 0) { | ||
| 4781 | // Determine where the mapping ends. | ||
| 4782 | const auto mapped_end = GetInteger(mapping_start) + mapped_size; | ||
| 4783 | const auto mapped_last = mapped_end - 1; | ||
| 4784 | |||
| 4785 | // Get current and next iterators. | ||
| 4786 | KMemoryBlockManager::const_iterator start_it = | ||
| 4787 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 4788 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 4789 | ++next_it; | ||
| 4790 | |||
| 4791 | // Get the current block info. | ||
| 4792 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 4793 | |||
| 4794 | // Create tracking variables. | ||
| 4795 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 4796 | size_t cur_size = cur_info.GetSize(); | ||
| 4797 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 4798 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 4799 | bool first = cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 4800 | False(cur_info.GetDisableMergeAttribute() & | ||
| 4801 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 4802 | |||
| 4803 | while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) { | ||
| 4804 | // Check that we have a next block. | ||
| 4805 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 4806 | |||
| 4807 | // Get the next info. | ||
| 4808 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 4809 | |||
| 4810 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 4811 | const bool next_perm_eq = | ||
| 4812 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 4813 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 4814 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 4815 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 4816 | // We can consolidate the reprotection for the current and next block into a | ||
| 4817 | // single call. | ||
| 4818 | cur_size += next_info.GetSize(); | ||
| 4819 | } else { | ||
| 4820 | // We have to operate on the current block. | ||
| 4821 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 4822 | const KPageProperties properties = { | ||
| 4823 | cur_info.GetPermission(), false, false, | ||
| 4824 | first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail | ||
| 4825 | : DisableMergeAttribute::None}; | ||
| 4826 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, | ||
| 4827 | cur_size / PageSize, 0, false, properties, | ||
| 4828 | OperationType::ChangePermissions, true)); | ||
| 4829 | } | ||
| 4830 | |||
| 4831 | // Advance. | ||
| 4832 | cur_address = next_info.GetAddress(); | ||
| 4833 | cur_size = next_info.GetSize(); | ||
| 4834 | first = false; | ||
| 4835 | } | ||
| 4836 | |||
| 4837 | // Advance. | ||
| 4838 | cur_info = next_info; | ||
| 4839 | cur_perm_eq = next_perm_eq; | ||
| 4840 | cur_needs_set_perm = next_needs_set_perm; | ||
| 4841 | ++next_it; | ||
| 4842 | } | ||
| 4843 | |||
| 4844 | // Process the last block. | ||
| 4845 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 4846 | const KPageProperties properties = { | ||
| 4847 | cur_info.GetPermission(), false, false, | ||
| 4848 | first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail | ||
| 4849 | : DisableMergeAttribute::None}; | ||
| 4850 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, | ||
| 4851 | false, properties, OperationType::ChangePermissions, true)); | ||
| 4852 | } | ||
| 4853 | } | ||
| 4854 | }; | ||
| 4855 | |||
| 4856 | // Iterate, reprotecting as needed. | ||
| 4857 | { | ||
| 4858 | // Get current and next iterators. | ||
| 4859 | KMemoryBlockManager::const_iterator start_it = | ||
| 4860 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 4861 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 4862 | ++next_it; | ||
| 4863 | |||
| 4864 | // Validate the current block. | ||
| 4865 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 4866 | R_ASSERT(this->CheckMemoryState( | ||
| 4867 | cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None, | ||
| 4868 | test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked)); | ||
| 4869 | |||
| 4870 | // Create tracking variables. | ||
| 4871 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 4872 | size_t cur_size = cur_info.GetSize(); | ||
| 4873 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 4874 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 4875 | bool first = | ||
| 4876 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 4877 | False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked); | ||
| 4878 | |||
| 4879 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 4880 | // Check that we have a next block. | ||
| 4881 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 4882 | |||
| 4883 | // Get the next info. | ||
| 4884 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 4885 | |||
| 4886 | // Validate the next block. | ||
| 4887 | R_ASSERT(this->CheckMemoryState( | ||
| 4888 | next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None, | ||
| 4889 | test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked)); | ||
| 4890 | |||
| 4891 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 4892 | const bool next_perm_eq = | ||
| 4893 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 4894 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 4895 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 4896 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 4897 | // We can consolidate the reprotection for the current and next block into a single | ||
| 4898 | // call. | ||
| 4899 | cur_size += next_info.GetSize(); | ||
| 4900 | } else { | ||
| 4901 | // We have to operate on the current block. | ||
| 4902 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 4903 | const KPageProperties properties = { | ||
| 4904 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 4905 | : cur_info.GetPermission(), | ||
| 4906 | false, false, | ||
| 4907 | first ? DisableMergeAttribute::EnableHeadAndBody | ||
| 4908 | : DisableMergeAttribute::None}; | ||
| 4909 | R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, | ||
| 4910 | false, properties, OperationType::ChangePermissions, | ||
| 4911 | false)); | ||
| 4912 | } | ||
| 4913 | |||
| 4914 | // Mark that we mapped the block. | ||
| 4915 | mapped_size += cur_size; | ||
| 4916 | |||
| 4917 | // Advance. | ||
| 4918 | cur_address = next_info.GetAddress(); | ||
| 4919 | cur_size = next_info.GetSize(); | ||
| 4920 | first = false; | ||
| 4921 | } | ||
| 4922 | |||
| 4923 | // Advance. | ||
| 4924 | cur_info = next_info; | ||
| 4925 | cur_perm_eq = next_perm_eq; | ||
| 4926 | cur_needs_set_perm = next_needs_set_perm; | ||
| 4927 | ++next_it; | ||
| 4928 | } | ||
| 4929 | |||
| 4930 | // Process the last block. | ||
| 4931 | const auto lock_count = | ||
| 4932 | cur_info.GetIpcLockCount() + | ||
| 4933 | (next_it != m_memory_block_manager.end() | ||
| 4934 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 4935 | : 0); | ||
| 4936 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 4937 | const DisableMergeAttribute head_body_attr = | ||
| 4938 | first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None; | ||
| 4939 | const DisableMergeAttribute tail_attr = | ||
| 4940 | lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None; | ||
| 4941 | const KPageProperties properties = { | ||
| 4942 | cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), | ||
| 4943 | false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)}; | ||
| 4944 | R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false, | ||
| 4945 | properties, OperationType::ChangePermissions, false)); | ||
| 4946 | } | ||
| 4947 | } | ||
| 4948 | |||
| 4949 | // Create an update allocator. | ||
| 4950 | // NOTE: Guaranteed zero blocks needed here. | ||
| 4951 | Result allocator_result; | ||
| 4952 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4953 | m_memory_block_slab_manager, 0); | ||
| 4954 | R_TRY(allocator_result); | ||
| 4955 | |||
| 4956 | // Unlock the pages. | ||
| 4957 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 4958 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 4959 | KMemoryPermission::None); | ||
| 4960 | |||
| 4961 | R_SUCCEED(); | ||
| 4962 | } | ||
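Both the rollback path and the main loop above use the same run-coalescing pattern: walk the memory blocks, and as long as the next block would be reprotected with identical parameters, extend the current range instead of issuing another Operate call. A simplified sketch with a hypothetical Block type (the per-block permission state is collapsed into a single field, and block sizes are assumed nonzero):

#include <cstddef>
#include <functional>
#include <vector>

struct Block {
    size_t size;
    int perm;  // stand-in for permission / needs-set-perm state
};

void ApplyCoalesced(const std::vector<Block>& blocks,
                    const std::function<void(int perm, size_t size)>& op) {
    size_t run_size = 0;
    int run_perm = 0;
    for (const Block& b : blocks) {
        if (run_size != 0 && b.perm == run_perm) {
            run_size += b.size;  // same parameters: extend the current run
        } else {
            if (run_size != 0) {
                op(run_perm, run_size);  // flush the previous run
            }
            run_perm = b.perm;
            run_size = b.size;
        }
    }
    if (run_size != 0) {
        op(run_perm, run_size);  // flush the final run
    }
}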
| 4963 | |||
| 4964 | void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, | ||
| 4965 | KProcessAddress address, size_t size, | ||
| 4966 | KMemoryPermission prot_perm) { | ||
| 4967 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 4968 | ASSERT(Common::IsAligned(GetInteger(address), PageSize)); | ||
| 4969 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 4970 | |||
| 4971 | // Get the mapped extents. | ||
| 4972 | const KProcessAddress src_map_start = address; | ||
| 4973 | const KProcessAddress src_map_end = address + size; | ||
| 4974 | const KProcessAddress src_map_last = src_map_end - 1; | ||
| 4975 | |||
| 4976 | // This function is only invoked when there's something to do. | ||
| 4977 | ASSERT(src_map_end > src_map_start); | ||
| 4978 | |||
| 4979 | // Iterate over blocks, fixing permissions. | ||
| 4980 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 4981 | while (true) { | ||
| 4982 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 4983 | |||
| 4984 | const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) | ||
| 4985 | ? info.GetAddress() | ||
| 4986 | : GetInteger(src_map_start); | ||
| 4987 | const auto cur_end = | ||
| 4988 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 4989 | |||
| 4990 | // If we can, fix the protections on the block. | ||
| 4991 | if ((info.GetIpcLockCount() == 0 && | ||
| 4992 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 4993 | (info.GetIpcLockCount() != 0 && | ||
| 4994 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 4995 | // Check if we actually need to fix the protections on the block. | ||
| 4996 | if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || | ||
| 4997 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 4998 | const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) | ||
| 4999 | ? (False(info.GetDisableMergeAttribute() & | ||
| 5000 | (KMemoryBlockDisableMergeAttribute::Locked | | ||
| 5001 | KMemoryBlockDisableMergeAttribute::IpcLeft))) | ||
| 5002 | : info.GetAddress() <= GetInteger(src_map_start); | ||
| 5003 | |||
| 5004 | const DisableMergeAttribute head_body_attr = | ||
| 5005 | start_nc ? DisableMergeAttribute::EnableHeadAndBody | ||
| 5006 | : DisableMergeAttribute::None; | ||
| 5007 | DisableMergeAttribute tail_attr; | ||
| 5008 | if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) { | ||
| 5009 | auto next_it = it; | ||
| 5010 | ++next_it; | ||
| 5011 | |||
| 5012 | const auto lock_count = | ||
| 5013 | info.GetIpcLockCount() + | ||
| 5014 | (next_it != m_memory_block_manager.end() | ||
| 5015 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 5016 | : 0); | ||
| 5017 | tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail | ||
| 5018 | : DisableMergeAttribute::None; | ||
| 5019 | } else { | ||
| 5020 | tail_attr = DisableMergeAttribute::None; | ||
| 5021 | } | ||
| 5022 | |||
| 5023 | const KPageProperties properties = { | ||
| 5024 | info.GetPermission(), false, false, | ||
| 5025 | static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)}; | ||
| 5026 | R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0, | ||
| 5027 | false, properties, OperationType::ChangePermissions, true)); | ||
| 5028 | } | ||
| 5029 | } | ||
| 5030 | |||
| 5031 | // If we're past the end of the region, we're done. | ||
| 5032 | if (src_map_last <= info.GetLastAddress()) { | ||
| 5033 | break; | ||
| 5034 | } | ||
| 5035 | |||
| 5036 | // Advance. | ||
| 5037 | ++it; | ||
| 5038 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5039 | } | ||
| 5040 | } | ||
| 5041 | |||
| 5042 | Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 5043 | // Lock the physical memory lock. | ||
| 5044 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 5045 | |||
| 5046 | // Calculate the last address for convenience. | ||
| 5047 | const KProcessAddress last_address = address + size - 1; | ||
| 5048 | |||
| 5049 | // Define iteration variables. | ||
| 5050 | KProcessAddress cur_address; | ||
| 5051 | size_t mapped_size; | ||
| 5052 | |||
| 5053 | // The entire mapping process can be retried. | ||
| 5054 | while (true) { | ||
| 5055 | // Check if the memory is already mapped. | ||
| 5056 | { | ||
| 5057 | // Lock the table. | ||
| 5058 | KScopedLightLock lk(m_general_lock); | ||
| 5059 | |||
| 5060 | // Iterate over the memory. | ||
| 5061 | cur_address = address; | ||
| 5062 | mapped_size = 0; | ||
| 5063 | |||
| 5064 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5065 | while (true) { | ||
| 5066 | // Check that the iterator is valid. | ||
| 5067 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5068 | |||
| 5069 | // Get the memory info. | ||
| 5070 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5071 | |||
| 5072 | // Check if we're done. | ||
| 5073 | if (last_address <= info.GetLastAddress()) { | ||
| 5074 | if (info.GetState() != KMemoryState::Free) { | ||
| 5075 | mapped_size += (last_address + 1 - cur_address); | ||
| 5076 | } | ||
| 5077 | break; | ||
| 5078 | } | ||
| 5079 | |||
| 5080 | // Track the memory if it's mapped. | ||
| 5081 | if (info.GetState() != KMemoryState::Free) { | ||
| 5082 | mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 5083 | } | ||
| 5084 | |||
| 5085 | // Advance. | ||
| 5086 | cur_address = info.GetEndAddress(); | ||
| 5087 | ++it; | ||
| 5088 | } | ||
| 5089 | |||
| 5090 | // If the size mapped is the size requested, we've nothing to do. | ||
| 5091 | R_SUCCEED_IF(size == mapped_size); | ||
| 5092 | } | ||
| 5093 | |||
| 5094 | // Allocate and map the memory. | ||
| 5095 | { | ||
| 5096 | // Reserve the memory from the process resource limit. | ||
| 5097 | KScopedResourceReservation memory_reservation( | ||
| 5098 | m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size); | ||
| 5099 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 5100 | |||
| 5101 | // Allocate pages for the new memory. | ||
| 5102 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 5103 | R_TRY(m_kernel.MemoryManager().AllocateForProcess( | ||
| 5104 | std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option, | ||
| 5105 | GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value)); | ||
| 5106 | |||
| 5107 |             // If we fail in the next bit (or retry), we need to clean up the pages. | ||
| 5108 | auto pg_guard = SCOPE_GUARD({ | ||
| 5109 | pg.OpenFirst(); | ||
| 5110 | pg.Close(); | ||
| 5111 | }); | ||
| 5112 | |||
| 5113 | // Map the memory. | ||
| 5114 | { | ||
| 5115 | // Lock the table. | ||
| 5116 | KScopedLightLock lk(m_general_lock); | ||
| 5117 | |||
| 5118 | size_t num_allocator_blocks = 0; | ||
| 5119 | |||
| 5120 | // Verify that nobody has mapped memory since we first checked. | ||
| 5121 | { | ||
| 5122 | // Iterate over the memory. | ||
| 5123 | size_t checked_mapped_size = 0; | ||
| 5124 | cur_address = address; | ||
| 5125 | |||
| 5126 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5127 | while (true) { | ||
| 5128 | // Check that the iterator is valid. | ||
| 5129 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5130 | |||
| 5131 | // Get the memory info. | ||
| 5132 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5133 | |||
| 5134 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 5135 | if (is_free) { | ||
| 5136 | if (info.GetAddress() < GetInteger(address)) { | ||
| 5137 | ++num_allocator_blocks; | ||
| 5138 | } | ||
| 5139 | if (last_address < info.GetLastAddress()) { | ||
| 5140 | ++num_allocator_blocks; | ||
| 5141 | } | ||
| 5142 | } | ||
| 5143 | |||
| 5144 | // Check if we're done. | ||
| 5145 | if (last_address <= info.GetLastAddress()) { | ||
| 5146 | if (!is_free) { | ||
| 5147 | checked_mapped_size += (last_address + 1 - cur_address); | ||
| 5148 | } | ||
| 5149 | break; | ||
| 5150 | } | ||
| 5151 | |||
| 5152 | // Track the memory if it's mapped. | ||
| 5153 | if (!is_free) { | ||
| 5154 | checked_mapped_size += | ||
| 5155 | KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 5156 | } | ||
| 5157 | |||
| 5158 | // Advance. | ||
| 5159 | cur_address = info.GetEndAddress(); | ||
| 5160 | ++it; | ||
| 5161 | } | ||
| 5162 | |||
| 5163 | // If the size now isn't what it was before, somebody mapped or unmapped | ||
| 5164 | // concurrently. If this happened, retry. | ||
| 5165 | if (mapped_size != checked_mapped_size) { | ||
| 5166 | continue; | ||
| 5167 | } | ||
| 5168 | } | ||
| 5169 | |||
| 5170 | // Create an update allocator. | ||
| 5171 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 5172 | Result allocator_result; | ||
| 5173 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 5174 | m_memory_block_slab_manager, | ||
| 5175 | num_allocator_blocks); | ||
| 5176 | R_TRY(allocator_result); | ||
| 5177 | |||
| 5178 | // We're going to perform an update, so create a helper. | ||
| 5179 | KScopedPageTableUpdater updater(this); | ||
| 5180 | |||
| 5181 | // Prepare to iterate over the memory. | ||
| 5182 | auto pg_it = pg.begin(); | ||
| 5183 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 5184 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 5185 | |||
| 5186 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 5187 | pg_guard.Cancel(); | ||
| 5188 | cur_address = address; | ||
| 5189 | ON_RESULT_FAILURE { | ||
| 5190 | if (cur_address > address) { | ||
| 5191 | const KProcessAddress last_unmap_address = cur_address - 1; | ||
| 5192 | |||
| 5193 | // Iterate, unmapping the pages. | ||
| 5194 | cur_address = address; | ||
| 5195 | |||
| 5196 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5197 | while (true) { | ||
| 5198 | // Check that the iterator is valid. | ||
| 5199 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5200 | |||
| 5201 | // Get the memory info. | ||
| 5202 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5203 | |||
| 5204 | // If the memory state is free, we mapped it and need to unmap it. | ||
| 5205 | if (info.GetState() == KMemoryState::Free) { | ||
| 5206 | // Determine the range to unmap. | ||
| 5207 | const KPageProperties unmap_properties = { | ||
| 5208 | KMemoryPermission::None, false, false, | ||
| 5209 | DisableMergeAttribute::None}; | ||
| 5210 | const size_t cur_pages = | ||
| 5211 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 5212 | last_unmap_address + 1 - cur_address) / | ||
| 5213 | PageSize; | ||
| 5214 | |||
| 5215 | // Unmap. | ||
| 5216 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, | ||
| 5217 | cur_pages, 0, false, unmap_properties, | ||
| 5218 | OperationType::Unmap, true)); | ||
| 5219 | } | ||
| 5220 | |||
| 5221 | // Check if we're done. | ||
| 5222 | if (last_unmap_address <= info.GetLastAddress()) { | ||
| 5223 | break; | ||
| 5224 | } | ||
| 5225 | |||
| 5226 | // Advance. | ||
| 5227 | cur_address = info.GetEndAddress(); | ||
| 5228 | ++it; | ||
| 5229 | } | ||
| 5230 | } | ||
| 5231 | |||
| 5232 | // Release any remaining unmapped memory. | ||
| 5233 | m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages); | ||
| 5234 | m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages); | ||
| 5235 | for (++pg_it; pg_it != pg.end(); ++pg_it) { | ||
| 5236 | m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 5237 | pg_it->GetNumPages()); | ||
| 5238 | m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages()); | ||
| 5239 | } | ||
| 5240 | }; | ||
| 5241 | |||
| 5242 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5243 | while (true) { | ||
| 5244 | // Check that the iterator is valid. | ||
| 5245 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5246 | |||
| 5247 | // Get the memory info. | ||
| 5248 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5249 | |||
| 5250 | // If it's unmapped, we need to map it. | ||
| 5251 | if (info.GetState() == KMemoryState::Free) { | ||
| 5252 | // Determine the range to map. | ||
| 5253 | const KPageProperties map_properties = { | ||
| 5254 | KMemoryPermission::UserReadWrite, false, false, | ||
| 5255 | cur_address == this->GetAliasRegionStart() | ||
| 5256 | ? DisableMergeAttribute::DisableHead | ||
| 5257 | : DisableMergeAttribute::None}; | ||
| 5258 | size_t map_pages = | ||
| 5259 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 5260 | last_address + 1 - cur_address) / | ||
| 5261 | PageSize; | ||
| 5262 | |||
| 5263 | // While we have pages to map, map them. | ||
| 5264 | { | ||
| 5265 | // Create a page group for the current mapping range. | ||
| 5266 | KPageGroup cur_pg(m_kernel, m_block_info_manager); | ||
| 5267 | { | ||
| 5268 | ON_RESULT_FAILURE_2 { | ||
| 5269 | cur_pg.OpenFirst(); | ||
| 5270 | cur_pg.Close(); | ||
| 5271 | }; | ||
| 5272 | |||
| 5273 | size_t remain_pages = map_pages; | ||
| 5274 | while (remain_pages > 0) { | ||
| 5275 | // Check if we're at the end of the physical block. | ||
| 5276 | if (pg_pages == 0) { | ||
| 5277 | // Ensure there are more pages to map. | ||
| 5278 | ASSERT(pg_it != pg.end()); | ||
| 5279 | |||
| 5280 | // Advance our physical block. | ||
| 5281 | ++pg_it; | ||
| 5282 | pg_phys_addr = pg_it->GetAddress(); | ||
| 5283 | pg_pages = pg_it->GetNumPages(); | ||
| 5284 | } | ||
| 5285 | |||
| 5286 | // Add whatever we can to the current block. | ||
| 5287 | const size_t cur_pages = std::min(pg_pages, remain_pages); | ||
| 5288 | R_TRY(cur_pg.AddBlock(pg_phys_addr + | ||
| 5289 | ((pg_pages - cur_pages) * PageSize), | ||
| 5290 | cur_pages)); | ||
| 5291 | |||
| 5292 | // Advance. | ||
| 5293 | remain_pages -= cur_pages; | ||
| 5294 | pg_pages -= cur_pages; | ||
| 5295 | } | ||
| 5296 | } | ||
| 5297 | |||
| 5298 |                             // Map the pages. | ||
| 5299 | R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, | ||
| 5300 | cur_pg, map_properties, | ||
| 5301 | OperationType::MapFirstGroup, false)); | ||
| 5302 | } | ||
| 5303 | } | ||
| 5304 | |||
| 5305 | // Check if we're done. | ||
| 5306 | if (last_address <= info.GetLastAddress()) { | ||
| 5307 | break; | ||
| 5308 | } | ||
| 5309 | |||
| 5310 | // Advance. | ||
| 5311 | cur_address = info.GetEndAddress(); | ||
| 5312 | ++it; | ||
| 5313 | } | ||
| 5314 | |||
| 5315 | // We succeeded, so commit the memory reservation. | ||
| 5316 | memory_reservation.Commit(); | ||
| 5317 | |||
| 5318 | // Increase our tracked mapped size. | ||
| 5319 | m_mapped_physical_memory_size += (size - mapped_size); | ||
| 5320 | |||
| 5321 | // Update the relevant memory blocks. | ||
| 5322 | m_memory_block_manager.UpdateIfMatch( | ||
| 5323 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, | ||
| 5324 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | ||
| 5325 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 5326 | address == this->GetAliasRegionStart() | ||
| 5327 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 5328 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 5329 | KMemoryBlockDisableMergeAttribute::None); | ||
| 5330 | |||
| 5331 | R_SUCCEED(); | ||
| 5332 | } | ||
| 5333 | } | ||
| 5334 | } | ||
| 5335 | } | ||
| 5336 | |||
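MapPhysicalMemory deliberately drops the table lock while allocating, then revalidates its snapshot of the mapped size before committing; if another thread mapped or unmapped in the window, the whole attempt retries. A minimal sketch of that snapshot / allocate / revalidate loop with hypothetical callables (not emulator code):

#include <cstddef>
#include <mutex>
#include <utility>

template <typename Snapshot, typename Allocate, typename Commit>
void MapWithRetry(std::mutex& table_lock, Snapshot snapshot, Allocate allocate,
                  Commit commit) {
    while (true) {
        size_t before;
        {
            std::scoped_lock lk(table_lock);
            before = snapshot();
        }

        auto pages = allocate();  // slow path: lock deliberately not held

        std::scoped_lock lk(table_lock);
        if (snapshot() != before) {
            continue;  // concurrent map/unmap happened; `pages` is freed, retry
        }
        commit(std::move(pages));
        return;
    }
}

In the real function, freeing the allocation on retry is handled by the cancellable pg_guard rather than a destructor, but the control flow is the same.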
| 5337 | Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 5338 | // Lock the physical memory lock. | ||
| 5339 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 5340 | |||
| 5341 | // Lock the table. | ||
| 5342 | KScopedLightLock lk(m_general_lock); | ||
| 5343 | |||
| 5344 | // Calculate the last address for convenience. | ||
| 5345 | const KProcessAddress last_address = address + size - 1; | ||
| 5346 | |||
| 5347 | // Define iteration variables. | ||
| 5348 | KProcessAddress map_start_address = 0; | ||
| 5349 | KProcessAddress map_last_address = 0; | ||
| 5350 | |||
| 5351 | KProcessAddress cur_address; | ||
| 5352 | size_t mapped_size; | ||
| 5353 | size_t num_allocator_blocks = 0; | ||
| 5354 | |||
| 5355 | // Check if the memory is mapped. | ||
| 5356 | { | ||
| 5357 | // Iterate over the memory. | ||
| 5358 | cur_address = address; | ||
| 5359 | mapped_size = 0; | ||
| 5360 | |||
| 5361 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5362 | while (true) { | ||
| 5363 | // Check that the iterator is valid. | ||
| 5364 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5365 | |||
| 5366 | // Get the memory info. | ||
| 5367 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5368 | |||
| 5369 | // Verify the memory's state. | ||
| 5370 | const bool is_normal = info.GetState() == KMemoryState::Normal && | ||
| 5371 | info.GetAttribute() == KMemoryAttribute::None; | ||
| 5372 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 5373 | R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); | ||
| 5374 | |||
| 5375 | if (is_normal) { | ||
| 5376 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | ||
| 5377 | |||
| 5378 | if (map_start_address == 0) { | ||
| 5379 | map_start_address = cur_address; | ||
| 5380 | } | ||
| 5381 | map_last_address = | ||
| 5382 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 5383 | |||
| 5384 | if (info.GetAddress() < GetInteger(address)) { | ||
| 5385 | ++num_allocator_blocks; | ||
| 5386 | } | ||
| 5387 | if (last_address < info.GetLastAddress()) { | ||
| 5388 | ++num_allocator_blocks; | ||
| 5389 | } | ||
| 5390 | |||
| 5391 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 5392 | } | ||
| 5393 | |||
| 5394 | // Check if we're done. | ||
| 5395 | if (last_address <= info.GetLastAddress()) { | ||
| 5396 | break; | ||
| 5397 | } | ||
| 5398 | |||
| 5399 | // Advance. | ||
| 5400 | cur_address = info.GetEndAddress(); | ||
| 5401 | ++it; | ||
| 5402 | } | ||
| 5403 | |||
| 5404 | // If there's nothing mapped, we've nothing to do. | ||
| 5405 | R_SUCCEED_IF(mapped_size == 0); | ||
| 5406 | } | ||
| 5407 | |||
| 5408 | // Create an update allocator. | ||
| 5409 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 5410 | Result allocator_result; | ||
| 5411 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 5412 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 5413 | R_TRY(allocator_result); | ||
| 5414 | |||
| 5415 | // We're going to perform an update, so create a helper. | ||
| 5416 | KScopedPageTableUpdater updater(this); | ||
| 5417 | |||
| 5418 | // Separate the mapping. | ||
| 5419 | const KPageProperties sep_properties = {KMemoryPermission::None, false, false, | ||
| 5420 | DisableMergeAttribute::None}; | ||
| 5421 | R_TRY(this->Operate(updater.GetPageList(), map_start_address, | ||
| 5422 | (map_last_address + 1 - map_start_address) / PageSize, 0, false, | ||
| 5423 | sep_properties, OperationType::Separate, false)); | ||
| 5424 | |||
| 5425 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 5426 | cur_address = address; | ||
| 5427 | |||
| 5428 | // Iterate over the memory, unmapping as we go. | ||
| 5429 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5430 | |||
| 5431 | const auto clear_merge_attr = | ||
| 5432 | (it->GetState() == KMemoryState::Normal && | ||
| 5433 | it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) | ||
| 5434 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 5435 | : KMemoryBlockDisableMergeAttribute::None; | ||
| 5436 | |||
| 5437 | while (true) { | ||
| 5438 | // Check that the iterator is valid. | ||
| 5439 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5440 | |||
| 5441 | // Get the memory info. | ||
| 5442 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5443 | |||
| 5444 | // If the memory state is normal, we need to unmap it. | ||
| 5445 | if (info.GetState() == KMemoryState::Normal) { | ||
| 5446 | // Determine the range to unmap. | ||
| 5447 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 5448 | DisableMergeAttribute::None}; | ||
| 5449 | const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 5450 | last_address + 1 - cur_address) / | ||
| 5451 | PageSize; | ||
| 5452 | |||
| 5453 | // Unmap. | ||
| 5454 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, | ||
| 5455 | unmap_properties, OperationType::Unmap, false)); | ||
| 5456 | } | ||
| 5457 | |||
| 5458 | // Check if we're done. | ||
| 5459 | if (last_address <= info.GetLastAddress()) { | ||
| 5460 | break; | ||
| 5461 | } | ||
| 5462 | |||
| 5463 | // Advance. | ||
| 5464 | cur_address = info.GetEndAddress(); | ||
| 5465 | ++it; | ||
| 5466 | } | ||
| 5467 | |||
| 5468 | // Release the memory resource. | ||
| 5469 | m_mapped_physical_memory_size -= mapped_size; | ||
| 5470 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size); | ||
| 5471 | |||
| 5472 | // Update memory blocks. | ||
| 5473 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | ||
| 5474 | KMemoryState::Free, KMemoryPermission::None, | ||
| 5475 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 5476 | clear_merge_attr); | ||
| 5477 | |||
| 5478 | // We succeeded. | ||
| 5479 | R_SUCCEED(); | ||
| 5480 | } | ||
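The unmap loops above work with inclusive bounds (last_address = address + size - 1), so the page count for the overlap of the request with the current block is min(block_end, last_address + 1) - cur_address, in pages. A small worked example with hypothetical values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
    constexpr size_t PageSize = 0x1000;
    const size_t cur_address = 0x2000;   // hypothetical loop position
    const size_t block_end = 0x6000;     // current block's (exclusive) end
    const size_t last_address = 0x4fff;  // inclusive end of the request

    // Unmap only the overlap of the block with the requested range.
    const size_t cur_pages =
        std::min(block_end - cur_address, last_address + 1 - cur_address) / PageSize;
    std::printf("pages = %zu\n", cur_pages);  // 3 pages: 0x2000..0x4fff
}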
| 5481 | |||
| 5482 | Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 5483 | UNIMPLEMENTED(); | ||
| 5484 | R_THROW(ResultNotImplemented); | ||
| 5485 | } | ||
| 5486 | |||
| 5487 | Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 5488 | UNIMPLEMENTED(); | ||
| 5489 | R_THROW(ResultNotImplemented); | ||
| 5490 | } | ||
| 5491 | |||
| 5492 | Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, | ||
| 5493 | KPageTableBase& src_page_table, | ||
| 5494 | KProcessAddress src_address) { | ||
| 5495 |     // We need to lock both this table and the source process's table, so set up an alias. | ||
| 5496 | KPageTableBase& dst_page_table = *this; | ||
| 5497 | |||
| 5498 | // Acquire the table locks. | ||
| 5499 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 5500 | |||
| 5501 | // Check that the memory is mapped in the destination process. | ||
| 5502 | size_t num_allocator_blocks; | ||
| 5503 | R_TRY(dst_page_table.CheckMemoryState( | ||
| 5504 | std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All, | ||
| 5505 | KMemoryState::SharedCode, KMemoryPermission::UserReadWrite, | ||
| 5506 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 5507 | |||
| 5508 | // Check that the memory is mapped in the source process. | ||
| 5509 | R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess, | ||
| 5510 | KMemoryState::FlagCanMapProcess, KMemoryPermission::None, | ||
| 5511 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 5512 | KMemoryAttribute::None)); | ||
| 5513 | |||
| 5514 | // Validate that the memory ranges are compatible. | ||
| 5515 | { | ||
| 5516 | // Define a helper type. | ||
| 5517 | struct ContiguousRangeInfo { | ||
| 5518 | public: | ||
| 5519 | KPageTableBase& m_pt; | ||
| 5520 | TraversalContext m_context; | ||
| 5521 | TraversalEntry m_entry; | ||
| 5522 | KPhysicalAddress m_phys_addr; | ||
| 5523 | size_t m_cur_size; | ||
| 5524 | size_t m_remaining_size; | ||
| 5525 | |||
| 5526 | public: | ||
| 5527 | ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size) | ||
| 5528 | : m_pt(pt), m_remaining_size(size) { | ||
| 5529 | // Begin a traversal. | ||
| 5530 | ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), | ||
| 5531 | std::addressof(m_context), address)); | ||
| 5532 | |||
| 5533 | // Setup tracking fields. | ||
| 5534 | m_phys_addr = m_entry.phys_addr; | ||
| 5535 | m_cur_size = std::min<size_t>( | ||
| 5536 | m_remaining_size, | ||
| 5537 | m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1))); | ||
| 5538 | |||
| 5539 | // Consume the whole contiguous block. | ||
| 5540 | this->DetermineContiguousBlockExtents(); | ||
| 5541 | } | ||
| 5542 | |||
| 5543 | void ContinueTraversal() { | ||
| 5544 | // Update our remaining size. | ||
| 5545 | m_remaining_size = m_remaining_size - m_cur_size; | ||
| 5546 | |||
| 5547 | // Update our tracking fields. | ||
| 5548 | if (m_remaining_size > 0) { | ||
| 5549 | m_phys_addr = m_entry.phys_addr; | ||
| 5550 | m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size); | ||
| 5551 | |||
| 5552 | // Consume the whole contiguous block. | ||
| 5553 | this->DetermineContiguousBlockExtents(); | ||
| 5554 | } | ||
| 5555 | } | ||
| 5556 | |||
| 5557 | private: | ||
| 5558 | void DetermineContiguousBlockExtents() { | ||
| 5559 | // Continue traversing until we're not contiguous, or we have enough. | ||
| 5560 | while (m_cur_size < m_remaining_size) { | ||
| 5561 | ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry), | ||
| 5562 | std::addressof(m_context))); | ||
| 5563 | |||
| 5564 | // If we're not contiguous, we're done. | ||
| 5565 | if (m_entry.phys_addr != m_phys_addr + m_cur_size) { | ||
| 5566 | break; | ||
| 5567 | } | ||
| 5568 | |||
| 5569 | // Update our current size. | ||
| 5570 | m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size); | ||
| 5571 | } | ||
| 5572 | } | ||
| 5573 | }; | ||
| 5574 | |||
| 5575 | // Create ranges for both tables. | ||
| 5576 | ContiguousRangeInfo src_range(src_page_table, src_address, size); | ||
| 5577 | ContiguousRangeInfo dst_range(dst_page_table, dst_address, size); | ||
| 5578 | |||
| 5579 | // Validate the ranges. | ||
| 5580 | while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) { | ||
| 5581 | R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion); | ||
| 5582 | R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion); | ||
| 5583 | |||
| 5584 | src_range.ContinueTraversal(); | ||
| 5585 | dst_range.ContinueTraversal(); | ||
| 5586 | } | ||
| 5587 | } | ||
| 5588 | |||
| 5589 | // We no longer need to hold our lock on the source page table. | ||
| 5590 | lk.TryUnlockHalf(src_page_table.m_general_lock); | ||
| 5591 | |||
| 5592 | // Create an update allocator. | ||
| 5593 | Result allocator_result; | ||
| 5594 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 5595 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 5596 | R_TRY(allocator_result); | ||
| 5597 | |||
| 5598 | // We're going to perform an update, so create a helper. | ||
| 5599 | KScopedPageTableUpdater updater(this); | ||
| 5600 | |||
| 5601 | // Unmap the memory. | ||
| 5602 | const size_t num_pages = size / PageSize; | ||
| 5603 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 5604 | DisableMergeAttribute::None}; | ||
| 5605 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties, | ||
| 5606 | OperationType::Unmap, false)); | ||
| 5607 | |||
| 5608 | // Apply the memory block update. | ||
| 5609 | m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, | ||
| 5610 | KMemoryState::Free, KMemoryPermission::None, | ||
| 5611 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 5612 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 5613 | |||
| 5614 | R_SUCCEED(); | ||
| 5615 | } | ||
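ContiguousRangeInfo above is built on the pointer-based traversal API this patch introduces in Common::PageTable. A hedged usage sketch of that API (the helper name is illustrative, and the start address is assumed page-aligned and fully mapped): count the physically contiguous runs backing a virtual range.

#include <cstddef>
#include <memory>

#include "common/page_table.h"

size_t CountPhysicalRuns(const Common::PageTable& impl, Common::ProcessAddress address,
                         size_t size) {
    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};
    if (!impl.BeginTraversal(std::addressof(entry), std::addressof(context), address)) {
        return 0;  // first page is unmapped
    }

    size_t runs = 1;
    auto run_end = entry.phys_addr + entry.block_size;
    size_t consumed = entry.block_size;

    while (consumed < size &&
           impl.ContinueTraversal(std::addressof(entry), std::addressof(context))) {
        if (entry.phys_addr != run_end) {
            ++runs;  // physical discontinuity starts a new run
        }
        run_end = entry.phys_addr + entry.block_size;
        consumed += entry.block_size;
    }
    return runs;
}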
| 5616 | |||
| 5617 | Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr, | ||
| 5618 | size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, | ||
| 5619 | const KPageProperties properties, OperationType operation, | ||
| 5620 | bool reuse_ll) { | ||
| 5621 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 5622 | ASSERT(num_pages > 0); | ||
| 5623 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | ||
| 5624 | ASSERT(this->ContainsPages(virt_addr, num_pages)); | ||
| 5625 | |||
| 5626 | // As we don't allocate page entries in guest memory, we don't need to allocate them from | ||
| 5627 | // or free them to the page list, and so it goes unused (along with page properties). | ||
| 5628 | |||
| 5629 | switch (operation) { | ||
| 5630 | case OperationType::Unmap: { | ||
| 5631 | // Ensure that any pages we track are closed on exit. | ||
| 5632 | KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager()); | ||
| 5633 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | ||
| 5634 | |||
| 5635 | // Make a page group representing the region to unmap. | ||
| 5636 | this->MakePageGroup(pages_to_close, virt_addr, num_pages); | ||
| 5637 | |||
| 5638 | // Unmap. | ||
| 5639 | m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize); | ||
| 5640 | |||
| 5641 | R_SUCCEED(); | ||
| 5642 | } | ||
| 5643 | case OperationType::Map: { | ||
| 5644 | ASSERT(virt_addr != 0); | ||
| 5645 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | ||
| 5646 | m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr); | ||
| 5647 | |||
| 5648 | // Open references to pages, if we should. | ||
| 5649 | if (this->IsHeapPhysicalAddress(phys_addr)) { | ||
| 5650 | m_kernel.MemoryManager().Open(phys_addr, num_pages); | ||
| 5651 | } | ||
| 5652 | |||
| 5653 | R_SUCCEED(); | ||
| 5654 | } | ||
| 5655 | case OperationType::Separate: { | ||
| 5656 | // TODO: Unimplemented. | ||
| 5657 | R_SUCCEED(); | ||
| 5658 | } | ||
| 5659 | case OperationType::ChangePermissions: | ||
| 5660 | case OperationType::ChangePermissionsAndRefresh: | ||
| 5661 | case OperationType::ChangePermissionsAndRefreshAndFlush: | ||
| 5662 | R_SUCCEED(); | ||
| 5663 | default: | ||
| 5664 | UNREACHABLE(); | ||
| 5665 | } | ||
| 5666 | } | ||
| 5667 | |||
| 5668 | Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr, | ||
| 5669 | size_t num_pages, const KPageGroup& page_group, | ||
| 5670 | const KPageProperties properties, OperationType operation, | ||
| 5671 | bool reuse_ll) { | ||
| 5672 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 5673 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | ||
| 5674 | ASSERT(num_pages > 0); | ||
| 5675 | ASSERT(num_pages == page_group.GetNumPages()); | ||
| 5676 | |||
| 5677 | // As we don't allocate page entries in guest memory, we don't need to allocate them from | ||
| 5678 | // the page list, and so it goes unused (along with page properties). | ||
| 5679 | |||
| 5680 | switch (operation) { | ||
| 5681 | case OperationType::MapGroup: | ||
| 5682 | case OperationType::MapFirstGroup: { | ||
| 5683 | // We want to maintain a new reference to every page in the group. | ||
| 5684 | KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); | ||
| 5685 | |||
| 5686 | for (const auto& node : page_group) { | ||
| 5687 | const size_t size{node.GetNumPages() * PageSize}; | ||
| 5688 | |||
| 5689 | // Map the pages. | ||
| 5690 | m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress()); | ||
| 5691 | |||
| 5692 | virt_addr += size; | ||
| 5693 | } | ||
| 5694 | |||
| 5695 | // We succeeded! We want to persist the reference to the pages. | ||
| 5696 | spg.CancelClose(); | ||
| 5697 | |||
| 5698 | R_SUCCEED(); | ||
| 5699 | } | ||
| 5700 | default: | ||
| 5701 | UNREACHABLE(); | ||
| 5702 | } | ||
| 5703 | } | ||
| 5704 | |||
| 5705 | void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 5706 | while (page_list->Peek()) { | ||
| 5707 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 5708 | |||
| 5709 | // TODO: Free page entries once they are allocated in guest memory. | ||
| 5710 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 5711 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 5712 | // this->GetPageTableManager().Free(page); | ||
| 5713 | } | ||
| 5714 | } | ||
| 5715 | |||
| 5716 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h new file mode 100644 index 000000000..ee2c41e67 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_base.h | |||
| @@ -0,0 +1,759 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <memory> | ||
| 7 | |||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/page_table.h" | ||
| 10 | #include "core/core.h" | ||
| 11 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 12 | #include "core/hle/kernel/k_light_lock.h" | ||
| 13 | #include "core/hle/kernel/k_memory_block.h" | ||
| 14 | #include "core/hle/kernel/k_memory_block_manager.h" | ||
| 15 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 16 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 17 | #include "core/hle/kernel/k_typed_address.h" | ||
| 18 | #include "core/hle/kernel/kernel.h" | ||
| 19 | #include "core/hle/result.h" | ||
| 20 | #include "core/memory.h" | ||
| 21 | |||
| 22 | namespace Kernel { | ||
| 23 | |||
| 24 | enum class DisableMergeAttribute : u8 { | ||
| 25 | None = (0U << 0), | ||
| 26 | |||
| 27 | DisableHead = (1U << 0), | ||
| 28 | DisableHeadAndBody = (1U << 1), | ||
| 29 | EnableHeadAndBody = (1U << 2), | ||
| 30 | DisableTail = (1U << 3), | ||
| 31 | EnableTail = (1U << 4), | ||
| 32 | EnableAndMergeHeadBodyTail = (1U << 5), | ||
| 33 | |||
| 34 | EnableHeadBodyTail = EnableHeadAndBody | EnableTail, | ||
| 35 | DisableHeadBodyTail = DisableHeadAndBody | DisableTail, | ||
| 36 | }; | ||
| 37 | DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute); | ||
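These are single-bit flags combined with the operators declared above; CleanupForIpcClient, for instance, chooses the head/body part and tail part independently and ORs them together. Assuming the flag operators are constexpr (as DECLARE_ENUM_FLAG_OPERATORS defines them), the composite enumerator can be checked directly:

static_assert((DisableMergeAttribute::EnableHeadAndBody | DisableMergeAttribute::EnableTail) ==
              DisableMergeAttribute::EnableHeadBodyTail);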
| 38 | |||
| 39 | struct KPageProperties { | ||
| 40 | KMemoryPermission perm; | ||
| 41 | bool io; | ||
| 42 | bool uncached; | ||
| 43 | DisableMergeAttribute disable_merge_attributes; | ||
| 44 | }; | ||
| 45 | static_assert(std::is_trivial_v<KPageProperties>); | ||
| 46 | static_assert(sizeof(KPageProperties) == sizeof(u32)); | ||
| 47 | |||
| 48 | class KResourceLimit; | ||
| 49 | class KSystemResource; | ||
| 50 | |||
| 51 | class KPageTableBase { | ||
| 52 | YUZU_NON_COPYABLE(KPageTableBase); | ||
| 53 | YUZU_NON_MOVEABLE(KPageTableBase); | ||
| 54 | |||
| 55 | public: | ||
| 56 | using TraversalEntry = Common::PageTable::TraversalEntry; | ||
| 57 | using TraversalContext = Common::PageTable::TraversalContext; | ||
| 58 | |||
| 59 | class MemoryRange { | ||
| 60 | private: | ||
| 61 | KernelCore& m_kernel; | ||
| 62 | KPhysicalAddress m_address; | ||
| 63 | size_t m_size; | ||
| 64 | bool m_heap; | ||
| 65 | |||
| 66 | public: | ||
| 67 | explicit MemoryRange(KernelCore& kernel) | ||
| 68 | : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {} | ||
| 69 | |||
| 70 | void Set(KPhysicalAddress address, size_t size, bool heap) { | ||
| 71 | m_address = address; | ||
| 72 | m_size = size; | ||
| 73 | m_heap = heap; | ||
| 74 | } | ||
| 75 | |||
| 76 | KPhysicalAddress GetAddress() const { | ||
| 77 | return m_address; | ||
| 78 | } | ||
| 79 | size_t GetSize() const { | ||
| 80 | return m_size; | ||
| 81 | } | ||
| 82 | bool IsHeap() const { | ||
| 83 | return m_heap; | ||
| 84 | } | ||
| 85 | |||
| 86 | void Open(); | ||
| 87 | void Close(); | ||
| 88 | }; | ||
| 89 | |||
| 90 | protected: | ||
| 91 | enum MemoryFillValue : u8 { | ||
| 92 | MemoryFillValue_Zero = 0, | ||
| 93 | MemoryFillValue_Stack = 'X', | ||
| 94 | MemoryFillValue_Ipc = 'Y', | ||
| 95 | MemoryFillValue_Heap = 'Z', | ||
| 96 | }; | ||
| 97 | |||
| 98 | enum class OperationType { | ||
| 99 | Map = 0, | ||
| 100 | MapGroup = 1, | ||
| 101 | MapFirstGroup = 2, | ||
| 102 | Unmap = 3, | ||
| 103 | ChangePermissions = 4, | ||
| 104 | ChangePermissionsAndRefresh = 5, | ||
| 105 | ChangePermissionsAndRefreshAndFlush = 6, | ||
| 106 | Separate = 7, | ||
| 107 | }; | ||
| 108 | |||
| 109 | static constexpr size_t MaxPhysicalMapAlignment = 1_GiB; | ||
| 110 | static constexpr size_t RegionAlignment = 2_MiB; | ||
| 111 | static_assert(RegionAlignment == KernelAslrAlignment); | ||
| 112 | |||
| 113 | struct PageLinkedList { | ||
| 114 | private: | ||
| 115 | struct Node { | ||
| 116 | Node* m_next; | ||
| 117 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 118 | }; | ||
| 119 | static_assert(std::is_trivial_v<Node>); | ||
| 120 | |||
| 121 | private: | ||
| 122 | Node* m_root{}; | ||
| 123 | |||
| 124 | public: | ||
| 125 | constexpr PageLinkedList() : m_root(nullptr) {} | ||
| 126 | |||
| 127 | void Push(Node* n) { | ||
| 128 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 129 | n->m_next = m_root; | ||
| 130 | m_root = n; | ||
| 131 | } | ||
| 132 | |||
| 133 | Node* Peek() const { | ||
| 134 | return m_root; | ||
| 135 | } | ||
| 136 | |||
| 137 | Node* Pop() { | ||
| 138 | Node* const r = m_root; | ||
| 139 | |||
| 140 | m_root = r->m_next; | ||
| 141 | r->m_next = nullptr; | ||
| 142 | |||
| 143 | return r; | ||
| 144 | } | ||
| 145 | }; | ||
| 146 | static_assert(std::is_trivially_destructible_v<PageLinkedList>); | ||
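The trick in PageLinkedList is that each tracked page stores the list node in its own first bytes, so tracking N spare pages costs no extra memory. An analogous standalone sketch of the intrusive free-list idea (not the class above, whose Node type is private):

#include <cstddef>

constexpr size_t PageSize = 0x1000;

struct FreePage {
    FreePage* next;
    // The remaining PageSize - sizeof(FreePage*) bytes of the page are unused.
};
static_assert(sizeof(FreePage) <= PageSize);

class FreeList {
public:
    void Push(void* page) {  // page must be page-aligned and otherwise unused
        auto* node = static_cast<FreePage*>(page);
        node->next = m_head;
        m_head = node;
    }
    void* Pop() {  // returns nullptr when empty
        FreePage* const r = m_head;
        if (r != nullptr) {
            m_head = r->next;
        }
        return r;
    }

private:
    FreePage* m_head = nullptr;
};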
| 147 | |||
| 148 | static constexpr auto DefaultMemoryIgnoreAttr = | ||
| 149 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | ||
| 150 | |||
| 151 | static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) { | ||
| 152 | switch (static_cast<Svc::CreateProcessFlag>(as_type & | ||
| 153 | Svc::CreateProcessFlag::AddressSpaceMask)) { | ||
| 154 | case Svc::CreateProcessFlag::AddressSpace64Bit: | ||
| 155 | return 39; | ||
| 156 | case Svc::CreateProcessFlag::AddressSpace64BitDeprecated: | ||
| 157 | return 36; | ||
| 158 | case Svc::CreateProcessFlag::AddressSpace32Bit: | ||
| 159 | case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias: | ||
| 160 | return 32; | ||
| 161 | default: | ||
| 162 | UNREACHABLE(); | ||
| 163 | } | ||
| 164 | } | ||
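The returned width is the size of the process virtual address space in bits, so the three cases correspond to 512 GiB, 64 GiB, and 4 GiB spaces respectively:

static_assert((1ULL << 39) == 512ULL * 1024 * 1024 * 1024);  // AddressSpace64Bit
static_assert((1ULL << 36) == 64ULL * 1024 * 1024 * 1024);   // AddressSpace64BitDeprecated
static_assert((1ULL << 32) == 4ULL * 1024 * 1024 * 1024);    // AddressSpace32Bit variants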
| 165 | |||
| 166 | private: | ||
| 167 | class KScopedPageTableUpdater { | ||
| 168 | private: | ||
| 169 | KPageTableBase* m_pt; | ||
| 170 | PageLinkedList m_ll; | ||
| 171 | |||
| 172 | public: | ||
| 173 | explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {} | ||
| 174 | explicit KScopedPageTableUpdater(KPageTableBase& pt) | ||
| 175 | : KScopedPageTableUpdater(std::addressof(pt)) {} | ||
| 176 | ~KScopedPageTableUpdater() { | ||
| 177 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 178 | } | ||
| 179 | |||
| 180 | PageLinkedList* GetPageList() { | ||
| 181 | return std::addressof(m_ll); | ||
| 182 | } | ||
| 183 | }; | ||
| 184 | |||
| 185 | private: | ||
| 186 | KernelCore& m_kernel; | ||
| 187 | Core::System& m_system; | ||
| 188 | KProcessAddress m_address_space_start{}; | ||
| 189 | KProcessAddress m_address_space_end{}; | ||
| 190 | KProcessAddress m_heap_region_start{}; | ||
| 191 | KProcessAddress m_heap_region_end{}; | ||
| 192 | KProcessAddress m_current_heap_end{}; | ||
| 193 | KProcessAddress m_alias_region_start{}; | ||
| 194 | KProcessAddress m_alias_region_end{}; | ||
| 195 | KProcessAddress m_stack_region_start{}; | ||
| 196 | KProcessAddress m_stack_region_end{}; | ||
| 197 | KProcessAddress m_kernel_map_region_start{}; | ||
| 198 | KProcessAddress m_kernel_map_region_end{}; | ||
| 199 | KProcessAddress m_alias_code_region_start{}; | ||
| 200 | KProcessAddress m_alias_code_region_end{}; | ||
| 201 | KProcessAddress m_code_region_start{}; | ||
| 202 | KProcessAddress m_code_region_end{}; | ||
| 203 | size_t m_max_heap_size{}; | ||
| 204 | size_t m_mapped_physical_memory_size{}; | ||
| 205 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 206 | size_t m_mapped_insecure_memory{}; | ||
| 207 | size_t m_mapped_ipc_server_memory{}; | ||
| 208 | mutable KLightLock m_general_lock; | ||
| 209 | mutable KLightLock m_map_physical_memory_lock; | ||
| 210 | KLightLock m_device_map_lock; | ||
| 211 | std::unique_ptr<Common::PageTable> m_impl{}; | ||
| 212 | Core::Memory::Memory* m_memory{}; | ||
| 213 | KMemoryBlockManager m_memory_block_manager{}; | ||
| 214 | u32 m_allocate_option{}; | ||
| 215 | u32 m_address_space_width{}; | ||
| 216 | bool m_is_kernel{}; | ||
| 217 | bool m_enable_aslr{}; | ||
| 218 | bool m_enable_device_address_space_merge{}; | ||
| 219 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | ||
| 220 | KBlockInfoManager* m_block_info_manager{}; | ||
| 221 | KResourceLimit* m_resource_limit{}; | ||
| 222 | const KMemoryRegion* m_cached_physical_linear_region{}; | ||
| 223 | const KMemoryRegion* m_cached_physical_heap_region{}; | ||
| 224 | MemoryFillValue m_heap_fill_value{}; | ||
| 225 | MemoryFillValue m_ipc_fill_value{}; | ||
| 226 | MemoryFillValue m_stack_fill_value{}; | ||
| 227 | |||
| 228 | public: | ||
| 229 | explicit KPageTableBase(KernelCore& kernel); | ||
| 230 | ~KPageTableBase(); | ||
| 231 | |||
| 232 | Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end, | ||
| 233 | Core::Memory::Memory& memory); | ||
| 234 | Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 235 | bool enable_device_address_space_merge, bool from_back, | ||
| 236 | KMemoryManager::Pool pool, KProcessAddress code_address, | ||
| 237 | size_t code_size, KSystemResource* system_resource, | ||
| 238 | KResourceLimit* resource_limit, Core::Memory::Memory& memory); | ||
| 239 | |||
| 240 | void Finalize(); | ||
| 241 | |||
| 242 | bool IsKernel() const { | ||
| 243 | return m_is_kernel; | ||
| 244 | } | ||
| 245 | bool IsAslrEnabled() const { | ||
| 246 | return m_enable_aslr; | ||
| 247 | } | ||
| 248 | |||
| 249 | bool Contains(KProcessAddress addr) const { | ||
| 250 | return m_address_space_start <= addr && addr <= m_address_space_end - 1; | ||
| 251 | } | ||
| 252 | |||
| 253 | bool Contains(KProcessAddress addr, size_t size) const { | ||
| 254 | return m_address_space_start <= addr && addr < addr + size && | ||
| 255 | addr + size - 1 <= m_address_space_end - 1; | ||
| 256 | } | ||
| 257 | |||
| 258 | bool IsInAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 259 | return this->Contains(addr, size) && m_alias_region_start <= addr && | ||
| 260 | addr + size - 1 <= m_alias_region_end - 1; | ||
| 261 | } | ||
| 262 | |||
| 263 | bool IsInHeapRegion(KProcessAddress addr, size_t size) const { | ||
| 264 | return this->Contains(addr, size) && m_heap_region_start <= addr && | ||
| 265 | addr + size - 1 <= m_heap_region_end - 1; | ||
| 266 | } | ||
| 267 | |||
| 268 | bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 269 | // Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the | ||
| 270 | // alias code region. | ||
| 271 | return this->CanContain(addr, size, Svc::MemoryState::AliasCode); | ||
| 272 | } | ||
| 273 | |||
| 274 | KScopedLightLock AcquireDeviceMapLock() { | ||
| 275 | return KScopedLightLock(m_device_map_lock); | ||
| 276 | } | ||
| 277 | |||
| 278 | KProcessAddress GetRegionAddress(Svc::MemoryState state) const; | ||
| 279 | size_t GetRegionSize(Svc::MemoryState state) const; | ||
| 280 | bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; | ||
| 281 | |||
| 282 | KProcessAddress GetRegionAddress(KMemoryState state) const { | ||
| 283 | return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 284 | } | ||
| 285 | size_t GetRegionSize(KMemoryState state) const { | ||
| 286 | return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 287 | } | ||
| 288 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 289 | return this->CanContain(addr, size, | ||
| 290 | static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 291 | } | ||
| 292 | |||
| 293 | public: | ||
| 294 | Core::Memory::Memory& GetMemory() { | ||
| 295 | return *m_memory; | ||
| 296 | } | ||
| 297 | |||
| 298 | Core::Memory::Memory& GetMemory() const { | ||
| 299 | return *m_memory; | ||
| 300 | } | ||
| 301 | |||
| 302 | Common::PageTable& GetImpl() { | ||
| 303 | return *m_impl; | ||
| 304 | } | ||
| 305 | |||
| 306 | Common::PageTable& GetImpl() const { | ||
| 307 | return *m_impl; | ||
| 308 | } | ||
| 309 | |||
| 310 | size_t GetNumGuardPages() const { | ||
| 311 | return this->IsKernel() ? 1 : 4; | ||
| 312 | } | ||
| 313 | |||
| 314 | protected: | ||
| 315 | // NOTE: These three functions (the two Operate overloads and FinalizeUpdate) are | ||
| 316 | // virtual in Nintendo's kernel. We devirtualize them, since KPageTable is the only | ||
| 317 | // derived class, and this avoids unnecessary virtual function calls. | ||
| 318 | Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages, | ||
| 319 | KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, | ||
| 320 | OperationType operation, bool reuse_ll); | ||
| 321 | Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages, | ||
| 322 | const KPageGroup& page_group, const KPageProperties properties, | ||
| 323 | OperationType operation, bool reuse_ll); | ||
| 324 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 325 | |||
| 326 | bool IsLockedByCurrentThread() const { | ||
| 327 | return m_general_lock.IsLockedByCurrentThread(); | ||
| 328 | } | ||
| 329 | |||
| 330 | bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) { | ||
| 331 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 332 | |||
| 333 | return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress( | ||
| 334 | m_cached_physical_linear_region, phys_addr); | ||
| 335 | } | ||
| 336 | |||
| 337 | bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { | ||
| 338 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 339 | |||
| 340 | return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress( | ||
| 341 | m_cached_physical_linear_region, phys_addr, size); | ||
| 342 | } | ||
| 343 | |||
| 344 | bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { | ||
| 345 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 346 | |||
| 347 | return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, | ||
| 348 | phys_addr); | ||
| 349 | } | ||
| 350 | |||
| 351 | bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { | ||
| 352 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 353 | |||
| 354 | return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, | ||
| 355 | phys_addr, size); | ||
| 356 | } | ||
| 357 | |||
| 358 | bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) { | ||
| 359 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 360 | |||
| 361 | return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, | ||
| 362 | phys_addr); | ||
| 363 | } | ||
| 364 | |||
| 365 | bool ContainsPages(KProcessAddress addr, size_t num_pages) const { | ||
| 366 | return (m_address_space_start <= addr) && | ||
| 367 | (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && | ||
| 368 | (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); | ||
| 369 | } | ||
| 370 | |||
| 371 | private: | ||
| 372 | KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 373 | size_t num_pages, size_t alignment, size_t offset, | ||
| 374 | size_t guard_pages) const; | ||
| 375 | |||
| 376 | Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 377 | KMemoryState state_mask, KMemoryState state, | ||
| 378 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 379 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 380 | Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 381 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 382 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 383 | KMemoryAttribute attr) const { | ||
| 384 | R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, | ||
| 385 | perm, attr_mask, attr)); | ||
| 386 | } | ||
| 387 | |||
| 388 | Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, | ||
| 389 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 390 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 391 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 392 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 393 | KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, | ||
| 394 | KMemoryState state_mask, KMemoryState state, | ||
| 395 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 396 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 397 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 398 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 399 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 400 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 401 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 402 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 403 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 404 | Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 405 | KMemoryState state_mask, KMemoryState state, | ||
| 406 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 407 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 408 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 409 | R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | ||
| 410 | state_mask, state, perm_mask, perm, attr_mask, attr, | ||
| 411 | ignore_attr)); | ||
| 412 | } | ||
| 413 | Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 414 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 415 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 416 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 417 | R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, | ||
| 418 | attr_mask, attr, ignore_attr)); | ||
| 419 | } | ||
| 420 | |||
| 421 | Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr, | ||
| 422 | size_t size, KMemoryState state_mask, KMemoryState state, | ||
| 423 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 424 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 425 | KMemoryPermission new_perm, KMemoryAttribute lock_attr); | ||
| 426 | Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 427 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 428 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 429 | KMemoryPermission new_perm, KMemoryAttribute lock_attr, | ||
| 430 | const KPageGroup* pg); | ||
| 431 | |||
| 432 | Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page, | ||
| 433 | KProcessAddress address) const; | ||
| 434 | |||
| 435 | Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size, | ||
| 436 | Svc::MemoryState state) const; | ||
| 437 | |||
| 438 | Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 439 | size_t num_pages, KMemoryPermission perm); | ||
| 440 | Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 441 | const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); | ||
| 442 | |||
| 443 | void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 444 | const KPageGroup& pg); | ||
| 445 | |||
| 446 | Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 447 | bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 448 | |||
| 449 | Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size, | ||
| 450 | KMemoryState state_mask, KMemoryState state, | ||
| 451 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 452 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | ||
| 453 | |||
| 454 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 455 | KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, | ||
| 456 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm); | ||
| 457 | |||
| 458 | Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr, | ||
| 459 | size_t size, KMemoryState state, KMemoryPermission perm); | ||
| 460 | Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size, | ||
| 461 | KMemoryState state); | ||
| 462 | Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size, | ||
| 463 | KMemoryState state); | ||
| 464 | |||
| 465 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 466 | KProcessAddress address, size_t size, KMemoryPermission test_perm, | ||
| 467 | KMemoryState dst_state); | ||
| 468 | Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr, | ||
| 469 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 470 | KPageTableBase& src_page_table, bool send); | ||
| 471 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address, | ||
| 472 | size_t size, KMemoryPermission prot_perm); | ||
| 473 | |||
| 474 | size_t GetSize(KMemoryState state) const; | ||
| 475 | |||
| 476 | bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const { | ||
| 477 | // Validate pre-conditions. | ||
| 478 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 479 | |||
| 480 | return this->GetImpl().GetPhysicalAddress(out, virt_addr); | ||
| 481 | } | ||
| 482 | |||
| 483 | public: | ||
| 484 | bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const { | ||
| 485 | // Validate pre-conditions. | ||
| 486 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 487 | |||
| 488 | // Acquire exclusive access to the table while doing address translation. | ||
| 489 | KScopedLightLock lk(m_general_lock); | ||
| 490 | |||
| 491 | return this->GetPhysicalAddressLocked(out, virt_addr); | ||
| 492 | } | ||
| 493 | |||
| 494 | KBlockInfoManager* GetBlockInfoManager() const { | ||
| 495 | return m_block_info_manager; | ||
| 496 | } | ||
| 497 | |||
| 498 | Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm); | ||
| 499 | Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 500 | Svc::MemoryPermission perm); | ||
| 501 | Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, | ||
| 502 | KMemoryAttribute attr); | ||
| 503 | Result SetHeapSize(KProcessAddress* out, size_t size); | ||
| 504 | Result SetMaxHeapSize(size_t size); | ||
| 505 | Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, | ||
| 506 | KProcessAddress addr) const; | ||
| 507 | Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const; | ||
| 508 | Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const { | ||
| 509 | R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static)); | ||
| 510 | } | ||
| 511 | Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const { | ||
| 512 | R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io)); | ||
| 513 | } | ||
| 514 | Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 515 | Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 516 | Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 517 | Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 518 | Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); | ||
| 519 | Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 520 | Svc::MemoryMapping mapping, Svc::MemoryPermission perm); | ||
| 521 | Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 522 | Svc::MemoryMapping mapping); | ||
| 523 | Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); | ||
| 524 | Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm); | ||
| 525 | Result MapInsecureMemory(KProcessAddress address, size_t size); | ||
| 526 | Result UnmapInsecureMemory(KProcessAddress address, size_t size); | ||
| 527 | |||
| 528 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 529 | KPhysicalAddress phys_addr, KProcessAddress region_start, | ||
| 530 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { | ||
| 531 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, | ||
| 532 | region_num_pages, state, perm)); | ||
| 533 | } | ||
| 534 | |||
| 535 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 536 | KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 537 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||
| 538 | this->GetRegionAddress(state), | ||
| 539 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 540 | } | ||
| 541 | |||
| 542 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||
| 543 | KMemoryPermission perm) { | ||
| 544 | R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, | ||
| 545 | this->GetRegionAddress(state), | ||
| 546 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 547 | } | ||
| 548 | |||
| 549 | Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 550 | KMemoryPermission perm); | ||
| 551 | Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); | ||
| 552 | |||
| 553 | Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 554 | KProcessAddress region_start, size_t region_num_pages, KMemoryState state, | ||
| 555 | KMemoryPermission perm); | ||
| 556 | Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, | ||
| 557 | KMemoryPermission perm); | ||
| 558 | Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); | ||
| 559 | |||
| 560 | Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 561 | KMemoryState state_mask, KMemoryState state, | ||
| 562 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 563 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | ||
| 564 | |||
| 565 | Result InvalidateProcessDataCache(KProcessAddress address, size_t size); | ||
| 566 | Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size); | ||
| 567 | |||
| 568 | Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 569 | Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 570 | KMemoryState state); | ||
| 571 | |||
| 572 | Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 573 | Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 574 | KMemoryState state); | ||
| 575 | |||
| 576 | Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, | ||
| 577 | KMemoryPermission perm, bool is_aligned, bool check_heap); | ||
| 578 | Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap); | ||
| 579 | |||
| 580 | Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size); | ||
| 581 | Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size); | ||
| 582 | |||
| 583 | Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 584 | KProcessAddress address, size_t size, | ||
| 585 | KMemoryPermission perm, bool is_aligned); | ||
| 586 | Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address, | ||
| 587 | size_t size); | ||
| 588 | |||
| 589 | Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size); | ||
| 590 | Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); | ||
| 591 | |||
| 592 | Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 593 | KMemoryPermission perm); | ||
| 594 | Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg); | ||
| 595 | Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size); | ||
| 596 | Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg); | ||
| 597 | |||
| 598 | Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address, | ||
| 599 | size_t size); | ||
| 600 | |||
| 601 | Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, | ||
| 602 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 603 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 604 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 605 | Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr, | ||
| 606 | KMemoryState src_state_mask, KMemoryState src_state, | ||
| 607 | KMemoryPermission src_test_perm, | ||
| 608 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 609 | Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, | ||
| 610 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 611 | KMemoryPermission dst_test_perm, | ||
| 612 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 613 | KProcessAddress src_addr); | ||
| 614 | Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, | ||
| 615 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 616 | KMemoryPermission dst_test_perm, | ||
| 617 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 618 | void* buffer); | ||
| 619 | Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr, | ||
| 620 | size_t size, KMemoryState dst_state_mask, | ||
| 621 | KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 622 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 623 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 624 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 625 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 626 | Result CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 627 | KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 628 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 629 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 630 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 631 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 632 | |||
| 633 | Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 634 | KPageTableBase& src_page_table, KMemoryPermission test_perm, | ||
| 635 | KMemoryState dst_state, bool send); | ||
| 636 | Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 637 | Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 638 | |||
| 639 | Result MapPhysicalMemory(KProcessAddress address, size_t size); | ||
| 640 | Result UnmapPhysicalMemory(KProcessAddress address, size_t size); | ||
| 641 | |||
| 642 | Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); | ||
| 643 | Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); | ||
| 644 | |||
| 645 | Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt, | ||
| 646 | KProcessAddress src_address); | ||
| 647 | |||
| 648 | public: | ||
| 649 | KProcessAddress GetAddressSpaceStart() const { | ||
| 650 | return m_address_space_start; | ||
| 651 | } | ||
| 652 | KProcessAddress GetHeapRegionStart() const { | ||
| 653 | return m_heap_region_start; | ||
| 654 | } | ||
| 655 | KProcessAddress GetAliasRegionStart() const { | ||
| 656 | return m_alias_region_start; | ||
| 657 | } | ||
| 658 | KProcessAddress GetStackRegionStart() const { | ||
| 659 | return m_stack_region_start; | ||
| 660 | } | ||
| 661 | KProcessAddress GetKernelMapRegionStart() const { | ||
| 662 | return m_kernel_map_region_start; | ||
| 663 | } | ||
| 664 | KProcessAddress GetCodeRegionStart() const { | ||
| 665 | return m_code_region_start; | ||
| 666 | } | ||
| 667 | KProcessAddress GetAliasCodeRegionStart() const { | ||
| 668 | return m_alias_code_region_start; | ||
| 669 | } | ||
| 670 | |||
| 671 | size_t GetAddressSpaceSize() const { | ||
| 672 | return m_address_space_end - m_address_space_start; | ||
| 673 | } | ||
| 674 | size_t GetHeapRegionSize() const { | ||
| 675 | return m_heap_region_end - m_heap_region_start; | ||
| 676 | } | ||
| 677 | size_t GetAliasRegionSize() const { | ||
| 678 | return m_alias_region_end - m_alias_region_start; | ||
| 679 | } | ||
| 680 | size_t GetStackRegionSize() const { | ||
| 681 | return m_stack_region_end - m_stack_region_start; | ||
| 682 | } | ||
| 683 | size_t GetKernelMapRegionSize() const { | ||
| 684 | return m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 685 | } | ||
| 686 | size_t GetCodeRegionSize() const { | ||
| 687 | return m_code_region_end - m_code_region_start; | ||
| 688 | } | ||
| 689 | size_t GetAliasCodeRegionSize() const { | ||
| 690 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 691 | } | ||
| 692 | |||
| 693 | size_t GetNormalMemorySize() const { | ||
| 694 | // Lock the table. | ||
| 695 | KScopedLightLock lk(m_general_lock); | ||
| 696 | |||
| 697 | return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size; | ||
| 698 | } | ||
| 699 | |||
| 700 | size_t GetCodeSize() const; | ||
| 701 | size_t GetCodeDataSize() const; | ||
| 702 | size_t GetAliasCodeSize() const; | ||
| 703 | size_t GetAliasCodeDataSize() const; | ||
| 704 | |||
| 705 | u32 GetAllocateOption() const { | ||
| 706 | return m_allocate_option; | ||
| 707 | } | ||
| 708 | |||
| 709 | u32 GetAddressSpaceWidth() const { | ||
| 710 | return m_address_space_width; | ||
| 711 | } | ||
| 712 | |||
| 713 | public: | ||
| 714 | // Linear mapped | ||
| 715 | static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) { | ||
| 716 | return kernel.System().DeviceMemory().GetPointer<u8>(addr); | ||
| 717 | } | ||
| 718 | |||
| 719 | static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel, | ||
| 720 | KVirtualAddress addr) { | ||
| 721 | return kernel.MemoryLayout().GetLinearPhysicalAddress(addr); | ||
| 722 | } | ||
| 723 | |||
| 724 | static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel, | ||
| 725 | KPhysicalAddress addr) { | ||
| 726 | return kernel.MemoryLayout().GetLinearVirtualAddress(addr); | ||
| 727 | } | ||
| 728 | |||
| 729 | // Heap | ||
| 730 | static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) { | ||
| 731 | return kernel.System().DeviceMemory().GetPointer<u8>(addr); | ||
| 732 | } | ||
| 733 | |||
| 734 | static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) { | ||
| 735 | return GetLinearMappedPhysicalAddress(kernel, addr); | ||
| 736 | } | ||
| 737 | |||
| 738 | static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) { | ||
| 739 | return GetLinearMappedVirtualAddress(kernel, addr); | ||
| 740 | } | ||
| 741 | |||
| 742 | // Member heap | ||
| 743 | u8* GetHeapVirtualPointer(KPhysicalAddress addr) { | ||
| 744 | return GetHeapVirtualPointer(m_kernel, addr); | ||
| 745 | } | ||
| 746 | |||
| 747 | KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) { | ||
| 748 | return GetHeapPhysicalAddress(m_kernel, addr); | ||
| 749 | } | ||
| 750 | |||
| 751 | KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { | ||
| 752 | return GetHeapVirtualAddress(m_kernel, addr); | ||
| 753 | } | ||
| 754 | |||
| 755 | // TODO: GetPageTableVirtualAddress | ||
| 756 | // TODO: GetPageTablePhysicalAddress | ||
| 757 | }; | ||
| 758 | |||
| 759 | } // namespace Kernel | ||
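The Contains(addr, size) and ContainsPages checks in the header above are written to be safe against unsigned wrap-around: `addr < addr + size` fails for both empty ranges and ranges whose end overflows, and both bounds are compared through `x - 1` so a range ending exactly at the end of the address space is still accepted. A minimal standalone sketch of the same check (plain integers stand in for the kernel's address wrappers; illustrative, not yuzu code):

    #include <cstdint>

    // Overflow-safe range check mirroring Contains(addr, size): the middle term
    // rejects zero-length ranges and unsigned wrap-around, and both ends compare
    // via `x - 1` so a range touching the last byte of the space is accepted.
    constexpr bool ContainsRange(uint64_t space_start, uint64_t space_end,
                                 uint64_t addr, uint64_t size) {
        return space_start <= addr && addr < addr + size &&
               addr + size - 1 <= space_end - 1;
    }

    static_assert(ContainsRange(0x1000, 0x2000, 0x1000, 0x1000));  // exactly fills the space
    static_assert(!ContainsRange(0x1000, 0x2000, 0x1800, 0));      // empty range rejected
    static_assert(!ContainsRange(0x1000, 0x2000,
                                 0xFFFF'FFFF'FFFF'F000ULL, 0x2000));  // wrap-around rejected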
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 1f4b0755d..3cfb414e5 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa | |||
| 298 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); | 298 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); |
| 299 | const bool enable_das_merge = | 299 | const bool enable_das_merge = |
| 300 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); | 300 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); |
| 301 | R_TRY(m_page_table.InitializeForProcess( | 301 | R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, |
| 302 | as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, | 302 | params.code_address, params.code_num_pages * PageSize, |
| 303 | params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory())); | 303 | m_system_resource, res_limit, this->GetMemory())); |
| 304 | } | 304 | } |
| 305 | ON_RESULT_FAILURE_2 { | 305 | ON_RESULT_FAILURE_2 { |
| 306 | m_page_table.Finalize(); | 306 | m_page_table.Finalize(); |
| @@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, | |||
| 391 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); | 391 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); |
| 392 | const bool enable_das_merge = | 392 | const bool enable_das_merge = |
| 393 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); | 393 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); |
| 394 | R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, | 394 | R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, |
| 395 | !enable_aslr, pool, params.code_address, code_size, | 395 | params.code_address, code_size, m_system_resource, res_limit, |
| 396 | m_system_resource, res_limit, this->GetMemory())); | 396 | this->GetMemory())); |
| 397 | } | 397 | } |
| 398 | ON_RESULT_FAILURE_2 { | 398 | ON_RESULT_FAILURE_2 { |
| 399 | m_page_table.Finalize(); | 399 | m_page_table.Finalize(); |
| @@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ | |||
| 1122 | void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} | 1122 | void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} |
| 1123 | 1123 | ||
| 1124 | KProcess::KProcess(KernelCore& kernel) | 1124 | KProcess::KProcess(KernelCore& kernel) |
| 1125 | : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()}, | 1125 | : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel}, |
| 1126 | m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()}, | 1126 | m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()}, |
| 1127 | m_address_arbiter{kernel.System()}, m_handle_table{kernel} {} | 1127 | m_handle_table{kernel} {} |
| 1128 | KProcess::~KProcess() = default; | 1128 | KProcess::~KProcess() = default; |
| 1129 | 1129 | ||
| 1130 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, | 1130 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, |
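Both call sites above pass `!enable_aslr` as the page table's from_back argument, so the table allocates from the back of the pool whenever ASLR is disabled. The flag tests use the True()/False() helpers on a flags enum; a toy re-creation of their assumed semantics (hypothetical enum values, not yuzu's definitions):

    #include <cstdint>

    // Assumed semantics of True()/False(): test whether a masked enum-class AND
    // leaves any bit set, with no casts needed at the call site.
    enum class CreateFlag : uint32_t {
        None = 0,
        EnableAslr = 1u << 0,
        DisableDeviceAddressSpaceMerge = 1u << 1,
    };

    constexpr CreateFlag operator&(CreateFlag a, CreateFlag b) {
        return static_cast<CreateFlag>(static_cast<uint32_t>(a) &
                                       static_cast<uint32_t>(b));
    }
    constexpr bool True(CreateFlag f) { return static_cast<uint32_t>(f) != 0; }
    constexpr bool False(CreateFlag f) { return !True(f); }

    static_assert(True(CreateFlag::EnableAslr & CreateFlag::EnableAslr));
    static_assert(False(CreateFlag::EnableAslr &
                        CreateFlag::DisableDeviceAddressSpaceMerge));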
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index f9f755afa..8339465fd 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h | |||
| @@ -5,13 +5,14 @@ | |||
| 5 | 5 | ||
| 6 | #include <map> | 6 | #include <map> |
| 7 | 7 | ||
| 8 | #include "core/file_sys/program_metadata.h" | ||
| 8 | #include "core/hle/kernel/code_set.h" | 9 | #include "core/hle/kernel/code_set.h" |
| 9 | #include "core/hle/kernel/k_address_arbiter.h" | 10 | #include "core/hle/kernel/k_address_arbiter.h" |
| 10 | #include "core/hle/kernel/k_capabilities.h" | 11 | #include "core/hle/kernel/k_capabilities.h" |
| 11 | #include "core/hle/kernel/k_condition_variable.h" | 12 | #include "core/hle/kernel/k_condition_variable.h" |
| 12 | #include "core/hle/kernel/k_handle_table.h" | 13 | #include "core/hle/kernel/k_handle_table.h" |
| 13 | #include "core/hle/kernel/k_page_table.h" | ||
| 14 | #include "core/hle/kernel/k_page_table_manager.h" | 14 | #include "core/hle/kernel/k_page_table_manager.h" |
| 15 | #include "core/hle/kernel/k_process_page_table.h" | ||
| 15 | #include "core/hle/kernel/k_system_resource.h" | 16 | #include "core/hle/kernel/k_system_resource.h" |
| 16 | #include "core/hle/kernel/k_thread.h" | 17 | #include "core/hle/kernel/k_thread.h" |
| 17 | #include "core/hle/kernel/k_thread_local_page.h" | 18 | #include "core/hle/kernel/k_thread_local_page.h" |
| @@ -65,7 +66,7 @@ private: | |||
| 65 | using TLPIterator = TLPTree::iterator; | 66 | using TLPIterator = TLPTree::iterator; |
| 66 | 67 | ||
| 67 | private: | 68 | private: |
| 68 | KPageTable m_page_table; | 69 | KProcessPageTable m_page_table; |
| 69 | std::atomic<size_t> m_used_kernel_memory_size{}; | 70 | std::atomic<size_t> m_used_kernel_memory_size{}; |
| 70 | TLPTree m_fully_used_tlp_tree{}; | 71 | TLPTree m_fully_used_tlp_tree{}; |
| 71 | TLPTree m_partially_used_tlp_tree{}; | 72 | TLPTree m_partially_used_tlp_tree{}; |
| @@ -254,9 +255,8 @@ public: | |||
| 254 | return m_is_hbl; | 255 | return m_is_hbl; |
| 255 | } | 256 | } |
| 256 | 257 | ||
| 257 | Kernel::KMemoryManager::Direction GetAllocateOption() const { | 258 | u32 GetAllocateOption() const { |
| 258 | // TODO: property of the KPageTableBase | 259 | return m_page_table.GetAllocateOption(); |
| 259 | return KMemoryManager::Direction::FromFront; | ||
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | ThreadList& GetThreadList() { | 262 | ThreadList& GetThreadList() { |
| @@ -295,10 +295,10 @@ public: | |||
| 295 | return m_list_lock; | 295 | return m_list_lock; |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | KPageTable& GetPageTable() { | 298 | KProcessPageTable& GetPageTable() { |
| 299 | return m_page_table; | 299 | return m_page_table; |
| 300 | } | 300 | } |
| 301 | const KPageTable& GetPageTable() const { | 301 | const KProcessPageTable& GetPageTable() const { |
| 302 | return m_page_table; | 302 | return m_page_table; |
| 303 | } | 303 | } |
| 304 | 304 | ||
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h new file mode 100644 index 000000000..b7ae5abd0 --- /dev/null +++ b/src/core/hle/kernel/k_process_page_table.h | |||
| @@ -0,0 +1,480 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "core/hle/kernel/k_page_table.h" | ||
| 7 | #include "core/hle/kernel/k_scoped_lock.h" | ||
| 8 | #include "core/hle/kernel/svc_types.h" | ||
| 9 | |||
| 10 | namespace Core { | ||
| 11 | class ARM_Interface; | ||
| 12 | } | ||
| 13 | |||
| 14 | namespace Kernel { | ||
| 15 | |||
| 16 | class KProcessPageTable { | ||
| 17 | private: | ||
| 18 | KPageTable m_page_table; | ||
| 19 | |||
| 20 | public: | ||
| 21 | KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {} | ||
| 22 | |||
| 23 | Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, | ||
| 24 | bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, | ||
| 25 | size_t code_size, KSystemResource* system_resource, | ||
| 26 | KResourceLimit* resource_limit, Core::Memory::Memory& memory) { | ||
| 27 | R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, | ||
| 28 | from_back, pool, code_address, code_size, | ||
| 29 | system_resource, resource_limit, memory)); | ||
| 30 | } | ||
| 31 | |||
| 32 | void Finalize() { | ||
| 33 | m_page_table.Finalize(); | ||
| 34 | } | ||
| 35 | |||
| 36 | Core::Memory::Memory& GetMemory() { | ||
| 37 | return m_page_table.GetMemory(); | ||
| 38 | } | ||
| 39 | |||
| 40 | Core::Memory::Memory& GetMemory() const { | ||
| 41 | return m_page_table.GetMemory(); | ||
| 42 | } | ||
| 43 | |||
| 44 | Common::PageTable& GetImpl() { | ||
| 45 | return m_page_table.GetImpl(); | ||
| 46 | } | ||
| 47 | |||
| 48 | Common::PageTable& GetImpl() const { | ||
| 49 | return m_page_table.GetImpl(); | ||
| 50 | } | ||
| 51 | |||
| 52 | size_t GetNumGuardPages() const { | ||
| 53 | return m_page_table.GetNumGuardPages(); | ||
| 54 | } | ||
| 55 | |||
| 56 | KScopedLightLock AcquireDeviceMapLock() { | ||
| 57 | return m_page_table.AcquireDeviceMapLock(); | ||
| 58 | } | ||
| 59 | |||
| 60 | Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) { | ||
| 61 | R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm)); | ||
| 62 | } | ||
| 63 | |||
| 64 | Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 65 | Svc::MemoryPermission perm) { | ||
| 66 | R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm)); | ||
| 67 | } | ||
| 68 | |||
| 69 | Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, | ||
| 70 | KMemoryAttribute attr) { | ||
| 71 | R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr)); | ||
| 72 | } | ||
| 73 | |||
| 74 | Result SetHeapSize(KProcessAddress* out, size_t size) { | ||
| 75 | R_RETURN(m_page_table.SetHeapSize(out, size)); | ||
| 76 | } | ||
| 77 | |||
| 78 | Result SetMaxHeapSize(size_t size) { | ||
| 79 | R_RETURN(m_page_table.SetMaxHeapSize(size)); | ||
| 80 | } | ||
| 81 | |||
| 82 | Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, | ||
| 83 | KProcessAddress addr) const { | ||
| 84 | R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr)); | ||
| 85 | } | ||
| 86 | |||
| 87 | Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) { | ||
| 88 | R_RETURN(m_page_table.QueryPhysicalAddress(out, address)); | ||
| 89 | } | ||
| 90 | |||
| 91 | Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) { | ||
| 92 | R_RETURN(m_page_table.QueryStaticMapping(out, address, size)); | ||
| 93 | } | ||
| 94 | |||
| 95 | Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) { | ||
| 96 | R_RETURN(m_page_table.QueryIoMapping(out, address, size)); | ||
| 97 | } | ||
| 98 | |||
| 99 | Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 100 | R_RETURN(m_page_table.MapMemory(dst_address, src_address, size)); | ||
| 101 | } | ||
| 102 | |||
| 103 | Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 104 | R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size)); | ||
| 105 | } | ||
| 106 | |||
| 107 | Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 108 | R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size)); | ||
| 109 | } | ||
| 110 | |||
| 111 | Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 112 | R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size)); | ||
| 113 | } | ||
| 114 | |||
| 115 | Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 116 | R_RETURN(m_page_table.MapIo(phys_addr, size, perm)); | ||
| 117 | } | ||
| 118 | |||
| 119 | Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 120 | Svc::MemoryMapping mapping, Svc::MemoryPermission perm) { | ||
| 121 | R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm)); | ||
| 122 | } | ||
| 123 | |||
| 124 | Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 125 | Svc::MemoryMapping mapping) { | ||
| 126 | R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping)); | ||
| 127 | } | ||
| 128 | |||
| 129 | Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 130 | R_RETURN(m_page_table.MapStatic(phys_addr, size, perm)); | ||
| 131 | } | ||
| 132 | |||
| 133 | Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { | ||
| 134 | R_RETURN(m_page_table.MapRegion(region_type, perm)); | ||
| 135 | } | ||
| 136 | |||
| 137 | Result MapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 138 | R_RETURN(m_page_table.MapInsecureMemory(address, size)); | ||
| 139 | } | ||
| 140 | |||
| 141 | Result UnmapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 142 | R_RETURN(m_page_table.UnmapInsecureMemory(address, size)); | ||
| 143 | } | ||
| 144 | |||
| 145 | Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||
| 146 | KMemoryPermission perm) { | ||
| 147 | R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm)); | ||
| 148 | } | ||
| 149 | |||
| 150 | Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) { | ||
| 151 | R_RETURN(m_page_table.UnmapPageGroup(address, pg, state)); | ||
| 152 | } | ||
| 153 | |||
| 154 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 155 | KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 156 | R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm)); | ||
| 157 | } | ||
| 158 | |||
| 159 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||
| 160 | KMemoryPermission perm) { | ||
| 161 | R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm)); | ||
| 162 | } | ||
| 163 | |||
| 164 | Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 165 | KMemoryPermission perm) { | ||
| 166 | R_RETURN(m_page_table.MapPages(address, num_pages, state, perm)); | ||
| 167 | } | ||
| 168 | |||
| 169 | Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) { | ||
| 170 | R_RETURN(m_page_table.UnmapPages(addr, num_pages, state)); | ||
| 171 | } | ||
| 172 | |||
| 173 | Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 174 | KMemoryState state_mask, KMemoryState state, | ||
| 175 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 176 | KMemoryAttribute attr_mask, KMemoryAttribute attr) { | ||
| 177 | R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, | ||
| 178 | perm_mask, perm, attr_mask, attr)); | ||
| 179 | } | ||
| 180 | |||
| 181 | Result InvalidateProcessDataCache(KProcessAddress address, size_t size) { | ||
| 182 | R_RETURN(m_page_table.InvalidateProcessDataCache(address, size)); | ||
| 183 | } | ||
| 184 | |||
| 185 | Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 186 | R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size)); | ||
| 187 | } | ||
| 188 | |||
| 189 | Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 190 | KMemoryState state) { | ||
| 191 | R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state)); | ||
| 192 | } | ||
| 193 | |||
| 194 | Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 195 | R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size)); | ||
| 196 | } | ||
| 197 | |||
| 198 | Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 199 | KMemoryState state) { | ||
| 200 | R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state)); | ||
| 201 | } | ||
| 202 | |||
| 203 | Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, | ||
| 204 | KMemoryPermission perm, bool is_aligned, bool check_heap) { | ||
| 205 | R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm, | ||
| 206 | is_aligned, check_heap)); | ||
| 207 | } | ||
| 208 | |||
| 209 | Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) { | ||
| 210 | R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap)); | ||
| 211 | } | ||
| 212 | |||
| 213 | Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { | ||
| 214 | R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size)); | ||
| 215 | } | ||
| 216 | |||
| 217 | Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) { | ||
| 218 | R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size)); | ||
| 219 | } | ||
| 220 | |||
| 221 | Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 222 | KProcessAddress address, size_t size, | ||
| 223 | KMemoryPermission perm, bool is_aligned) { | ||
| 224 | R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, | ||
| 225 | is_aligned)); | ||
| 226 | } | ||
| 227 | |||
| 228 | Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 229 | KProcessAddress address, size_t size) { | ||
| 230 | R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size)); | ||
| 231 | } | ||
| 232 | |||
| 233 | Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) { | ||
| 234 | R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size)); | ||
| 235 | } | ||
| 236 | |||
| 237 | Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { | ||
| 238 | R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size)); | ||
| 239 | } | ||
| 240 | |||
| 241 | Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 242 | KMemoryPermission perm) { | ||
| 243 | R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm)); | ||
| 244 | } | ||
| 245 | |||
| 246 | Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) { | ||
| 247 | R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg)); | ||
| 248 | } | ||
| 249 | |||
| 250 | Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) { | ||
| 251 | R_RETURN(m_page_table.LockForCodeMemory(out, address, size)); | ||
| 252 | } | ||
| 253 | |||
| 254 | Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) { | ||
| 255 | R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg)); | ||
| 256 | } | ||
| 257 | |||
| 258 | Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out, | ||
| 259 | KProcessAddress address, size_t size) { | ||
| 260 | R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size)); | ||
| 261 | } | ||
| 262 | |||
| 263 | Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, | ||
| 264 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 265 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 266 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 267 | R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, | ||
| 268 | src_state, src_test_perm, src_attr_mask, | ||
| 269 | src_attr)); | ||
| 270 | } | ||
| 271 | |||
| 272 | Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr, | ||
| 273 | KMemoryState src_state_mask, KMemoryState src_state, | ||
| 274 | KMemoryPermission src_test_perm, | ||
| 275 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 276 | R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, | ||
| 277 | src_state, src_test_perm, src_attr_mask, | ||
| 278 | src_attr)); | ||
| 279 | } | ||
| 280 | |||
| 281 | Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, | ||
| 282 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 283 | KMemoryPermission dst_test_perm, | ||
| 284 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 285 | KProcessAddress src_addr) { | ||
| 286 | R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, | ||
| 287 | dst_test_perm, dst_attr_mask, dst_attr, | ||
| 288 | src_addr)); | ||
| 289 | } | ||
| 290 | |||
| 291 | Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, | ||
| 292 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 293 | KMemoryPermission dst_test_perm, | ||
| 294 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 295 | void* src_addr) { | ||
| 296 | R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, | ||
| 297 | dst_state, dst_test_perm, dst_attr_mask, | ||
| 298 | dst_attr, src_addr)); | ||
| 299 | } | ||
| 300 | |||
| 301 | Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr, | ||
| 302 | size_t size, KMemoryState dst_state_mask, | ||
| 303 | KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 304 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 305 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 306 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 307 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 308 | R_RETURN(m_page_table.CopyMemoryFromHeapToHeap( | ||
| 309 | dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, | ||
| 310 | dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, | ||
| 311 | src_attr_mask, src_attr)); | ||
| 312 | } | ||
| 313 | |||
| 314 | Result CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 315 | KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 316 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 317 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 318 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 319 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 320 | R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 321 | dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, | ||
| 322 | dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, | ||
| 323 | src_attr_mask, src_attr)); | ||
| 324 | } | ||
| 325 | |||
| 326 | Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 327 | KProcessPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 328 | KMemoryState dst_state, bool send) { | ||
| 329 | R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, | ||
| 330 | test_perm, dst_state, send)); | ||
| 331 | } | ||
| 332 | |||
| 333 | Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) { | ||
| 334 | R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state)); | ||
| 335 | } | ||
| 336 | |||
| 337 | Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) { | ||
| 338 | R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state)); | ||
| 339 | } | ||
| 340 | |||
| 341 | Result MapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 342 | R_RETURN(m_page_table.MapPhysicalMemory(address, size)); | ||
| 343 | } | ||
| 344 | |||
| 345 | Result UnmapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 346 | R_RETURN(m_page_table.UnmapPhysicalMemory(address, size)); | ||
| 347 | } | ||
| 348 | |||
| 349 | Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 350 | R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size)); | ||
| 351 | } | ||
| 352 | |||
| 353 | Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 354 | R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size)); | ||
| 355 | } | ||
| 356 | |||
| 357 | Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, | ||
| 358 | KProcessPageTable& src_page_table, KProcessAddress src_address) { | ||
| 359 | R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, | ||
| 360 | src_address)); | ||
| 361 | } | ||
| 362 | |||
| 363 | bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) { | ||
| 364 | return m_page_table.GetPhysicalAddress(out, address); | ||
| 365 | } | ||
| 366 | |||
| 367 | bool Contains(KProcessAddress addr, size_t size) const { | ||
| 368 | return m_page_table.Contains(addr, size); | ||
| 369 | } | ||
| 370 | |||
| 371 | bool IsInAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 372 | return m_page_table.IsInAliasRegion(addr, size); | ||
| 373 | } | ||
| 374 | bool IsInHeapRegion(KProcessAddress addr, size_t size) const { | ||
| 375 | return m_page_table.IsInHeapRegion(addr, size); | ||
| 376 | } | ||
| 377 | bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 378 | return m_page_table.IsInUnsafeAliasRegion(addr, size); | ||
| 379 | } | ||
| 380 | |||
| 381 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 382 | return m_page_table.CanContain(addr, size, state); | ||
| 383 | } | ||
| 384 | |||
| 385 | KProcessAddress GetAddressSpaceStart() const { | ||
| 386 | return m_page_table.GetAddressSpaceStart(); | ||
| 387 | } | ||
| 388 | KProcessAddress GetHeapRegionStart() const { | ||
| 389 | return m_page_table.GetHeapRegionStart(); | ||
| 390 | } | ||
| 391 | KProcessAddress GetAliasRegionStart() const { | ||
| 392 | return m_page_table.GetAliasRegionStart(); | ||
| 393 | } | ||
| 394 | KProcessAddress GetStackRegionStart() const { | ||
| 395 | return m_page_table.GetStackRegionStart(); | ||
| 396 | } | ||
| 397 | KProcessAddress GetKernelMapRegionStart() const { | ||
| 398 | return m_page_table.GetKernelMapRegionStart(); | ||
| 399 | } | ||
| 400 | KProcessAddress GetCodeRegionStart() const { | ||
| 401 | return m_page_table.GetCodeRegionStart(); | ||
| 402 | } | ||
| 403 | KProcessAddress GetAliasCodeRegionStart() const { | ||
| 404 | return m_page_table.GetAliasCodeRegionStart(); | ||
| 405 | } | ||
| 406 | |||
| 407 | size_t GetAddressSpaceSize() const { | ||
| 408 | return m_page_table.GetAddressSpaceSize(); | ||
| 409 | } | ||
| 410 | size_t GetHeapRegionSize() const { | ||
| 411 | return m_page_table.GetHeapRegionSize(); | ||
| 412 | } | ||
| 413 | size_t GetAliasRegionSize() const { | ||
| 414 | return m_page_table.GetAliasRegionSize(); | ||
| 415 | } | ||
| 416 | size_t GetStackRegionSize() const { | ||
| 417 | return m_page_table.GetStackRegionSize(); | ||
| 418 | } | ||
| 419 | size_t GetKernelMapRegionSize() const { | ||
| 420 | return m_page_table.GetKernelMapRegionSize(); | ||
| 421 | } | ||
| 422 | size_t GetCodeRegionSize() const { | ||
| 423 | return m_page_table.GetCodeRegionSize(); | ||
| 424 | } | ||
| 425 | size_t GetAliasCodeRegionSize() const { | ||
| 426 | return m_page_table.GetAliasCodeRegionSize(); | ||
| 427 | } | ||
| 428 | |||
| 429 | size_t GetNormalMemorySize() const { | ||
| 430 | return m_page_table.GetNormalMemorySize(); | ||
| 431 | } | ||
| 432 | |||
| 433 | size_t GetCodeSize() const { | ||
| 434 | return m_page_table.GetCodeSize(); | ||
| 435 | } | ||
| 436 | size_t GetCodeDataSize() const { | ||
| 437 | return m_page_table.GetCodeDataSize(); | ||
| 438 | } | ||
| 439 | |||
| 440 | size_t GetAliasCodeSize() const { | ||
| 441 | return m_page_table.GetAliasCodeSize(); | ||
| 442 | } | ||
| 443 | size_t GetAliasCodeDataSize() const { | ||
| 444 | return m_page_table.GetAliasCodeDataSize(); | ||
| 445 | } | ||
| 446 | |||
| 447 | u32 GetAllocateOption() const { | ||
| 448 | return m_page_table.GetAllocateOption(); | ||
| 449 | } | ||
| 450 | |||
| 451 | u32 GetAddressSpaceWidth() const { | ||
| 452 | return m_page_table.GetAddressSpaceWidth(); | ||
| 453 | } | ||
| 454 | |||
| 455 | KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) { | ||
| 456 | return m_page_table.GetHeapPhysicalAddress(address); | ||
| 457 | } | ||
| 458 | |||
| 459 | u8* GetHeapVirtualPointer(KPhysicalAddress address) { | ||
| 460 | return m_page_table.GetHeapVirtualPointer(address); | ||
| 461 | } | ||
| 462 | |||
| 463 | KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) { | ||
| 464 | return m_page_table.GetHeapVirtualAddress(address); | ||
| 465 | } | ||
| 466 | |||
| 467 | KBlockInfoManager* GetBlockInfoManager() { | ||
| 468 | return m_page_table.GetBlockInfoManager(); | ||
| 469 | } | ||
| 470 | |||
| 471 | KPageTable& GetBasePageTable() { | ||
| 472 | return m_page_table; | ||
| 473 | } | ||
| 474 | |||
| 475 | const KPageTable& GetBasePageTable() const { | ||
| 476 | return m_page_table; | ||
| 477 | } | ||
| 478 | }; | ||
| 479 | |||
| 480 | } // namespace Kernel | ||
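KProcessPageTable owns its KPageTable by value and forwards every operation to it. The cross-table members (CopyMemoryFromHeapToHeap, SetupForIpc, UnmapProcessMemory) hand over another wrapper's private m_page_table directly; this is legal because C++ access control is per-class, not per-object. A toy model of the same pattern (hypothetical names, not yuzu code):

    #include <cstddef>
    #include <cstring>
    #include <iostream>
    #include <vector>

    class InnerTable {
    public:
        explicit InnerTable(std::size_t n) : m_bytes(n, 0) {}
        void CopyTo(InnerTable& dst, std::size_t off, std::size_t len) {
            std::memcpy(dst.m_bytes.data() + off, m_bytes.data() + off, len);
        }
        std::vector<unsigned char> m_bytes;
    };

    class TableWrapper {
    public:
        explicit TableWrapper(std::size_t n) : m_table(n) {}

        // Mirrors CopyMemoryFromHeapToHeap's shape: forward to the inner table,
        // handing over the *other* wrapper's private m_table (same-class access).
        void CopyTo(TableWrapper& dst, std::size_t off, std::size_t len) {
            m_table.CopyTo(dst.m_table, off, len);
        }

        InnerTable& GetBaseTable() { return m_table; }  // cf. GetBasePageTable()

    private:
        InnerTable m_table;
    };

    int main() {
        TableWrapper src(16), dst(16);
        src.GetBaseTable().m_bytes[4] = 42;
        src.CopyTo(dst, 4, 1);
        std::cout << static_cast<int>(dst.GetBaseTable().m_bytes[4]) << '\n';  // 42
    }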
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index c64ceb530..3ea653163 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp | |||
| @@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) { | |||
| 383 | if (event != nullptr) { | 383 | if (event != nullptr) { |
| 384 | // // Get the client process/page table. | 384 | // // Get the client process/page table. |
| 385 | // KProcess *client_process = client_thread->GetOwnerProcess(); | 385 | // KProcess *client_process = client_thread->GetOwnerProcess(); |
| 386 | // KPageTable *client_page_table = std::addressof(client_process->PageTable()); | 386 | // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable()); |
| 387 | 387 | ||
| 388 | // // If we need to, reply with an async error. | 388 | // // If we need to, reply with an async error. |
| 389 | // if (R_FAILED(client_result)) { | 389 | // if (R_FAILED(client_result)) { |
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp index 07e92aa80..b51941faf 100644 --- a/src/core/hle/kernel/k_system_resource.cpp +++ b/src/core/hle/kernel/k_system_resource.cpp | |||
| @@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l | |||
| 40 | 40 | ||
| 41 | // Get resource pointer. | 41 | // Get resource pointer. |
| 42 | KPhysicalAddress resource_paddr = | 42 | KPhysicalAddress resource_paddr = |
| 43 | KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address); | 43 | KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address); |
| 44 | auto* resource = | 44 | auto* resource = |
| 45 | m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr); | 45 | m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr); |
| 46 | 46 | ||
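GetHeapPhysicalAddress now takes the kernel rather than a memory layout and, per the header earlier in this diff, resolves through GetLinearMappedPhysicalAddress: heap memory is linearly ("direct") mapped, so translation in either direction is a single fixed offset. A sketch under that assumption (the base constant is illustrative only, not yuzu's real value):

    #include <cstdint>

    // Linear mapping: physical and virtual heap addresses differ by one offset,
    // so each translation is a single addition or subtraction.
    constexpr uint64_t kLinearMapBase = 0xFFFF'8000'0000'0000ULL;  // assumed value

    constexpr uint64_t PhysToVirt(uint64_t phys) { return phys + kLinearMapBase; }
    constexpr uint64_t VirtToPhys(uint64_t virt) { return virt - kLinearMapBase; }

    static_assert(VirtToPhys(PhysToVirt(0x80042000ULL)) == 0x80042000ULL);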
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp index 2c45b4232..a632d1634 100644 --- a/src/core/hle/kernel/k_thread_local_page.cpp +++ b/src/core/hle/kernel/k_thread_local_page.cpp | |||
| @@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { | |||
| 37 | 37 | ||
| 38 | Result KThreadLocalPage::Finalize() { | 38 | Result KThreadLocalPage::Finalize() { |
| 39 | // Get the physical address of the page. | 39 | // Get the physical address of the page. |
| 40 | const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr); | 40 | KPhysicalAddress phys_addr{}; |
| 41 | ASSERT(phys_addr); | 41 | ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr)); |
| 42 | 42 | ||
| 43 | // Unmap the page. | 43 | // Unmap the page. |
| 44 | R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); | 44 | R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); |
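The hunk above replaces a physical-address getter that returned a raw (possibly sentinel) address with one that reports success and writes through a pointer. A sketch of that calling convention, with a hypothetical lookup and an illustrative mapping:

#include <cstdint>

// Hypothetical lookup mirroring the new out-parameter style: success is the
// return value, and the address is written through the pointer on success.
bool GetPhysicalAddress(std::uint64_t* out_phys, std::uint64_t virt) {
    if (virt == 0) {
        return false; // Unmapped: report failure rather than a sentinel address.
    }
    *out_phys = virt + 0x80000000ULL; // Illustrative linear offset mapping only.
    return true;
}

At the call site this composes naturally with an assertion, as in Finalize above: assert success, then use the out value.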
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp deleted file mode 100644 index 773319ad8..000000000 --- a/src/core/hle/kernel/process_capability.cpp +++ /dev/null | |||
| @@ -1,389 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include <bit> | ||
| 5 | |||
| 6 | #include "common/bit_util.h" | ||
| 7 | #include "common/logging/log.h" | ||
| 8 | #include "core/hle/kernel/k_handle_table.h" | ||
| 9 | #include "core/hle/kernel/k_page_table.h" | ||
| 10 | #include "core/hle/kernel/process_capability.h" | ||
| 11 | #include "core/hle/kernel/svc_results.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | namespace { | ||
| 15 | |||
| 16 | // clang-format off | ||
| 17 | |||
| 18 | // Shift offsets for kernel capability types. | ||
| 19 | enum : u32 { | ||
| 20 | CapabilityOffset_PriorityAndCoreNum = 3, | ||
| 21 | CapabilityOffset_Syscall = 4, | ||
| 22 | CapabilityOffset_MapPhysical = 6, | ||
| 23 | CapabilityOffset_MapIO = 7, | ||
| 24 | CapabilityOffset_MapRegion = 10, | ||
| 25 | CapabilityOffset_Interrupt = 11, | ||
| 26 | CapabilityOffset_ProgramType = 13, | ||
| 27 | CapabilityOffset_KernelVersion = 14, | ||
| 28 | CapabilityOffset_HandleTableSize = 15, | ||
| 29 | CapabilityOffset_Debug = 16, | ||
| 30 | }; | ||
| 31 | |||
| 32 | // Combined mask of all parameters that may be initialized only once. | ||
| 33 | constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) | | ||
| 34 | (1U << CapabilityOffset_ProgramType) | | ||
| 35 | (1U << CapabilityOffset_KernelVersion) | | ||
| 36 | (1U << CapabilityOffset_HandleTableSize) | | ||
| 37 | (1U << CapabilityOffset_Debug); | ||
| 38 | |||
| 39 | // Packed kernel version indicating 10.4.0 | ||
| 40 | constexpr u32 PackedKernelVersion = 0x520000; | ||
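| | // (Bits 19-31 hold the major version; bits 15-18 apparently hold the minor: | ||
| | // 0x520000 >> 19 == 10 and (0x520000 >> 15) & 0xF == 4, i.e. version 10.4.) | ||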
| 41 | |||
| 42 | // Indicates possible types of capabilities that can be specified. | ||
| 43 | enum class CapabilityType : u32 { | ||
| 44 | Unset = 0U, | ||
| 45 | PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1, | ||
| 46 | Syscall = (1U << CapabilityOffset_Syscall) - 1, | ||
| 47 | MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1, | ||
| 48 | MapIO = (1U << CapabilityOffset_MapIO) - 1, | ||
| 49 | MapRegion = (1U << CapabilityOffset_MapRegion) - 1, | ||
| 50 | Interrupt = (1U << CapabilityOffset_Interrupt) - 1, | ||
| 51 | ProgramType = (1U << CapabilityOffset_ProgramType) - 1, | ||
| 52 | KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1, | ||
| 53 | HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1, | ||
| 54 | Debug = (1U << CapabilityOffset_Debug) - 1, | ||
| 55 | Ignorable = 0xFFFFFFFFU, | ||
| 56 | }; | ||
| 57 | |||
| 58 | // clang-format on | ||
| 59 | |||
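| | // (value + 1) carries through the trailing set bits, leaving a single bit at | ||
| | // value's lowest clear position; ANDing with ~value isolates that bit, and | ||
| | // subtracting 1 recovers the run of low set bits that tags the type, with | ||
| | // any payload bits above it masked off. | ||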
| 60 | constexpr CapabilityType GetCapabilityType(u32 value) { | ||
| 61 | return static_cast<CapabilityType>((~value & (value + 1)) - 1); | ||
| 62 | } | ||
| 63 | |||
| 64 | u32 GetFlagBitOffset(CapabilityType type) { | ||
| 65 | const auto value = static_cast<u32>(type); | ||
| 66 | return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value))); | ||
| 67 | } | ||
| 68 | |||
| 69 | } // Anonymous namespace | ||
| 70 | |||
| 71 | Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities, | ||
| 72 | std::size_t num_capabilities, | ||
| 73 | KPageTable& page_table) { | ||
| 74 | Clear(); | ||
| 75 | |||
| 76 | // Allow all cores and priorities. | ||
| 77 | core_mask = 0xF; | ||
| 78 | priority_mask = 0xFFFFFFFFFFFFFFFF; | ||
| 79 | kernel_version = PackedKernelVersion; | ||
| 80 | |||
| 81 | return ParseCapabilities(capabilities, num_capabilities, page_table); | ||
| 82 | } | ||
| 83 | |||
| 84 | Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities, | ||
| 85 | std::size_t num_capabilities, | ||
| 86 | KPageTable& page_table) { | ||
| 87 | Clear(); | ||
| 88 | |||
| 89 | return ParseCapabilities(capabilities, num_capabilities, page_table); | ||
| 90 | } | ||
| 91 | |||
| 92 | void ProcessCapabilities::InitializeForMetadatalessProcess() { | ||
| 93 | // Allow all cores and priorities | ||
| 94 | core_mask = 0xF; | ||
| 95 | priority_mask = 0xFFFFFFFFFFFFFFFF; | ||
| 96 | kernel_version = PackedKernelVersion; | ||
| 97 | |||
| 98 | // Allow all system calls and interrupts. | ||
| 99 | svc_capabilities.set(); | ||
| 100 | interrupt_capabilities.set(); | ||
| 101 | |||
| 102 | // Allow using the maximum possible number of handles | ||
| 103 | handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize); | ||
| 104 | |||
| 105 | // Allow all debugging capabilities. | ||
| 106 | is_debuggable = true; | ||
| 107 | can_force_debug = true; | ||
| 108 | } | ||
| 109 | |||
| 110 | Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, | ||
| 111 | KPageTable& page_table) { | ||
| 112 | u32 set_flags = 0; | ||
| 113 | u32 set_svc_bits = 0; | ||
| 114 | |||
| 115 | for (std::size_t i = 0; i < num_capabilities; ++i) { | ||
| 116 | const u32 descriptor = capabilities[i]; | ||
| 117 | const auto type = GetCapabilityType(descriptor); | ||
| 118 | |||
| 119 | if (type == CapabilityType::MapPhysical) { | ||
| 120 | i++; | ||
| 121 | |||
| 122 | // The MapPhysical type uses two descriptor flags for its parameters. | ||
| 123 | // If there's only one, then there's a problem. | ||
| 124 | if (i >= num_capabilities) { | ||
| 125 | LOG_ERROR(Kernel, "Invalid combination! i={}", i); | ||
| 126 | return ResultInvalidCombination; | ||
| 127 | } | ||
| 128 | |||
| 129 | const auto size_flags = capabilities[i]; | ||
| 130 | if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) { | ||
| 131 | LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags); | ||
| 132 | return ResultInvalidCombination; | ||
| 133 | } | ||
| 134 | |||
| 135 | const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table); | ||
| 136 | if (result.IsError()) { | ||
| 137 | LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}", | ||
| 138 | descriptor, size_flags); | ||
| 139 | return result; | ||
| 140 | } | ||
| 141 | } else { | ||
| 142 | const auto result = | ||
| 143 | ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table); | ||
| 144 | if (result.IsError()) { | ||
| 145 | LOG_ERROR( | ||
| 146 | Kernel, | ||
| 147 | "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}", | ||
| 148 | set_flags, set_svc_bits, descriptor); | ||
| 149 | return result; | ||
| 150 | } | ||
| 151 | } | ||
| 152 | } | ||
| 153 | |||
| 154 | return ResultSuccess; | ||
| 155 | } | ||
| 156 | |||
| 157 | Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, | ||
| 158 | KPageTable& page_table) { | ||
| 159 | const auto type = GetCapabilityType(flag); | ||
| 160 | |||
| 161 | if (type == CapabilityType::Unset) { | ||
| 162 | return ResultInvalidArgument; | ||
| 163 | } | ||
| 164 | |||
| 165 | // Bail early on ignorable entries; as one would expect, | ||
| 166 | // ignorable descriptors can simply be ignored. | ||
| 167 | if (type == CapabilityType::Ignorable) { | ||
| 168 | return ResultSuccess; | ||
| 169 | } | ||
| 170 | |||
| 171 | // Ensure that the given flag hasn't already been initialized. | ||
| 172 | // If it has been, then bail. | ||
| 173 | const u32 flag_length = GetFlagBitOffset(type); | ||
| 174 | const u32 set_flag = 1U << flag_length; | ||
| 175 | if ((set_flag & set_flags & InitializeOnceMask) != 0) { | ||
| 176 | LOG_ERROR(Kernel, | ||
| 177 | "Attempted to initialize flags that may only be initialized once. set_flags={}", | ||
| 178 | set_flags); | ||
| 179 | return ResultInvalidCombination; | ||
| 180 | } | ||
| 181 | set_flags |= set_flag; | ||
| 182 | |||
| 183 | switch (type) { | ||
| 184 | case CapabilityType::PriorityAndCoreNum: | ||
| 185 | return HandlePriorityCoreNumFlags(flag); | ||
| 186 | case CapabilityType::Syscall: | ||
| 187 | return HandleSyscallFlags(set_svc_bits, flag); | ||
| 188 | case CapabilityType::MapIO: | ||
| 189 | return HandleMapIOFlags(flag, page_table); | ||
| 190 | case CapabilityType::MapRegion: | ||
| 191 | return HandleMapRegionFlags(flag, page_table); | ||
| 192 | case CapabilityType::Interrupt: | ||
| 193 | return HandleInterruptFlags(flag); | ||
| 194 | case CapabilityType::ProgramType: | ||
| 195 | return HandleProgramTypeFlags(flag); | ||
| 196 | case CapabilityType::KernelVersion: | ||
| 197 | return HandleKernelVersionFlags(flag); | ||
| 198 | case CapabilityType::HandleTableSize: | ||
| 199 | return HandleHandleTableFlags(flag); | ||
| 200 | case CapabilityType::Debug: | ||
| 201 | return HandleDebugFlags(flag); | ||
| 202 | default: | ||
| 203 | break; | ||
| 204 | } | ||
| 205 | |||
| 206 | LOG_ERROR(Kernel, "Invalid capability type! type={}", type); | ||
| 207 | return ResultInvalidArgument; | ||
| 208 | } | ||
| 209 | |||
| 210 | void ProcessCapabilities::Clear() { | ||
| 211 | svc_capabilities.reset(); | ||
| 212 | interrupt_capabilities.reset(); | ||
| 213 | |||
| 214 | core_mask = 0; | ||
| 215 | priority_mask = 0; | ||
| 216 | |||
| 217 | handle_table_size = 0; | ||
| 218 | kernel_version = 0; | ||
| 219 | |||
| 220 | program_type = ProgramType::SysModule; | ||
| 221 | |||
| 222 | is_debuggable = false; | ||
| 223 | can_force_debug = false; | ||
| 224 | } | ||
| 225 | |||
| 226 | Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) { | ||
| 227 | if (priority_mask != 0 || core_mask != 0) { | ||
| 228 | LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}", | ||
| 229 | priority_mask, core_mask); | ||
| 230 | return ResultInvalidArgument; | ||
| 231 | } | ||
| 232 | |||
| 233 | const u32 core_num_min = (flags >> 16) & 0xFF; | ||
| 234 | const u32 core_num_max = (flags >> 24) & 0xFF; | ||
| 235 | if (core_num_min > core_num_max) { | ||
| 236 | LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}", | ||
| 237 | core_num_min, core_num_max); | ||
| 238 | return ResultInvalidCombination; | ||
| 239 | } | ||
| 240 | |||
| 241 | const u32 priority_min = (flags >> 10) & 0x3F; | ||
| 242 | const u32 priority_max = (flags >> 4) & 0x3F; | ||
| 243 | if (priority_min > priority_max) { | ||
| 244 | LOG_ERROR(Kernel, | ||
| 245 | "Priority min is greater than priority max! priority_min={}, priority_max={}", | ||
| 246 | priority_min, priority_max); | ||
| 247 | return ResultInvalidCombination; | ||
| 248 | } | ||
| 249 | |||
| 250 | // The Switch only has 4 usable cores. | ||
| 251 | if (core_num_max >= 4) { | ||
| 252 | LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max); | ||
| 253 | return ResultInvalidCoreId; | ||
| 254 | } | ||
| 255 | |||
| 256 | const auto make_mask = [](u64 min, u64 max) { | ||
| 257 | const u64 range = max - min + 1; | ||
| 258 | const u64 mask = (1ULL << range) - 1; | ||
| 259 | |||
| 260 | return mask << min; | ||
| 261 | }; | ||
| 262 | |||
| 263 | core_mask = make_mask(core_num_min, core_num_max); | ||
| 264 | priority_mask = make_mask(priority_min, priority_max); | ||
| 265 | return ResultSuccess; | ||
| 266 | } | ||
| 267 | |||
| 268 | Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) { | ||
| 269 | const u32 index = flags >> 29; | ||
| 270 | const u32 svc_bit = 1U << index; | ||
| 271 | |||
| 272 | // If we've already set this svc before, bail. | ||
| 273 | if ((set_svc_bits & svc_bit) != 0) { | ||
| 274 | return ResultInvalidCombination; | ||
| 275 | } | ||
| 276 | set_svc_bits |= svc_bit; | ||
| 277 | |||
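| | // Bits 5-28 hold a 24-bit mask of allowed SVCs, numbered from index * 24. | ||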
| 278 | const u32 svc_mask = (flags >> 5) & 0xFFFFFF; | ||
| 279 | for (u32 i = 0; i < 24; ++i) { | ||
| 280 | const u32 svc_number = index * 24 + i; | ||
| 281 | |||
| 282 | if ((svc_mask & (1U << i)) == 0) { | ||
| 283 | continue; | ||
| 284 | } | ||
| 285 | |||
| 286 | svc_capabilities[svc_number] = true; | ||
| 287 | } | ||
| 288 | |||
| 289 | return ResultSuccess; | ||
| 290 | } | ||
| 291 | |||
| 292 | Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags, | ||
| 293 | KPageTable& page_table) { | ||
| 294 | // TODO(Lioncache): Implement once the memory manager can handle this. | ||
| 295 | return ResultSuccess; | ||
| 296 | } | ||
| 297 | |||
| 298 | Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) { | ||
| 299 | // TODO(Lioncache): Implement once the memory manager can handle this. | ||
| 300 | return ResultSuccess; | ||
| 301 | } | ||
| 302 | |||
| 303 | Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) { | ||
| 304 | // TODO(Lioncache): Implement once the memory manager can handle this. | ||
| 305 | return ResultSuccess; | ||
| 306 | } | ||
| 307 | |||
| 308 | Result ProcessCapabilities::HandleInterruptFlags(u32 flags) { | ||
| 309 | constexpr u32 interrupt_ignore_value = 0x3FF; | ||
| 310 | const u32 interrupt0 = (flags >> 12) & 0x3FF; | ||
| 311 | const u32 interrupt1 = (flags >> 22) & 0x3FF; | ||
| 312 | |||
| 313 | for (u32 interrupt : {interrupt0, interrupt1}) { | ||
| 314 | if (interrupt == interrupt_ignore_value) { | ||
| 315 | continue; | ||
| 316 | } | ||
| 317 | |||
| 318 | // NOTE: | ||
| 319 | // This should be checking a generic interrupt controller value | ||
| 320 | // as part of the calculation; however, given that we don't currently | ||
| 321 | // emulate one, it's sufficient to mark every interrupt as defined. | ||
| 322 | |||
| 323 | if (interrupt >= interrupt_capabilities.size()) { | ||
| 324 | LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}", | ||
| 325 | interrupt); | ||
| 326 | return ResultOutOfRange; | ||
| 327 | } | ||
| 328 | |||
| 329 | interrupt_capabilities[interrupt] = true; | ||
| 330 | } | ||
| 331 | |||
| 332 | return ResultSuccess; | ||
| 333 | } | ||
| 334 | |||
| 335 | Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) { | ||
| 336 | const u32 reserved = flags >> 17; | ||
| 337 | if (reserved != 0) { | ||
| 338 | LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); | ||
| 339 | return ResultReservedUsed; | ||
| 340 | } | ||
| 341 | |||
| 342 | program_type = static_cast<ProgramType>((flags >> 14) & 0b111); | ||
| 343 | return ResultSuccess; | ||
| 344 | } | ||
| 345 | |||
| 346 | Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) { | ||
| 347 | // Yes, the internal member variable is checked in the actual kernel here. | ||
| 348 | // This might look odd for options that are only allowed to be initialized | ||
| 349 | // just once; however, the kernel has a separate initialization function for | ||
| 350 | // kernel processes and userland processes. The kernel variant sets this | ||
| 351 | // member variable ahead of time. | ||
| 352 | |||
| 353 | const u32 major_version = kernel_version >> 19; | ||
| 354 | |||
| 355 | if (major_version != 0 || flags < 0x80000) { | ||
| 356 | LOG_ERROR(Kernel, | ||
| 357 | "Kernel version is non zero or flags are too small! major_version={}, flags={}", | ||
| 358 | major_version, flags); | ||
| 359 | return ResultInvalidArgument; | ||
| 360 | } | ||
| 361 | |||
| 362 | kernel_version = flags; | ||
| 363 | return ResultSuccess; | ||
| 364 | } | ||
| 365 | |||
| 366 | Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) { | ||
| 367 | const u32 reserved = flags >> 26; | ||
| 368 | if (reserved != 0) { | ||
| 369 | LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); | ||
| 370 | return ResultReservedUsed; | ||
| 371 | } | ||
| 372 | |||
| 373 | handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); | ||
| 374 | return ResultSuccess; | ||
| 375 | } | ||
| 376 | |||
| 377 | Result ProcessCapabilities::HandleDebugFlags(u32 flags) { | ||
| 378 | const u32 reserved = flags >> 19; | ||
| 379 | if (reserved != 0) { | ||
| 380 | LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); | ||
| 381 | return ResultReservedUsed; | ||
| 382 | } | ||
| 383 | |||
| 384 | is_debuggable = (flags & 0x20000) != 0; | ||
| 385 | can_force_debug = (flags & 0x40000) != 0; | ||
| 386 | return ResultSuccess; | ||
| 387 | } | ||
| 388 | |||
| 389 | } // namespace Kernel | ||
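For reference, a minimal standalone sketch of the trailing-ones decoding that GetCapabilityType performs; LowMask is an illustrative name and the test descriptors are made up:

#include <cstdint>

// (~v & (v + 1)) - 1 keeps exactly the contiguous run of 1s at the bottom of v
// and clears everything above it, which is how descriptor types are tagged.
constexpr std::uint32_t LowMask(std::uint32_t v) {
    return (~v & (v + 1)) - 1;
}

// A Syscall descriptor sets the low 4 bits (0b1111); everything above bit 4 is
// payload, so LowMask recovers the type tag regardless of the payload contents.
static_assert(LowMask(0b0110'1111u) == 0b1111u);
// A PriorityAndCoreNum descriptor sets the low 3 bits (0b0111).
static_assert(LowMask(0b1010'0111u) == 0b0111u);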
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h deleted file mode 100644 index ff05dc5ff..000000000 --- a/src/core/hle/kernel/process_capability.h +++ /dev/null | |||
| @@ -1,266 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <bitset> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | union Result; | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | class KPageTable; | ||
| 15 | |||
| 16 | /// The possible types of programs that may be indicated | ||
| 17 | /// by the program type capability descriptor. | ||
| 18 | enum class ProgramType { | ||
| 19 | SysModule, | ||
| 20 | Application, | ||
| 21 | Applet, | ||
| 22 | }; | ||
| 23 | |||
| 24 | /// Handles kernel capability descriptors that are provided by | ||
| 25 | /// application metadata. These descriptors provide information | ||
| 26 | /// that alters certain parameters for the kernel process instance | ||
| 27 | /// that will run said application (or applet). | ||
| 28 | /// | ||
| 29 | /// Capabilities are a sequence of flag descriptors that indicate various | ||
| 30 | /// configurations and constraints for a particular process. | ||
| 31 | /// | ||
| 32 | /// Flag types are indicated by a sequence of set low bits. E.g. the | ||
| 33 | /// types are indicated with the low bits as follows (where x indicates "don't care"): | ||
| 34 | /// | ||
| 35 | /// - Priority and core mask : 0bxxxxxxxxxxxx0111 | ||
| 36 | /// - Allowed service call mask: 0bxxxxxxxxxxx01111 | ||
| 37 | /// - Map physical memory : 0bxxxxxxxxx0111111 | ||
| 38 | /// - Map IO memory : 0bxxxxxxxx01111111 | ||
| 39 | /// - Interrupts : 0bxxxx011111111111 | ||
| 40 | /// - Application type : 0bxx01111111111111 | ||
| 41 | /// - Kernel version : 0bx011111111111111 | ||
| 42 | /// - Handle table size : 0b0111111111111111 | ||
| 43 | /// - Debugger flags : 0b1111111111111111 | ||
| 44 | /// | ||
| 45 | /// Each mask is essentially the value at a given bit offset, subtracted by 1. | ||
| 46 | /// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000) | ||
| 47 | /// subtracted by one (7 -> 0b0111) | ||
| 48 | /// | ||
| 49 | /// An example of a bit layout (using the map physical layout): | ||
| 50 | /// <example> | ||
| 51 | /// The MapPhysical type indicates a sequence entry pair of: | ||
| 52 | /// | ||
| 53 | /// [initial, memory_flags], where: | ||
| 54 | /// | ||
| 55 | /// initial: | ||
| 56 | /// bits: | ||
| 57 | /// 7-24: Starting page to map memory at. | ||
| 58 | /// 25 : Indicates if the memory should be mapped as read only. | ||
| 59 | /// | ||
| 60 | /// memory_flags: | ||
| 61 | /// bits: | ||
| 62 | /// 7-20 : Number of pages to map | ||
| 63 | /// 21-25: Seems to be reserved (still checked against though) | ||
| 64 | /// 26 : Whether the memory being mapped is IO memory or physical memory | ||
| 65 | /// </example> | ||
| 66 | /// | ||
| 67 | class ProcessCapabilities { | ||
| 68 | public: | ||
| 69 | using InterruptCapabilities = std::bitset<1024>; | ||
| 70 | using SyscallCapabilities = std::bitset<192>; | ||
| 71 | |||
| 72 | ProcessCapabilities() = default; | ||
| 73 | ProcessCapabilities(const ProcessCapabilities&) = delete; | ||
| 74 | ProcessCapabilities(ProcessCapabilities&&) = default; | ||
| 75 | |||
| 76 | ProcessCapabilities& operator=(const ProcessCapabilities&) = delete; | ||
| 77 | ProcessCapabilities& operator=(ProcessCapabilities&&) = default; | ||
| 78 | |||
| 79 | /// Initializes this process capabilities instance for a kernel process. | ||
| 80 | /// | ||
| 81 | /// @param capabilities The capabilities to parse | ||
| 82 | /// @param num_capabilities The number of capabilities to parse. | ||
| 83 | /// @param page_table The memory manager to use for handling any mapping-related | ||
| 84 | /// operations (such as mapping IO memory, etc). | ||
| 85 | /// | ||
| 86 | /// @returns ResultSuccess if this capabilities instance was able to be initialized, | ||
| 87 | /// otherwise, an error code upon failure. | ||
| 88 | /// | ||
| 89 | Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, | ||
| 90 | KPageTable& page_table); | ||
| 91 | |||
| 92 | /// Initializes this process capabilities instance for a userland process. | ||
| 93 | /// | ||
| 94 | /// @param capabilities The capabilities to parse. | ||
| 95 | /// @param num_capabilities The total number of capabilities to parse. | ||
| 96 | /// @param page_table The memory manager to use for handling any mapping-related | ||
| 97 | /// operations (such as mapping IO memory, etc). | ||
| 98 | /// | ||
| 99 | /// @returns ResultSuccess if this capabilities instance was able to be initialized, | ||
| 100 | /// otherwise, an error code upon failure. | ||
| 101 | /// | ||
| 102 | Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, | ||
| 103 | KPageTable& page_table); | ||
| 104 | |||
| 105 | /// Initializes this process capabilities instance for a process that does not | ||
| 106 | /// have any metadata to parse. | ||
| 107 | /// | ||
| 108 | /// This is necessary, as we allow running raw executables, and the internal | ||
| 109 | /// kernel process capabilities also determine what CPU cores the process is | ||
| 110 | /// allowed to run on, and what priorities are allowed for threads. It also | ||
| 111 | /// determines the max handle table size, what the program type is, whether or | ||
| 112 | /// not the process can be debugged, or whether it's possible for a process to | ||
| 113 | /// forcibly debug another process. | ||
| 114 | /// | ||
| 115 | /// Given the above, this essentially enables all capabilities across the board | ||
| 116 | /// for the process. It allows the process to: | ||
| 117 | /// | ||
| 118 | /// - Run on any core | ||
| 119 | /// - Use any thread priority | ||
| 120 | /// - Use the maximum number of handles a process is allowed to. | ||
| 121 | /// - Be debuggable | ||
| 122 | /// - Forcibly debug other processes. | ||
| 123 | /// | ||
| 124 | /// Note that this is not a behavior that the kernel allows a process to do via | ||
| 125 | /// a single function like this. This is yuzu-specific behavior to handle | ||
| 126 | /// executables with no capability descriptors whatsoever to derive behavior from. | ||
| 127 | /// It being yuzu-specific is why this is also not the default behavior and not | ||
| 128 | /// done by default in the constructor. | ||
| 129 | /// | ||
| 130 | void InitializeForMetadatalessProcess(); | ||
| 131 | |||
| 132 | /// Gets the allowable core mask | ||
| 133 | u64 GetCoreMask() const { | ||
| 134 | return core_mask; | ||
| 135 | } | ||
| 136 | |||
| 137 | /// Gets the allowable priority mask | ||
| 138 | u64 GetPriorityMask() const { | ||
| 139 | return priority_mask; | ||
| 140 | } | ||
| 141 | |||
| 142 | /// Gets the SVC access permission bits | ||
| 143 | const SyscallCapabilities& GetServiceCapabilities() const { | ||
| 144 | return svc_capabilities; | ||
| 145 | } | ||
| 146 | |||
| 147 | /// Gets the valid interrupt bits. | ||
| 148 | const InterruptCapabilities& GetInterruptCapabilities() const { | ||
| 149 | return interrupt_capabilities; | ||
| 150 | } | ||
| 151 | |||
| 152 | /// Gets the program type for this process. | ||
| 153 | ProgramType GetProgramType() const { | ||
| 154 | return program_type; | ||
| 155 | } | ||
| 156 | |||
| 157 | /// Gets the number of total allowable handles for the process' handle table. | ||
| 158 | s32 GetHandleTableSize() const { | ||
| 159 | return handle_table_size; | ||
| 160 | } | ||
| 161 | |||
| 162 | /// Gets the kernel version value. | ||
| 163 | u32 GetKernelVersion() const { | ||
| 164 | return kernel_version; | ||
| 165 | } | ||
| 166 | |||
| 167 | /// Whether or not this process can be debugged. | ||
| 168 | bool IsDebuggable() const { | ||
| 169 | return is_debuggable; | ||
| 170 | } | ||
| 171 | |||
| 172 | /// Whether or not this process can forcibly debug another | ||
| 173 | /// process, even if that process is not considered debuggable. | ||
| 174 | bool CanForceDebug() const { | ||
| 175 | return can_force_debug; | ||
| 176 | } | ||
| 177 | |||
| 178 | private: | ||
| 179 | /// Attempts to parse a given sequence of capability descriptors. | ||
| 180 | /// | ||
| 181 | /// @param capabilities The sequence of capability descriptors to parse. | ||
| 182 | /// @param num_capabilities The number of descriptors within the given sequence. | ||
| 183 | /// @param page_table The memory manager that will perform any memory | ||
| 184 | /// mapping if necessary. | ||
| 185 | /// | ||
| 186 | /// @return ResultSuccess if no errors occur, otherwise an error code. | ||
| 187 | /// | ||
| 188 | Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, | ||
| 189 | KPageTable& page_table); | ||
| 190 | |||
| 191 | /// Attempts to parse a capability descriptor that is only represented by a | ||
| 192 | /// single flag set. | ||
| 193 | /// | ||
| 194 | /// @param set_flags Running set of flags that are used to catch | ||
| 195 | /// flags being initialized more than once when they shouldn't be. | ||
| 196 | /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask. | ||
| 197 | /// @param flag The flag to attempt to parse. | ||
| 198 | /// @param page_table The memory manager that will perform any memory | ||
| 199 | /// mapping if necessary. | ||
| 200 | /// | ||
| 201 | /// @return ResultSuccess if no errors occurred, otherwise an error code. | ||
| 202 | /// | ||
| 203 | Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, | ||
| 204 | KPageTable& page_table); | ||
| 205 | |||
| 206 | /// Clears the internal state of this process capability instance. Necessary | ||
| 207 | /// to have a sane starting point, since we allow running executables without | ||
| 208 | /// configuration metadata. We assume a process is not going to have metadata, | ||
| 209 | /// and if it turns out that the process does, in fact, have metadata, then | ||
| 210 | /// we attempt to parse it. Thus, we need this to reset data members back to | ||
| 211 | /// a good state. | ||
| 212 | /// | ||
| 213 | /// DO NOT ever make this a public member function. This isn't an invariant | ||
| 214 | /// anything external should depend upon (and if anything comes to rely on it, | ||
| 215 | /// you should immediately be questioning the design of that thing, not this | ||
| 216 | /// class. If the kernel itself can run without depending on behavior like that, | ||
| 217 | /// then so can yuzu). | ||
| 218 | /// | ||
| 219 | void Clear(); | ||
| 220 | |||
| 221 | /// Handles flags related to the priority and core number capability flags. | ||
| 222 | Result HandlePriorityCoreNumFlags(u32 flags); | ||
| 223 | |||
| 224 | /// Handles flags related to determining the allowable SVC mask. | ||
| 225 | Result HandleSyscallFlags(u32& set_svc_bits, u32 flags); | ||
| 226 | |||
| 227 | /// Handles flags related to mapping physical memory pages. | ||
| 228 | Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table); | ||
| 229 | |||
| 230 | /// Handles flags related to mapping IO pages. | ||
| 231 | Result HandleMapIOFlags(u32 flags, KPageTable& page_table); | ||
| 232 | |||
| 233 | /// Handles flags related to mapping physical memory regions. | ||
| 234 | Result HandleMapRegionFlags(u32 flags, KPageTable& page_table); | ||
| 235 | |||
| 236 | /// Handles flags related to the interrupt capability flags. | ||
| 237 | Result HandleInterruptFlags(u32 flags); | ||
| 238 | |||
| 239 | /// Handles flags related to the program type. | ||
| 240 | Result HandleProgramTypeFlags(u32 flags); | ||
| 241 | |||
| 242 | /// Handles flags related to the handle table size. | ||
| 243 | Result HandleHandleTableFlags(u32 flags); | ||
| 244 | |||
| 245 | /// Handles flags related to the kernel version capability flags. | ||
| 246 | Result HandleKernelVersionFlags(u32 flags); | ||
| 247 | |||
| 248 | /// Handles flags related to debug-specific capabilities. | ||
| 249 | Result HandleDebugFlags(u32 flags); | ||
| 250 | |||
| 251 | SyscallCapabilities svc_capabilities; | ||
| 252 | InterruptCapabilities interrupt_capabilities; | ||
| 253 | |||
| 254 | u64 core_mask = 0; | ||
| 255 | u64 priority_mask = 0; | ||
| 256 | |||
| 257 | s32 handle_table_size = 0; | ||
| 258 | u32 kernel_version = 0; | ||
| 259 | |||
| 260 | ProgramType program_type = ProgramType::SysModule; | ||
| 261 | |||
| 262 | bool is_debuggable = false; | ||
| 263 | bool can_force_debug = false; | ||
| 264 | }; | ||
| 265 | |||
| 266 | } // namespace Kernel | ||
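A hypothetical decode of the MapPhysical descriptor pair documented in the class comment above; the struct and function names are illustrative, and the bit positions follow the <example> block:

#include <cstdint>

struct MapPhysicalEntry {
    std::uint32_t start_page; // Bits 7-24 of the first descriptor (18 bits).
    bool read_only;           // Bit 25 of the first descriptor.
    std::uint32_t num_pages;  // Bits 7-20 of the second descriptor (14 bits).
    bool is_io;               // Bit 26 of the second descriptor.
};

inline MapPhysicalEntry DecodeMapPhysical(std::uint32_t initial, std::uint32_t memory_flags) {
    MapPhysicalEntry entry{};
    entry.start_page = (initial >> 7) & 0x3FFFF;
    entry.read_only = ((initial >> 25) & 1) != 0;
    entry.num_pages = (memory_flags >> 7) & 0x3FFF;
    entry.is_io = ((memory_flags >> 26) & 1) != 0;
    return entry;
}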
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp index 97f1210de..4ca62860d 100644 --- a/src/core/hle/kernel/svc/svc_memory.cpp +++ b/src/core/hle/kernel/svc/svc_memory.cpp | |||
| @@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) { | |||
| 29 | // Helper function that performs the common sanity checks for svcMapMemory | 29 | // Helper function that performs the common sanity checks for svcMapMemory |
| 30 | // and svcUnmapMemory. This is doable, as both functions perform their sanitizing | 30 | // and svcUnmapMemory. This is doable, as both functions perform their sanitizing |
| 31 | // in the same order. | 31 | // in the same order. |
| 32 | Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) { | 32 | Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr, |
| 33 | u64 size) { | ||
| 33 | if (!Common::Is4KBAligned(dst_addr)) { | 34 | if (!Common::Is4KBAligned(dst_addr)) { |
| 34 | LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); | 35 | LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); |
| 35 | R_THROW(ResultInvalidAddress); | 36 | R_THROW(ResultInvalidAddress); |
| @@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, | |||
| 123 | R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); | 124 | R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); |
| 124 | 125 | ||
| 125 | // Set the memory attribute. | 126 | // Set the memory attribute. |
| 126 | R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr)); | 127 | R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask), |
| 128 | static_cast<KMemoryAttribute>(attr))); | ||
| 127 | } | 129 | } |
| 128 | 130 | ||
| 129 | /// Maps a memory range into a different range. | 131 | /// Maps a memory range into a different range. |
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp index 99330d02a..793e9f8d0 100644 --- a/src/core/hle/kernel/svc/svc_physical_memory.cpp +++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp | |||
| @@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) { | |||
| 16 | R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); | 16 | R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); |
| 17 | 17 | ||
| 18 | // Set the heap size. | 18 | // Set the heap size. |
| 19 | R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size)); | 19 | KProcessAddress address{}; |
| 20 | R_TRY(GetCurrentProcess(system.Kernel()) | ||
| 21 | .GetPageTable() | ||
| 22 | .SetHeapSize(std::addressof(address), size)); | ||
| 23 | |||
| 24 | // We succeeded. | ||
| 25 | *out_address = GetInteger(address); | ||
| 26 | R_SUCCEED(); | ||
| 20 | } | 27 | } |
| 21 | 28 | ||
| 22 | /// Maps memory at a desired address | 29 | /// Maps memory at a desired address |
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp index 07cd48175..e1427947b 100644 --- a/src/core/hle/kernel/svc/svc_process_memory.cpp +++ b/src/core/hle/kernel/svc/svc_process_memory.cpp | |||
| @@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d | |||
| 247 | R_THROW(ResultInvalidCurrentMemory); | 247 | R_THROW(ResultInvalidCurrentMemory); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size, | 250 | R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size)); |
| 251 | KPageTable::ICacheInvalidationStrategy::InvalidateAll)); | ||
| 252 | } | 251 | } |
| 253 | 252 | ||
| 254 | Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, | 253 | Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, |
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp index 51af06e97..816dcb8d0 100644 --- a/src/core/hle/kernel/svc/svc_query_memory.cpp +++ b/src/core/hle/kernel/svc/svc_query_memory.cpp | |||
| @@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn | |||
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | auto& current_memory{GetCurrentMemory(system.Kernel())}; | 33 | auto& current_memory{GetCurrentMemory(system.Kernel())}; |
| 34 | const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()}; | ||
| 35 | 34 | ||
| 36 | current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info)); | 35 | KMemoryInfo mem_info; |
| 36 | R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address)); | ||
| 37 | 37 | ||
| 38 | //! This is supposed to be part of the QueryInfo call. | 38 | const auto svc_mem_info = mem_info.GetSvcMemoryInfo(); |
| 39 | *out_page_info = {}; | 39 | current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info)); |
| 40 | 40 | ||
| 41 | R_SUCCEED(); | 41 | R_SUCCEED(); |
| 42 | } | 42 | } |
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index dd0b27f47..749f51f69 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess; | |||
| 407 | 407 | ||
| 408 | /// Evaluates a boolean expression, and succeeds if that expression is true. | 408 | /// Evaluates a boolean expression, and succeeds if that expression is true. |
| 409 | #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) | 409 | #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) |
| 410 | |||
| 411 | #define R_TRY_CATCH(res_expr) \ | ||
| 412 | { \ | ||
| 413 | const auto R_CURRENT_RESULT = (res_expr); \ | ||
| 414 | if (R_FAILED(R_CURRENT_RESULT)) { \ | ||
| 415 | if (false) | ||
| 416 | |||
| 417 | #define R_END_TRY_CATCH \ | ||
| 418 | else if (R_FAILED(R_CURRENT_RESULT)) { \ | ||
| 419 | R_THROW(R_CURRENT_RESULT); \ | ||
| 420 | } \ | ||
| 421 | } \ | ||
| 422 | } | ||
| 423 | |||
| 424 | #define R_CATCH_ALL() \ | ||
| 425 | } \ | ||
| 426 | else if (R_FAILED(R_CURRENT_RESULT)) { \ | ||
| 427 | if (true) | ||
| 428 | |||
| 429 | #define R_CATCH(res_expr) \ | ||
| 430 | } \ | ||
| 431 | else if ((res_expr) == (R_CURRENT_RESULT)) { \ | ||
| 432 | if (true) | ||
| 433 | |||
| 434 | #define R_CONVERT(catch_type, convert_type) \ | ||
| 435 | R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); } | ||
| 436 | |||
| 437 | #define R_CONVERT_ALL(convert_type) \ | ||
| 438 | R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); } | ||
| 439 | |||
| 440 | #define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr)) | ||
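These macros build an if/else chain that mimics try/catch over Result values. A hedged usage sketch — the guarded call and the handler mapping are illustrative, not taken from this change:

Result MapSafely(KProcessPageTable& pt, u64 dst, u64 src, u64 size) {
    R_TRY_CATCH(pt.MapMemory(dst, src, size)) {
        // Remap one specific failure onto a different result code; any other
        // failure falls through and is re-thrown by R_END_TRY_CATCH.
        R_CONVERT(ResultInvalidCurrentMemory, ResultInvalidAddress)
    } R_END_TRY_CATCH;
    R_SUCCEED();
}

The dangling if (false) in R_TRY_CATCH exists so that every handler, including the first, can open by closing the previous block, keeping the chain syntactically uniform.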
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index c73035c77..97b6a9385 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp | |||
| @@ -286,9 +286,14 @@ public: | |||
| 286 | rb.Push(ResultSuccess); | 286 | rb.Push(ResultSuccess); |
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const { | 289 | bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start, |
| 290 | std::size_t size) const { | ||
| 290 | const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; | 291 | const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; |
| 291 | const auto start_info{page_table.QueryInfo(start - 1)}; | 292 | |
| 293 | Kernel::KMemoryInfo start_info; | ||
| 294 | Kernel::Svc::PageInfo page_info; | ||
| 295 | R_ASSERT( | ||
| 296 | page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1)); | ||
| 292 | 297 | ||
| 293 | if (start_info.GetState() != Kernel::KMemoryState::Free) { | 298 | if (start_info.GetState() != Kernel::KMemoryState::Free) { |
| 294 | return {}; | 299 | return {}; |
| @@ -298,7 +303,9 @@ public: | |||
| 298 | return {}; | 303 | return {}; |
| 299 | } | 304 | } |
| 300 | 305 | ||
| 301 | const auto end_info{page_table.QueryInfo(start + size)}; | 306 | Kernel::KMemoryInfo end_info; |
| 307 | R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info), | ||
| 308 | start + size)); | ||
| 302 | 309 | ||
| 303 | if (end_info.GetState() != Kernel::KMemoryState::Free) { | 310 | if (end_info.GetState() != Kernel::KMemoryState::Free) { |
| 304 | return {}; | 311 | return {}; |
| @@ -307,7 +314,7 @@ public: | |||
| 307 | return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); | 314 | return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); |
| 308 | } | 315 | } |
| 309 | 316 | ||
| 310 | Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) { | 317 | Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) { |
| 311 | size = Common::AlignUp(size, Kernel::PageSize); | 318 | size = Common::AlignUp(size, Kernel::PageSize); |
| 312 | size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; | 319 | size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; |
| 313 | 320 | ||
| @@ -391,12 +398,8 @@ public: | |||
| 391 | 398 | ||
| 392 | if (bss_size) { | 399 | if (bss_size) { |
| 393 | auto block_guard = detail::ScopeExit([&] { | 400 | auto block_guard = detail::ScopeExit([&] { |
| 394 | page_table.UnmapCodeMemory( | 401 | page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size); |
| 395 | addr + nro_size, bss_addr, bss_size, | 402 | page_table.UnmapCodeMemory(addr, nro_addr, nro_size); |
| 396 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange); | ||
| 397 | page_table.UnmapCodeMemory( | ||
| 398 | addr, nro_addr, nro_size, | ||
| 399 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange); | ||
| 400 | }); | 403 | }); |
| 401 | 404 | ||
| 402 | const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)}; | 405 | const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)}; |
| @@ -578,21 +581,17 @@ public: | |||
| 578 | auto& page_table{system.ApplicationProcess()->GetPageTable()}; | 581 | auto& page_table{system.ApplicationProcess()->GetPageTable()}; |
| 579 | 582 | ||
| 580 | if (info.bss_size != 0) { | 583 | if (info.bss_size != 0) { |
| 581 | R_TRY(page_table.UnmapCodeMemory( | 584 | R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size + |
| 582 | info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address, | 585 | info.data_size, |
| 583 | info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | 586 | info.bss_address, info.bss_size)); |
| 584 | } | 587 | } |
| 585 | 588 | ||
| 586 | R_TRY(page_table.UnmapCodeMemory( | 589 | R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size, |
| 587 | info.nro_address + info.text_size + info.ro_size, | 590 | info.src_addr + info.text_size + info.ro_size, |
| 588 | info.src_addr + info.text_size + info.ro_size, info.data_size, | 591 | info.data_size)); |
| 589 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | 592 | R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size, |
| 590 | R_TRY(page_table.UnmapCodeMemory( | 593 | info.src_addr + info.text_size, info.ro_size)); |
| 591 | info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size, | 594 | R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size)); |
| 592 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | ||
| 593 | R_TRY(page_table.UnmapCodeMemory( | ||
| 594 | info.nro_address, info.src_addr, info.text_size, | ||
| 595 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | ||
| 596 | return ResultSuccess; | 595 | return ResultSuccess; |
| 597 | } | 596 | } |
| 598 | 597 | ||
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index fa5273402..84b60a928 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -41,7 +41,7 @@ struct Memory::Impl { | |||
| 41 | explicit Impl(Core::System& system_) : system{system_} {} | 41 | explicit Impl(Core::System& system_) : system{system_} {} |
| 42 | 42 | ||
| 43 | void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { | 43 | void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { |
| 44 | current_page_table = &process.GetPageTable().PageTableImpl(); | 44 | current_page_table = &process.GetPageTable().GetImpl(); |
| 45 | current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); | 45 | current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); |
| 46 | 46 | ||
| 47 | const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth(); | 47 | const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth(); |
| @@ -195,7 +195,7 @@ struct Memory::Impl { | |||
| 195 | 195 | ||
| 196 | bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, | 196 | bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, |
| 197 | auto on_memory, auto on_rasterizer, auto increment) { | 197 | auto on_memory, auto on_rasterizer, auto increment) { |
| 198 | const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl(); | 198 | const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl(); |
| 199 | std::size_t remaining_size = size; | 199 | std::size_t remaining_size = size; |
| 200 | std::size_t page_index = addr >> YUZU_PAGEBITS; | 200 | std::size_t page_index = addr >> YUZU_PAGEBITS; |
| 201 | std::size_t page_offset = addr & YUZU_PAGEMASK; | 201 | std::size_t page_offset = addr & YUZU_PAGEMASK; |
| @@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b | |||
| 826 | 826 | ||
| 827 | bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { | 827 | bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { |
| 828 | const Kernel::KProcess& process = *system.ApplicationProcess(); | 828 | const Kernel::KProcess& process = *system.ApplicationProcess(); |
| 829 | const auto& page_table = process.GetPageTable().PageTableImpl(); | 829 | const auto& page_table = process.GetPageTable().GetImpl(); |
| 830 | const size_t page = vaddr >> YUZU_PAGEBITS; | 830 | const size_t page = vaddr >> YUZU_PAGEBITS; |
| 831 | if (page >= page_table.pointers.size()) { | 831 | if (page >= page_table.pointers.size()) { |
| 832 | return false; | 832 | return false; |