Diffstat:
 src/core/hle/kernel/k_page_group.h   |   1 +
 src/core/hle/kernel/k_page_table.cpp | 107 ++++++++++++++++++-----------------
 src/core/hle/kernel/k_page_table.h   |   4 ----
 3 files changed, 54 insertions(+), 58 deletions(-)
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index b0b243e7d..c07f17663 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -14,6 +14,7 @@
 namespace Kernel {
 
 class KBlockInfoManager;
+class KernelCore;
 class KPageGroup;
 
 class KBlockInfo {
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 83131774c..9c7ac22dc 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -941,9 +941,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
-            // HACK: Manually close the pages.
-            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
-
             ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
                            KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -1019,9 +1016,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(start_partial_page, 1);
-
         // Update tracking extents.
         cur_mapped_addr += PageSize;
         cur_block_addr += PageSize;
@@ -1050,9 +1044,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
                 R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
                               cur_block_addr));
 
-                // HACK: Manually open the pages.
-                HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
-
                 // Update tracking extents.
                 cur_mapped_addr += cur_block_size;
                 cur_block_addr = next_entry.phys_addr;
@@ -1072,9 +1063,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
         R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
                       cur_block_addr));
 
-        // HACK: Manually open the pages.
-        HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
-
         // Update tracking extents.
         cur_mapped_addr += last_block_size;
         cur_block_addr += last_block_size;
@@ -1106,9 +1094,6 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
 
         // Map the page.
         R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
-
-        // HACK: Manually open the pages.
-        HACK_OpenPages(end_partial_page, 1);
     }
 
     // Update memory blocks to reflect our changes
@@ -1210,9 +1195,6 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
     const size_t aligned_size = aligned_end - aligned_start;
     const size_t aligned_num_pages = aligned_size / PageSize;
 
-    // HACK: Manually close the pages.
-    HACK_ClosePages(aligned_start, aligned_num_pages);
-
     // Unmap the pages.
     R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
 
@@ -1500,17 +1482,6 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
     }
 }
 
-void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
-    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
-}
-
-void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
-    for (size_t index = 0; index < num_pages; ++index) {
-        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
-        m_system.Kernel().MemoryManager().Close(paddr, 1);
-    }
-}
-
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1679,9 +1650,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                                  last_unmap_address + 1 - cur_address) /
                                         PageSize;
 
-                    // HACK: Manually close the pages.
-                    HACK_ClosePages(cur_address, cur_pages);
-
                     // Unmap.
                     ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                    OperationType::Unmap)
@@ -1741,10 +1709,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 // Map whatever we can.
                 const size_t cur_pages = std::min(pg_pages, map_pages);
                 R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                              OperationType::Map, pg_phys_addr));
-
-                // HACK: Manually open the pages.
-                HACK_OpenPages(pg_phys_addr, cur_pages);
+                              OperationType::MapFirst, pg_phys_addr));
 
                 // Advance.
                 cur_address += cur_pages * PageSize;
@@ -1887,9 +1852,6 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                               last_address + 1 - cur_address) /
                                      PageSize;
 
-            // HACK: Manually close the pages.
-            HACK_ClosePages(cur_address, cur_pages);
-
             // Unmap.
             ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
@@ -2609,11 +2571,23 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group{m_kernel, m_block_info_manager};
-        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
-            &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
-        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
+        // Create a page group to hold the pages we allocate.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
+            &pg, needed_num_pages,
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
+
+        // Ensure that the page group is closed when we're done working with it.
+        SCOPE_EXIT({ pg.Close(); });
+
+        // Clear all pages.
+        for (const auto& it : pg) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
+                        m_heap_fill_value, it.GetSize());
+        }
+
+        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
     }
 
     // Update the blocks.
@@ -2794,19 +2768,28 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    for (const auto& node : page_group) {
-        const size_t size{node.GetNumPages() * PageSize};
+    switch (operation) {
+    case OperationType::MapGroup: {
+        // We want to maintain a new reference to every page in the group.
+        KScopedPageGroup spg(page_group);
+
+        for (const auto& node : page_group) {
+            const size_t size{node.GetNumPages() * PageSize};
 
-        switch (operation) {
-        case OperationType::MapGroup:
+            // Map the pages.
             m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-            break;
-        default:
-            ASSERT(false);
-            break;
+
+            addr += size;
         }
 
-        addr += size;
+        // We succeeded! We want to persist the reference to the pages.
+        spg.CancelClose();
+
+        break;
+    }
+    default:
+        ASSERT(false);
+        break;
     }
 
     R_SUCCEED();
@@ -2821,13 +2804,29 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     ASSERT(ContainsPages(addr, num_pages));
 
     switch (operation) {
-    case OperationType::Unmap:
+    case OperationType::Unmap: {
+        // Ensure that any pages we track close on exit.
+        KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
+        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+        this->AddRegionToPages(addr, num_pages, pages_to_close);
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
+    }
+    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
         m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+
+        // Open references to pages, if we should.
+        if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
+            if (operation == OperationType::MapFirst) {
+                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
+            } else {
+                m_kernel.MemoryManager().Open(map_addr, num_pages);
+            }
+        }
         break;
     }
     case OperationType::Separate: {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 5df5ba1a9..0a454b05b 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -265,10 +265,6 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);
 
-    // HACK: These will be removed once we automatically manage page reference counts.
-    void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
-    void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
-
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
 
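Note on the pattern this diff relies on: the MapGroup path above constructs a KScopedPageGroup, maps each block, and then calls CancelClose() only after all blocks mapped successfully. The sketch below is a minimal, self-contained illustration of that scoped-reference idea under the assumption that the guard opens references on construction and drops them on scope exit unless cancelled; FakePageGroup and ScopedPageGroupGuard are hypothetical stand-ins, not yuzu's actual KPageGroup/KScopedPageGroup.

// Minimal sketch of a scope guard that keeps page references alive only on success.
#include <cassert>

struct FakePageGroup {
    int refs = 0;
    void Open() { ++refs; }   // stand-in for opening a reference to every block
    void Close() { --refs; }  // stand-in for dropping those references
};

class ScopedPageGroupGuard {
public:
    explicit ScopedPageGroupGuard(FakePageGroup& pg) : m_pg(&pg) { m_pg->Open(); }
    ~ScopedPageGroupGuard() {
        if (m_pg != nullptr) {
            m_pg->Close(); // mapping failed or was abandoned: release the reference
        }
    }
    void CancelClose() { m_pg = nullptr; } // mapping succeeded: keep the reference

private:
    FakePageGroup* m_pg;
};

int main() {
    FakePageGroup pg;
    {
        ScopedPageGroupGuard guard(pg);
        // ... map the pages; only on success:
        guard.CancelClose();
    }
    assert(pg.refs == 1); // the reference persisted past the guard's scope
    return 0;
}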