author    bunnei 2023-01-25 22:27:48 -0800
committer GitHub 2023-01-25 22:27:48 -0800
commit    2158ccda3b52d53b7485b954dc878735a58de085 (patch)
tree      fabf49d31ba3ee3f19cb84da7236ab36fced2be5
parent    Merge pull request #9681 from Morph1984/nice-one-qt6 (diff)
parent    kernel: KPageTable: update (diff)
download  yuzu-2158ccda3b52d53b7485b954dc878735a58de085.tar.gz
          yuzu-2158ccda3b52d53b7485b954dc878735a58de085.tar.xz
          yuzu-2158ccda3b52d53b7485b954dc878735a58de085.zip
Merge pull request #9604 from liamwhite/pt
kernel: KPageTable: update
Diffstat:
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp     12
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp     544
-rw-r--r--  src/core/hle/kernel/k_page_table.h        86
-rw-r--r--  src/core/hle/kernel/k_process.cpp         40
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp    6
-rw-r--r--  src/core/hle/kernel/svc.cpp                4
6 files changed, 477 insertions(+), 215 deletions(-)
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index d9da1e600..884eba001 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_UNLESS(!m_is_mapped, ResultInvalidState);
 
     // Map the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+    R_TRY(kernel.CurrentProcess()->PageTable().MapPageGroup(
         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
@@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
-                                                          KMemoryState::CodeOut));
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPageGroup(address, *m_page_group,
+                                                              KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
     m_is_mapped = false;
@@ -125,8 +125,8 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
     }
 
     // Map the memory.
-    R_TRY(
-        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+    R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
+                                            k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -142,7 +142,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
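The four KCodeMemory call sites above are the simplest face of this patch: the page-group overloads of MapPages/UnmapPages become MapPageGroup/UnmapPageGroup, leaving the old names for the count-based operations. A toy sketch (local types, not yuzu's API) of why the split names help at call sites:

// Toy sketch, not yuzu code: with a single overloaded name, a call passing a
// group and a call passing a page count look alike; distinct names make each
// call site self-documenting and keep overload resolution trivial.
#include <cstddef>
#include <cstdint>

struct PageGroup {
    std::size_t num_pages;
};

struct PageTableSketch {
    bool MapPageGroup(std::uintptr_t addr, const PageGroup& pg) { return true; }
    bool UnmapPageGroup(std::uintptr_t addr, const PageGroup& pg) { return true; }
    bool MapPages(std::uintptr_t addr, std::size_t num_pages) { return true; }
    bool UnmapPages(std::uintptr_t addr, std::size_t num_pages) { return true; }
};

int main() {
    PageTableSketch pt;
    const PageGroup pg{4};
    pt.MapPageGroup(0x10000, pg);  // map existing physical pages described by a group
    pt.UnmapPageGroup(0x10000, pg);
    pt.MapPages(0x20000, 4);       // allocate and map 4 fresh pages
    pt.UnmapPages(0x20000, 4);
}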
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 9c7ac22dc..fbd28f5f3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -435,6 +435,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);
 
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
     // Reprotect the source as kernel-read/not mapped.
     const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
                                                          KMemoryPermission::NotMapped);
@@ -447,7 +450,10 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     });
 
     // Map the alias pages.
-    R_TRY(MapPages(dst_address, pg, new_perm));
+    const KPageProperties dst_properties = {new_perm, false, false,
+                                            DisableMergeAttribute::DisableHead};
+    R_TRY(
+        this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
 
     // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
     // failure.
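MapCodeMemory now constructs a KScopedPageTableUpdater before mutating the table. As a rough standalone model (all names here invented), such a scoped updater is an RAII owner for the scratch page list an update may need, so it is reclaimed on every exit path:

// Rough standalone model (invented names): an RAII helper that owns scratch
// state for the duration of a page-table update and releases it on scope exit.
#include <cstdio>
#include <vector>

struct PageList {
    std::vector<void*> pages; // tracking-only in this sketch
};

class TableSketch;

class ScopedTableUpdater {
public:
    explicit ScopedTableUpdater(TableSketch* table) : m_table(table) {}
    ~ScopedTableUpdater(); // returns any scratch pages to the table
    ScopedTableUpdater(const ScopedTableUpdater&) = delete;
    ScopedTableUpdater& operator=(const ScopedTableUpdater&) = delete;

    PageList* GetPageList() { return &m_list; }

private:
    TableSketch* m_table;
    PageList m_list;
};

class TableSketch {
public:
    void ReclaimScratch(PageList* list) {
        std::printf("reclaimed %zu scratch pages\n", list->pages.size());
        list->pages.clear();
    }
};

ScopedTableUpdater::~ScopedTableUpdater() {
    m_table->ReclaimScratch(&m_list);
}

int main() {
    TableSketch table;
    ScopedTableUpdater updater(&table); // cleanup runs even on early returns
    updater.GetPageList()->pages.push_back(nullptr);
}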
@@ -1881,7 +1887,8 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     R_SUCCEED();
 }
 
-Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                             size_t size) {
     // Lock the table.
     KScopedLightLock lk(m_general_lock);
 
@@ -1902,53 +1909,73 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
                                  KMemoryAttribute::None));
 
     // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
+    Result src_allocator_result;
     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_src_allocator_blocks);
     R_TRY(src_allocator_result);
 
     // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
+    Result dst_allocator_result;
     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
-    const size_t num_pages{size / PageSize};
-    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
-        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
-    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
-
-    AddRegionToPages(src_address, num_pages, page_linked_list);
     {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+
+        // Create page groups for the memory being unmapped.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        // Create the page group representing the source.
+        R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
         // Reprotect the source as kernel-read/not mapped.
-        auto block_guard = detail::ScopeExit([&] {
-            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                    OperationType::ChangePermissions);
-        });
-        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
-        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
+        const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+        const KPageProperties src_properties = {new_src_perm, false, false,
+                                                DisableMergeAttribute::DisableHeadBodyTail};
+        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+                            OperationType::ChangePermissions));
 
-        block_guard.Cancel();
-    }
+        // Ensure that we unprotect the source pages on failure.
+        ON_RESULT_FAILURE {
+            const KPageProperties unprotect_properties = {
+                KMemoryPermission::UserReadWrite, false, false,
+                DisableMergeAttribute::EnableHeadBodyTail};
+            ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
+                                 OperationType::ChangePermissions) == ResultSuccess);
+        };
 
-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
-                                  new_src_perm, new_src_attr,
-                                  KMemoryBlockDisableMergeAttribute::Locked,
-                                  KMemoryBlockDisableMergeAttribute::None);
-    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
+        // Map the alias pages.
+        const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                    DisableMergeAttribute::DisableHead};
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+                                     false));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_src_perm, new_src_attr,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+    }
 
     R_SUCCEED();
 }
 
-Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                               size_t size) {
     // Lock the table.
     KScopedLightLock lk(m_general_lock);
 
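Throughout this patch, the manual detail::ScopeExit guard plus block_guard.Cancel() pairing is replaced with ON_RESULT_FAILURE, whose body runs only when the function is about to exit with a failing Result. A simplified approximation of that guard (the real macro ties into the enclosing function's result plumbing; this sketch tracks an explicit local instead):

// Simplified approximation of an "on result failure" guard: the lambda runs at
// scope exit only if the tracked result is an error, so the success path no
// longer needs an explicit Cancel() call.
#include <cstdio>
#include <utility>

struct Result {
    int code = 0;
    bool IsError() const { return code != 0; }
};

template <typename F>
class OnResultFailure {
public:
    OnResultFailure(const Result& r, F&& f) : m_result(r), m_f(std::forward<F>(f)) {}
    ~OnResultFailure() {
        if (m_result.IsError()) {
            m_f();
        }
    }

private:
    const Result& m_result;
    F m_f;
};

Result DoUpdate(bool fail) {
    Result result{};
    OnResultFailure rollback(result, [] { std::printf("rolling back permissions\n"); });
    if (fail) {
        result.code = 1; // simulate a failed Operate() call
        return result;   // guard fires on the way out
    }
    return result; // success: rollback body never runs
}

int main() {
    DoUpdate(true);
    DoUpdate(false);
}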
@@ -1970,108 +1997,208 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                  KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
+    Result src_allocator_result;
     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_src_allocator_blocks);
     R_TRY(src_allocator_result);
 
     // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
+    Result dst_allocator_result;
     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages{m_kernel, m_block_info_manager};
-    KPageGroup dst_pages{m_kernel, m_block_info_manager};
-    const size_t num_pages{size / PageSize};
+    // Unmap the memory.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
 
-    AddRegionToPages(src_address, num_pages, src_pages);
-    AddRegionToPages(dst_address, num_pages, dst_pages);
+        // Create page groups for the memory being unmapped.
+        KPageGroup pg{m_kernel, m_block_info_manager};
 
-    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+        // Create the page group representing the destination.
+        R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
 
-    {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
+        // Ensure the page group is valid for the source.
+        R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
 
-        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                      OperationType::ChangePermissions));
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
 
-        block_guard.Cancel();
-    }
+        // Unmap the aliased copy of the pages.
+        const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+        R_TRY(
+            this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
+
+        // Ensure that we re-map the aliased pages on failure.
+        ON_RESULT_FAILURE {
+            this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+        };
 
-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
-                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Locked);
-    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                  KMemoryState::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
+        // Try to set the permissions for the source pages back to what they should be.
+        const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+                            OperationType::ChangePermissions));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, src_state,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+    }
 
     R_SUCCEED();
 }
 
-Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
-                            KMemoryPermission perm) {
+Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                           size_t num_pages, KMemoryPermission perm) {
     ASSERT(this->IsLockedByCurrentThread());
 
-    VAddr cur_addr{addr};
+    // Create a page group to hold the pages we allocate.
+    KPageGroup pg{m_kernel, m_block_info_manager};
 
-    for (const auto& node : page_linked_list) {
-        if (const auto result{
-                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
-            result.IsError()) {
-            const size_t num_pages{(addr - cur_addr) / PageSize};
+    // Allocate the pages.
+    R_TRY(
+        m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
 
-            ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
-                       .IsSuccess());
+    // Ensure that the page group is closed when we're done working with it.
+    SCOPE_EXIT({ pg.Close(); });
 
-            R_RETURN(result);
+    // Clear all pages.
+    for (const auto& it : pg) {
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
+                    it.GetSize());
+    }
+
+    // Map the pages.
+    R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
+}
+
+Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                                    const KPageGroup& pg, const KPageProperties properties,
+                                    bool reuse_ll) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    KProcessAddress cur_address = address;
+
+    // Ensure that we clean up on failure.
+    ON_RESULT_FAILURE {
+        ASSERT(!reuse_ll);
+        if (cur_address != start_address) {
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
+                                 unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
         }
+    };
 
-        cur_addr += node.GetNumPages() * PageSize;
+    // Iterate, mapping all pages in the group.
+    for (const auto& block : pg) {
+        // Map and advance.
+        const KPageProperties cur_properties =
+            (cur_address == start_address)
+                ? properties
+                : KPageProperties{properties.perm, properties.io, properties.uncached,
+                                  DisableMergeAttribute::None};
+        this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
+                      block.GetAddress());
+        cur_address += block.GetSize();
     }
 
+    // We succeeded!
     R_SUCCEED();
 }
 
-Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
-                            KMemoryPermission perm) {
-    // Check that the map is in range.
-    const size_t num_pages{page_linked_list.GetNumPages()};
-    const size_t size{num_pages * PageSize};
-    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                                const KPageGroup& pg) {
+    ASSERT(this->IsLockedByCurrentThread());
 
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    const KProcessAddress last_address = start_address + size - 1;
+    const KProcessAddress end_address = last_address + 1;
 
-    // Check the memory state.
-    R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 KMemoryAttribute::None, KMemoryAttribute::None));
+    // Iterate over the memory.
+    auto pg_it = pg.begin();
+    ASSERT(pg_it != pg.end());
 
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
+    KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+    size_t pg_pages = pg_it->GetNumPages();
 
-    // Map the pages.
-    R_TRY(MapPages(address, page_linked_list, perm));
+    auto it = m_memory_block_manager.FindIterator(start_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != m_memory_block_manager.end());
 
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
 
-    R_SUCCEED();
+        // Determine the range to map.
+        KProcessAddress map_address = std::max(info.GetAddress(), start_address);
+        const KProcessAddress map_end_address = std::min(info.GetEndAddress(), end_address);
+        ASSERT(map_end_address != map_address);
+
+        // Determine if we should disable head merge.
+        const bool disable_head_merge =
+            info.GetAddress() >= start_address &&
+            True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+        const KPageProperties map_properties = {
+            info.GetPermission(), false, false,
+            disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+        // While we have pages to map, map them.
+        size_t map_pages = (map_end_address - map_address) / PageSize;
+        while (map_pages > 0) {
+            // Check if we're at the end of the physical block.
+            if (pg_pages == 0) {
+                // Ensure there are more pages to map.
+                ASSERT(pg_it != pg.end());
+
+                // Advance our physical block.
+                ++pg_it;
+                pg_phys_addr = pg_it->GetAddress();
+                pg_pages = pg_it->GetNumPages();
+            }
+
+            // Map whatever we can.
+            const size_t cur_pages = std::min(pg_pages, map_pages);
+            ASSERT(this->Operate(map_address, map_pages, map_properties.perm, OperationType::Map,
+                                 pg_phys_addr) == ResultSuccess);
+
+            // Advance.
+            map_address += cur_pages * PageSize;
+            map_pages -= cur_pages;
+
+            pg_phys_addr += cur_pages * PageSize;
+            pg_pages -= cur_pages;
+        }
+
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+    }
+
+    // Check that we re-mapped precisely the page group.
+    ASSERT((++pg_it) == pg.end());
 }
 
-Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                            bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                            KPhysicalAddress phys_addr, bool is_pa_valid,
+                            KProcessAddress region_start, size_t region_num_pages,
                             KMemoryState state, KMemoryPermission perm) {
     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
 
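RemapPageGroup, new in this patch, walks two run-length encodings at once: the memory-block ranges covering the virtual region and the physical extents of the page group, consuming min(pg_pages, map_pages) pages per step. The two-cursor merge in isolation, over plain structs:

// Standalone sketch of the two-cursor merge in RemapPageGroup: virtual ranges
// and physical extents are both run-length encoded, and each step consumes
// min(remaining_physical, remaining_virtual) pages from both cursors.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr std::size_t PageSize = 0x1000;

struct VirtualRange {
    std::size_t address;
    std::size_t num_pages;
};

struct PhysicalExtent {
    std::size_t address;
    std::size_t num_pages;
};

int main() {
    const std::vector<VirtualRange> ranges{{0x10000, 3}, {0x13000, 5}};
    const std::vector<PhysicalExtent> extents{{0x80000, 4}, {0x90000, 4}};

    auto ext = extents.begin();
    std::size_t phys = ext->address;
    std::size_t phys_pages = ext->num_pages;

    for (const auto& range : ranges) {
        std::size_t virt = range.address;
        std::size_t virt_pages = range.num_pages;
        while (virt_pages > 0) {
            if (phys_pages == 0) { // advance to the next physical extent
                ++ext;
                phys = ext->address;
                phys_pages = ext->num_pages;
            }
            const std::size_t pages = std::min(phys_pages, virt_pages);
            std::printf("map %#zx -> %#zx (%zu pages)\n", virt, phys, pages);
            virt += pages * PageSize;
            virt_pages -= pages;
            phys += pages * PageSize;
            phys_pages -= pages;
        }
    }
}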
@@ -2084,26 +2211,30 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
     KScopedLightLock lk(m_general_lock);
 
     // Find a random address to map at.
-    VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
-                                    this->GetNumGuardPages());
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+                                              0, this->GetNumGuardPages());
     R_UNLESS(addr != 0, ResultOutOfMemory);
     ASSERT(Common::IsAligned(addr, alignment));
     ASSERT(this->CanContain(addr, num_pages * PageSize, state));
     ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
                                   KMemoryPermission::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryAttribute::None)
-               .IsSuccess());
+                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
 
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
 
     // Perform mapping operation.
     if (is_pa_valid) {
-        R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+        const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+        R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
     } else {
-        UNIMPLEMENTED();
+        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
    }
 
     // Update the blocks.
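The previously unimplemented !is_pa_valid branch now goes through AllocateAndMapPagesImpl, which allocates backing pages, fills them with m_heap_fill_value, and maps them as a group, with SCOPE_EXIT({ pg.Close(); }) dropping the reference on every path. A toy sketch of that allocate-fill-map shape (invented names; plain heap memory stands in for physical pages):

// Toy sketch (invented names): allocate backing pages, fill them with a
// deterministic pattern, then "map" them; RAII releases the backing allocation
// on every exit path, mirroring SCOPE_EXIT({ pg.Close(); }).
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <memory>
#include <vector>

constexpr std::size_t PageSize = 0x1000;
constexpr unsigned char HeapFillValue = 0xCD; // stand-in for m_heap_fill_value

bool AllocateAndMapPagesSketch(std::size_t num_pages) {
    // Allocate the backing memory (stand-in for KMemoryManager::AllocateAndOpen).
    std::vector<std::unique_ptr<unsigned char[]>> pages;
    for (std::size_t i = 0; i < num_pages; ++i) {
        pages.push_back(std::make_unique<unsigned char[]>(PageSize));
    }
    // unique_ptr plays the SCOPE_EXIT role here: the backing pages are released
    // automatically whether we return early or fall through.

    // Clear all pages with the fill pattern.
    for (auto& page : pages) {
        std::memset(page.get(), HeapFillValue, PageSize);
    }

    // "Map" the pages (a real table would install translation entries here).
    std::printf("mapped %zu freshly filled pages\n", num_pages);
    return true;
}

int main() {
    AllocateAndMapPagesSketch(4);
}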
@@ -2116,28 +2247,45 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
     R_SUCCEED();
 }
 
-Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
-    ASSERT(this->IsLockedByCurrentThread());
+Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                            KMemoryPermission perm) {
+    // Check that the map is in range.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
 
-    VAddr cur_addr{addr};
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
 
-    for (const auto& node : page_linked_list) {
-        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
-                                      OperationType::Unmap)};
-            result.IsError()) {
-            R_RETURN(result);
-        }
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
 
-        cur_addr += node.GetNumPages() * PageSize;
-    }
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Map the pages.
+    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     R_SUCCEED();
 }
 
-Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
     // Check that the unmap is in range.
-    const size_t num_pages{page_linked_list.GetNumPages()};
-    const size_t size{num_pages * PageSize};
+    const size_t size = num_pages * PageSize;
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
@@ -2151,13 +2299,18 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
                                  KMemoryAttribute::None));
 
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
     // Perform the unmap.
-    R_TRY(UnmapPages(address, page_linked_list));
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
 
     // Update the blocks.
     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2168,29 +2321,130 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
     R_SUCCEED();
 }
 
-Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
-    // Check that the unmap is in range.
+Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                                KProcessAddress region_start, size_t region_num_pages,
+                                KMemoryState state, KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
+    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Find a random address to map at.
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+                                              0, this->GetNumGuardPages());
+    R_UNLESS(addr != 0, ResultOutOfMemory);
+    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    *out_addr = addr;
+    R_SUCCEED();
+}
+
+Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+                                KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
     const size_t size = num_pages * PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
     KScopedLightLock lk(m_general_lock);
 
-    // Check the memory state.
-    size_t num_allocator_blocks{};
+    // Check if state allows us to map.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+
+Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+                                  KMemoryState state) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid unmap request.
+    const size_t num_pages = pg.GetNumPages();
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check if state allows us to unmap.
+    size_t num_allocator_blocks;
     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                  KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
 
+    // Check that the page group is valid.
+    R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
-    // Perform the unmap.
-    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform unmapping operation.
+    const KPageProperties properties = {KMemoryPermission::None, false, false,
+                                        DisableMergeAttribute::None};
+    R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
 
     // Update the blocks.
     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
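Every rewritten entry point above repeats one skeleton: validate the request, take the table lock, check the memory state, build an update allocator, create the scoped updater, perform the hardware-facing operation, and only then commit the block-manager bookkeeping. The skeleton in the abstract (invented names):

// Abstract sketch (invented names) of the shared entry-point skeleton used by
// MapPages/UnmapPages/MapPageGroup/UnmapPageGroup in this patch.
#include <cstddef>
#include <cstdio>
#include <mutex>

struct Result {
    int code = 0;
    bool IsError() const { return code != 0; }
};
constexpr Result ResultSuccess{0};

class TableSketch {
public:
    Result MapOperation(std::size_t address, std::size_t num_pages) {
        // 1. Validate the request up front (range/containment checks).
        if (num_pages == 0) {
            return Result{1};
        }

        // 2. Take the table lock for the whole update.
        std::scoped_lock lk(m_lock);

        // 3. Check the memory state of the affected region. (elided)
        // 4. Create the update allocator and scoped updater. (elided)

        // 5. Perform the hardware-facing operation.
        std::printf("operate on %zu pages at %#zx\n", num_pages, address);

        // 6. Commit the bookkeeping update only after the operation succeeds.
        return ResultSuccess;
    }

private:
    std::mutex m_lock;
};

int main() {
    TableSketch table;
    table.MapOperation(0x10000, 4);
}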
@@ -2550,54 +2804,6 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     }
 }
 
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
-                                                  bool is_map_only, VAddr region_start,
-                                                  size_t region_num_pages, KMemoryState state,
-                                                  KMemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk(m_general_lock);
-
-    R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
-             ResultInvalidCurrentMemory);
-    R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
-    const VAddr addr{
-        AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
-    R_UNLESS(addr, ResultOutOfMemory);
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-
-    if (is_map_only) {
-        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
-    } else {
-        // Create a page group tohold the pages we allocate.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-            &pg, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
-        // Ensure that the page group is closed when we're done working with it.
-        SCOPE_EXIT({ pg.Close(); });
-
-        // Clear all pages.
-        for (const auto& it : pg) {
-            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
-                        m_heap_fill_value, it.GetSize());
-        }
-
-        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
-    }
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    return addr;
-}
-
 Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                                 KMemoryPermission perm, bool is_aligned,
                                                 bool check_heap) {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 0a454b05b..367dab613 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -24,12 +24,36 @@ class System;
 
 namespace Kernel {
 
+enum class DisableMergeAttribute : u8 {
+    None = (0U << 0),
+    DisableHead = (1U << 0),
+    DisableHeadAndBody = (1U << 1),
+    EnableHeadAndBody = (1U << 2),
+    DisableTail = (1U << 3),
+    EnableTail = (1U << 4),
+    EnableAndMergeHeadBodyTail = (1U << 5),
+    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+
+struct KPageProperties {
+    KMemoryPermission perm;
+    bool io;
+    bool uncached;
+    DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
 class KBlockInfoManager;
 class KMemoryBlockManager;
 class KResourceLimit;
 class KSystemResource;
 
 class KPageTable final {
+protected:
+    struct PageLinkedList;
+
 public:
     enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
 
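The two static_asserts only hold because every member of KPageProperties is one byte wide; the asserts themselves pin this down (two bools, the u8 DisableMergeAttribute, and a KMemoryPermission that must therefore also be one byte), so the struct packs into exactly four bytes with no padding. A self-contained illustration with local stand-in enums:

// Self-contained illustration with local stand-in enums: four one-byte members
// pack into a four-byte trivial struct, which the static_asserts then pin down.
#include <cstdint>
#include <type_traits>

enum class PermSketch : std::uint8_t { None = 0, Read = 1 << 0, Write = 1 << 1 };

enum class DisableMergeAttributeSketch : std::uint8_t {
    None = (0U << 0),
    DisableHead = (1U << 0),
    DisableHeadAndBody = (1U << 1),
    EnableHeadAndBody = (1U << 2),
    DisableTail = (1U << 3),
    EnableTail = (1U << 4),
    EnableAndMergeHeadBodyTail = (1U << 5),
};

struct PagePropertiesSketch {
    PermSketch perm;
    bool io;
    bool uncached;
    DisableMergeAttributeSketch disable_merge_attributes;
};
static_assert(std::is_trivial_v<PagePropertiesSketch>);
static_assert(sizeof(PagePropertiesSketch) == sizeof(std::uint32_t));

int main() {}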
@@ -57,27 +81,12 @@ public:
     Result UnmapPhysicalMemory(VAddr addr, size_t size);
     Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
     Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
-    Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
-                    KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-    Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
     Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
     Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
     Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
     Result SetMaxHeapSize(size_t size);
     Result SetHeapSize(VAddr* out, size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
-                                          VAddr region_start, size_t region_num_pages,
-                                          KMemoryState state, KMemoryPermission perm,
-                                          PAddr map_addr = 0);
-
     Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                         KMemoryPermission perm, bool is_aligned, bool check_heap);
     Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
@@ -113,6 +122,40 @@ public:
 
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+                                region_num_pages, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+
+    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm);
+    Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+    Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                        KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+                        KMemoryPermission perm);
+    Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+                        KMemoryPermission perm);
+    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+    void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                        const KPageGroup& pg);
+
 protected:
     struct PageLinkedList {
     private:
@@ -166,11 +209,9 @@ private:
     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
         KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
 
-    Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
-                    KMemoryState state, KMemoryPermission perm);
-    Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
     void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
@@ -265,6 +306,11 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);
 
+    Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                   size_t num_pages, KMemoryPermission perm);
+    Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                            const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
 
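The header changes funnel every public MapPages overload into the single private MapPages that takes each parameter explicitly, with is_pa_valid selecting between the physically-backed and allocate-on-demand paths. The funnel pattern in isolation (invented names):

// The overload-funnel pattern in isolation (invented names): public convenience
// overloads fill in defaults and forward to one private implementation.
#include <cstddef>
#include <cstdio>

class TableSketch {
public:
    // Caller supplies a physical address: physically-backed mapping.
    int MapPages(std::size_t* out_addr, std::size_t num_pages, std::size_t phys_addr) {
        return MapPagesImpl(out_addr, num_pages, phys_addr, /*is_pa_valid=*/true);
    }

    // No physical address: pages are allocated on demand.
    int MapPages(std::size_t* out_addr, std::size_t num_pages) {
        return MapPagesImpl(out_addr, num_pages, 0, /*is_pa_valid=*/false);
    }

private:
    int MapPagesImpl(std::size_t* out_addr, std::size_t num_pages, std::size_t phys_addr,
                     bool is_pa_valid) {
        *out_addr = 0x10000; // a real implementation searches for a free area
        std::printf("map %zu pages (pa_valid=%d, phys=%#zx)\n", num_pages, is_pa_valid, phys_addr);
        return 0;
    }
};

int main() {
    TableSketch table;
    std::size_t addr = 0;
    table.MapPages(&addr, 4);           // allocate-and-map path
    table.MapPages(&addr, 4, 0x80000);  // physically-backed path
}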
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index a1abf5d68..e201bb0cd 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -417,9 +417,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 }
 
 void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
-    AllocateMainThreadStack(stack_size);
+    ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess);
     resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
-    resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);
 
     const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
     ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
@@ -675,20 +674,31 @@ void KProcess::ChangeState(State new_state) {
 }
 
 Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
-    ASSERT(stack_size);
-
-    // The kernel always ensures that the given stack size is page aligned.
-    main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
-
-    const VAddr start{page_table.GetStackRegionStart()};
-    const std::size_t size{page_table.GetStackRegionEnd() - start};
-
-    CASCADE_RESULT(main_thread_stack_top,
-                   page_table.AllocateAndMapMemory(
-                       main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
-                       KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+    // Ensure that we haven't already allocated stack.
+    ASSERT(main_thread_stack_size == 0);
+
+    // Ensure that we're allocating a valid stack.
+    stack_size = Common::AlignUp(stack_size, PageSize);
+    // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
+    R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory);
+
+    // Place a tentative reservation of memory for our new stack.
+    KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
+                                               stack_size);
+    R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
+
+    // Allocate and map our stack.
+    if (stack_size) {
+        KProcessAddress stack_bottom;
+        R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
+                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+
+        main_thread_stack_top = stack_bottom + stack_size;
+        main_thread_stack_size = stack_size;
+    }
 
-    main_thread_stack_top += main_thread_stack_size;
+    // We succeeded! Commit our memory reservation.
+    mem_reservation.Commit();
 
     R_SUCCEED();
 }
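AllocateMainThreadStack now reserves PhysicalMemoryMax through KScopedResourceReservation, which releases the reservation automatically unless Commit() is called; this replaces the unconditional Reserve in Run() that could not be rolled back if a later step failed. A minimal model of a commit-or-rollback reservation guard (invented names):

// Minimal model (invented names) of a commit-or-rollback resource reservation:
// the destructor returns the reserved amount unless Commit() was called.
#include <cstdio>

class LimitSketch {
public:
    explicit LimitSketch(long max) : m_max(max) {}
    bool Reserve(long amount) {
        if (m_used + amount > m_max) {
            return false;
        }
        m_used += amount;
        return true;
    }
    void Release(long amount) { m_used -= amount; }
    long Used() const { return m_used; }

private:
    long m_max;
    long m_used = 0;
};

class ScopedReservation {
public:
    ScopedReservation(LimitSketch& limit, long amount)
        : m_limit(limit), m_amount(amount), m_succeeded(limit.Reserve(amount)) {}
    ~ScopedReservation() {
        if (m_succeeded && !m_committed) {
            m_limit.Release(m_amount); // rollback: a later step failed
        }
    }
    bool Succeeded() const { return m_succeeded; }
    void Commit() { m_committed = true; }

private:
    LimitSketch& m_limit;
    long m_amount;
    bool m_succeeded;
    bool m_committed = false;
};

int main() {
    LimitSketch limit(1 << 20);
    {
        ScopedReservation reservation(limit, 0x4000);
        if (reservation.Succeeded()) {
            reservation.Commit(); // mapping succeeded; keep the reservation
        }
    }
    std::printf("used after commit: %ld\n", limit.Used());
}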
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 3cf2b5d91..df505edfe 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -94,15 +94,15 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
         R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
     }
 
-    return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared,
-                                               ConvertToKMemoryPermission(map_perm));
+    return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared,
+                                                   ConvertToKMemoryPermission(map_perm));
 }
 
 Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
     // Validate the size.
     R_UNLESS(size == unmap_size, ResultInvalidSize);
 
-    return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared);
+    return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared);
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index aca442196..67fa5d71c 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1492,8 +1492,8 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
                                  KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Map the group.
-    R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
-                          KMemoryPermission::UserReadWrite));
+    R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode,
+                              KMemoryPermission::UserReadWrite));
 
     return ResultSuccess;
 }