Diffstat (limited to 'src'): 24 files changed, 521 insertions, 248 deletions
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 6e9812e6e..0e06468da 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -11,7 +11,6 @@

 namespace Core::HID {
 constexpr s32 HID_JOYSTICK_MAX = 0x7fff;
-constexpr s32 HID_JOYSTICK_MIN = 0x7ffe;
 constexpr s32 HID_TRIGGER_MAX = 0x7fff;
 // Use a common UUID for TAS and Virtual Gamepad
 constexpr Common::UUID TAS_UUID =
@@ -864,16 +863,9 @@ void EmulatedController::SetStick(const Common::Input::CallbackStatus& callback,
         return;
     }

-    const auto FloatToShort = [](float a) {
-        if (a > 0) {
-            return static_cast<s32>(a * HID_JOYSTICK_MAX);
-        }
-        return static_cast<s32>(a * HID_JOYSTICK_MIN);
-    };
-
     const AnalogStickState stick{
-        .x = FloatToShort(controller.stick_values[index].x.value),
-        .y = FloatToShort(controller.stick_values[index].y.value),
+        .x = static_cast<s32>(controller.stick_values[index].x.value * HID_JOYSTICK_MAX),
+        .y = static_cast<s32>(controller.stick_values[index].y.value * HID_JOYSTICK_MAX),
     };

     switch (index) {
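Note: the removed FloatToShort helper scaled positive stick values by HID_JOYSTICK_MAX (0x7fff) and negative ones by HID_JOYSTICK_MIN (0x7ffe); the replacement scales both signs by HID_JOYSTICK_MAX. A minimal standalone sketch of the new conversion, for illustration only (the free function and main below are not part of the patch):

// Sketch of the new symmetric float -> s32 stick conversion.
#include <cstdint>
#include <cstdio>

constexpr std::int32_t HID_JOYSTICK_MAX = 0x7fff;

std::int32_t StickFloatToS32(float value) {
    // Both signs now use the same scale, so -1.0f maps to -0x7fff
    // instead of -0x7ffe as with the old FloatToShort helper.
    return static_cast<std::int32_t>(value * HID_JOYSTICK_MAX);
}

int main() {
    std::printf("%d %d\n", StickFloatToS32(1.0f), StickFloatToS32(-1.0f)); // 32767 -32767
}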
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index d9da1e600..884eba001 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_UNLESS(!m_is_mapped, ResultInvalidState);

     // Map the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+    R_TRY(kernel.CurrentProcess()->PageTable().MapPageGroup(
         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

     // Mark ourselves as mapped.
@@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
-                                                          KMemoryState::CodeOut));
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPageGroup(address, *m_page_group,
+                                                              KMemoryState::CodeOut));

     // Mark ourselves as unmapped.
     m_is_mapped = false;
@@ -125,8 +125,8 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
     }

     // Map the memory.
-    R_TRY(
-        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+    R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
+                                            k_perm));

     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -142,7 +142,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));

     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
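Note: KCodeMemory now goes through the page table's MapPageGroup/UnmapPageGroup entry points instead of the removed KPageGroup overloads of MapPages/UnmapPages; the arguments are otherwise unchanged. The R_TRY/R_UNLESS macros around these calls are the kernel's result plumbing: roughly, R_TRY forwards a failing Result to the caller, and R_UNLESS returns the given Result when its condition is false. A simplified, self-contained model of that control flow (the Result type and macros below are stand-ins, not the real definitions):

// Simplified stand-ins for the result-propagation macros used throughout this diff.
#include <cstdio>

struct Result {
    int value;
    bool IsError() const { return value != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidState{1};

#define R_TRY(expr)                                                                                \
    do {                                                                                           \
        const Result r_try_result = (expr);                                                        \
        if (r_try_result.IsError()) {                                                              \
            return r_try_result;                                                                   \
        }                                                                                          \
    } while (0)

#define R_UNLESS(cond, res)                                                                        \
    do {                                                                                           \
        if (!(cond)) {                                                                             \
            return (res);                                                                          \
        }                                                                                          \
    } while (0)

Result MapSomething(bool already_mapped) {
    R_UNLESS(!already_mapped, ResultInvalidState); // bail out with a specific error
    R_TRY(ResultSuccess);                          // propagate a sub-operation's failure, if any
    return ResultSuccess;
}

int main() {
    std::printf("%d %d\n", MapSomething(false).value, MapSomething(true).value); // 0 1
}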
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 9c7ac22dc..fbd28f5f3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -435,6 +435,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);

+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
     // Reprotect the source as kernel-read/not mapped.
     const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
                                                          KMemoryPermission::NotMapped);
@@ -447,7 +450,10 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     });

     // Map the alias pages.
-    R_TRY(MapPages(dst_address, pg, new_perm));
+    const KPageProperties dst_properties = {new_perm, false, false,
+                                            DisableMergeAttribute::DisableHead};
+    R_TRY(
+        this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));

     // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
     // failure.
@@ -1881,7 +1887,8 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     R_SUCCEED();
 }

-Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                             size_t size) {
     // Lock the table.
     KScopedLightLock lk(m_general_lock);

@@ -1902,53 +1909,73 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
                                  KMemoryAttribute::None));

     // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
+    Result src_allocator_result;
     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_src_allocator_blocks);
     R_TRY(src_allocator_result);

     // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
+    Result dst_allocator_result;
     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);

     // Map the memory.
-    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
-    const size_t num_pages{size / PageSize};
-    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
-        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
-    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
-
-    AddRegionToPages(src_address, num_pages, page_linked_list);
     {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+
+        // Create page groups for the memory being unmapped.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+
+        // Create the page group representing the source.
+        R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
         // Reprotect the source as kernel-read/not mapped.
-        auto block_guard = detail::ScopeExit([&] {
-            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                    OperationType::ChangePermissions);
-        });
-        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
-        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
+        const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+        const KPageProperties src_properties = {new_src_perm, false, false,
+                                                DisableMergeAttribute::DisableHeadBodyTail};
+        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+                            OperationType::ChangePermissions));

-        block_guard.Cancel();
-    }
+        // Ensure that we unprotect the source pages on failure.
+        ON_RESULT_FAILURE {
+            const KPageProperties unprotect_properties = {
+                KMemoryPermission::UserReadWrite, false, false,
+                DisableMergeAttribute::EnableHeadBodyTail};
+            ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
+                                 OperationType::ChangePermissions) == ResultSuccess);
+        };

-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
-                                  new_src_perm, new_src_attr,
-                                  KMemoryBlockDisableMergeAttribute::Locked,
-                                  KMemoryBlockDisableMergeAttribute::None);
-    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
+        // Map the alias pages.
+        const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                    DisableMergeAttribute::DisableHead};
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+                                     false));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_src_perm, new_src_attr,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+    }

     R_SUCCEED();
 }

-Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                               size_t size) {
     // Lock the table.
     KScopedLightLock lk(m_general_lock);

@@ -1970,108 +1997,208 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                  KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));

     // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
+    Result src_allocator_result;
     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_src_allocator_blocks);
     R_TRY(src_allocator_result);

     // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
+    Result dst_allocator_result;
     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);

-    KPageGroup src_pages{m_kernel, m_block_info_manager};
-    KPageGroup dst_pages{m_kernel, m_block_info_manager};
-    const size_t num_pages{size / PageSize};
+    // Unmap the memory.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;

-    AddRegionToPages(src_address, num_pages, src_pages);
-    AddRegionToPages(dst_address, num_pages, dst_pages);
+        // Create page groups for the memory being unmapped.
+        KPageGroup pg{m_kernel, m_block_info_manager};

-    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+        // Create the page group representing the destination.
+        R_TRY(this->MakePageGroup(pg, dst_address, num_pages));

-    {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
+        // Ensure the page group is the valid for the source.
+        R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);

-        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                      OperationType::ChangePermissions));
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);

-        block_guard.Cancel();
-    }
+        // Unmap the aliased copy of the pages.
+        const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+        R_TRY(
+            this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
+
+        // Ensure that we re-map the aliased pages on failure.
+        ON_RESULT_FAILURE {
+            this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+        };

-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
-                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Locked);
-    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                  KMemoryState::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
+        // Try to set the permissions for the source pages back to what they should be.
+        const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+                            OperationType::ChangePermissions));
+
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, src_state,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+    }

     R_SUCCEED();
 }

-Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
-                            KMemoryPermission perm) {
+Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                           size_t num_pages, KMemoryPermission perm) {
     ASSERT(this->IsLockedByCurrentThread());

-    VAddr cur_addr{addr};
+    // Create a page group to hold the pages we allocate.
+    KPageGroup pg{m_kernel, m_block_info_manager};

-    for (const auto& node : page_linked_list) {
-        if (const auto result{
-                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
-            result.IsError()) {
-            const size_t num_pages{(addr - cur_addr) / PageSize};
+    // Allocate the pages.
+    R_TRY(
+        m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));

-            ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
-                       .IsSuccess());
+    // Ensure that the page group is closed when we're done working with it.
+    SCOPE_EXIT({ pg.Close(); });

-            R_RETURN(result);
+    // Clear all pages.
+    for (const auto& it : pg) {
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
+                    it.GetSize());
+    }
+
+    // Map the pages.
+    R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
+}
+
+Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                                    const KPageGroup& pg, const KPageProperties properties,
+                                    bool reuse_ll) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    KProcessAddress cur_address = address;
+
+    // Ensure that we clean up on failure.
+    ON_RESULT_FAILURE {
+        ASSERT(!reuse_ll);
+        if (cur_address != start_address) {
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
+                                 unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
         }
+    };

-        cur_addr += node.GetNumPages() * PageSize;
+    // Iterate, mapping all pages in the group.
+    for (const auto& block : pg) {
+        // Map and advance.
+        const KPageProperties cur_properties =
+            (cur_address == start_address)
+                ? properties
+                : KPageProperties{properties.perm, properties.io, properties.uncached,
+                                  DisableMergeAttribute::None};
+        this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
+                      block.GetAddress());
+        cur_address += block.GetSize();
     }

+    // We succeeded!
     R_SUCCEED();
 }

-Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
-                            KMemoryPermission perm) {
-    // Check that the map is in range.
-    const size_t num_pages{page_linked_list.GetNumPages()};
-    const size_t size{num_pages * PageSize};
-    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                                const KPageGroup& pg) {
+    ASSERT(this->IsLockedByCurrentThread());

-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    const KProcessAddress last_address = start_address + size - 1;
+    const KProcessAddress end_address = last_address + 1;

-    // Check the memory state.
-    R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 KMemoryAttribute::None, KMemoryAttribute::None));
+    // Iterate over the memory.
+    auto pg_it = pg.begin();
+    ASSERT(pg_it != pg.end());

-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
+    KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+    size_t pg_pages = pg_it->GetNumPages();

-    // Map the pages.
-    R_TRY(MapPages(address, page_linked_list, perm));
+    auto it = m_memory_block_manager.FindIterator(start_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != m_memory_block_manager.end());

-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();

-    R_SUCCEED();
+        // Determine the range to map.
+        KProcessAddress map_address = std::max(info.GetAddress(), start_address);
+        const KProcessAddress map_end_address = std::min(info.GetEndAddress(), end_address);
+        ASSERT(map_end_address != map_address);
+
+        // Determine if we should disable head merge.
+        const bool disable_head_merge =
+            info.GetAddress() >= start_address &&
+            True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+        const KPageProperties map_properties = {
+            info.GetPermission(), false, false,
+            disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+        // While we have pages to map, map them.
+        size_t map_pages = (map_end_address - map_address) / PageSize;
+        while (map_pages > 0) {
+            // Check if we're at the end of the physical block.
+            if (pg_pages == 0) {
+                // Ensure there are more pages to map.
+                ASSERT(pg_it != pg.end());
+
+                // Advance our physical block.
+                ++pg_it;
+                pg_phys_addr = pg_it->GetAddress();
+                pg_pages = pg_it->GetNumPages();
+            }
+
+            // Map whatever we can.
+            const size_t cur_pages = std::min(pg_pages, map_pages);
+            ASSERT(this->Operate(map_address, map_pages, map_properties.perm, OperationType::Map,
+                                 pg_phys_addr) == ResultSuccess);
+
+            // Advance.
+            map_address += cur_pages * PageSize;
+            map_pages -= cur_pages;
+
+            pg_phys_addr += cur_pages * PageSize;
+            pg_pages -= cur_pages;
+        }
+
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+    }
+
+    // Check that we re-mapped precisely the page group.
+    ASSERT((++pg_it) == pg.end());
 }

-Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                            bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                            KPhysicalAddress phys_addr, bool is_pa_valid,
+                            KProcessAddress region_start, size_t region_num_pages,
                             KMemoryState state, KMemoryPermission perm) {
     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);

@@ -2084,26 +2211,30 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
     KScopedLightLock lk(m_general_lock);

     // Find a random address to map at.
-    VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
-                                    this->GetNumGuardPages());
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+                                              0, this->GetNumGuardPages());
     R_UNLESS(addr != 0, ResultOutOfMemory);
     ASSERT(Common::IsAligned(addr, alignment));
     ASSERT(this->CanContain(addr, num_pages * PageSize, state));
     ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
                                   KMemoryPermission::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryAttribute::None)
-               .IsSuccess());
+                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);

     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);

     // Perform mapping operation.
     if (is_pa_valid) {
-        R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+        const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+        R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
     } else {
-        UNIMPLEMENTED();
+        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
     }

     // Update the blocks.
@@ -2116,28 +2247,45 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
     R_SUCCEED();
 }

-Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
-    ASSERT(this->IsLockedByCurrentThread());
+Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                            KMemoryPermission perm) {
+    // Check that the map is in range.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);

-    VAddr cur_addr{addr};
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);

-    for (const auto& node : page_linked_list) {
-        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
-                                      OperationType::Unmap)};
-            result.IsError()) {
-            R_RETURN(result);
-        }
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));

-        cur_addr += node.GetNumPages() * PageSize;
-    }
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Map the pages.
+    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);

     R_SUCCEED();
 }

-Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
     // Check that the unmap is in range.
-    const size_t num_pages{page_linked_list.GetNumPages()};
-    const size_t size{num_pages * PageSize};
+    const size_t size = num_pages * PageSize;
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

     // Lock the table.
@@ -2151,13 +2299,18 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
                                  KMemoryAttribute::None));

     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);

+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
     // Perform the unmap.
-    R_TRY(UnmapPages(address, page_linked_list));
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));

     // Update the blocks.
     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2168,29 +2321,130 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
     R_SUCCEED();
 }

-Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
-    // Check that the unmap is in range.
+Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                                KProcessAddress region_start, size_t region_num_pages,
+                                KMemoryState state, KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
+    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Find a random address to map at.
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+                                              0, this->GetNumGuardPages());
+    R_UNLESS(addr != 0, ResultOutOfMemory);
+    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    *out_addr = addr;
+    R_SUCCEED();
+}
+
+Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+                                KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
     const size_t size = num_pages * PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);

     // Lock the table.
     KScopedLightLock lk(m_general_lock);

-    // Check the memory state.
-    size_t num_allocator_blocks{};
+    // Check if state allows us to map.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+
+Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+                                  KMemoryState state) {
+    ASSERT(!this->IsLockedByCurrentThread());
+
+    // Ensure this is a valid unmap request.
+    const size_t num_pages = pg.GetNumPages();
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check if state allows us to unmap.
+    size_t num_allocator_blocks;
     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                  KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));

+    // Check that the page group is valid.
+    R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);

-    // Perform the unmap.
-    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // Perform unmapping operation.
+    const KPageProperties properties = {KMemoryPermission::None, false, false,
+                                        DisableMergeAttribute::None};
+    R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));

     // Update the blocks.
     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2550,54 +2804,6 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     }
 }

-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
-                                                  bool is_map_only, VAddr region_start,
-                                                  size_t region_num_pages, KMemoryState state,
-                                                  KMemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk(m_general_lock);
-
-    R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
-             ResultInvalidCurrentMemory);
-    R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
-    const VAddr addr{
-        AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
-    R_UNLESS(addr, ResultOutOfMemory);
-
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-
-    if (is_map_only) {
-        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
-    } else {
-        // Create a page group tohold the pages we allocate.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-
-        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-            &pg, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
-        // Ensure that the page group is closed when we're done working with it.
-        SCOPE_EXIT({ pg.Close(); });
-
-        // Clear all pages.
-        for (const auto& it : pg) {
-            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
-                        m_heap_fill_value, it.GetSize());
-        }
-
-        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
-    }
-
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-
-    return addr;
-}
-
 Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                                 KMemoryPermission perm, bool is_aligned,
                                                 bool check_heap) {
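Note: the new MapPageGroupImpl walks the page group block by block, mapping each physically contiguous run at the next virtual address and, via ON_RESULT_FAILURE, unmapping everything mapped so far if a later step fails. A simplified, self-contained model of that map-or-roll-back loop (toy types, with a std::map standing in for Operate() and the real page table):

// Simplified model of KPageTable::MapPageGroupImpl: map each physically
// contiguous block of a page group at successive virtual addresses, and
// unmap everything mapped so far if a later block fails.
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

constexpr std::uint64_t kPageSize = 0x1000;

struct Block {
    std::uint64_t phys_addr;
    std::uint64_t num_pages;
};

// Toy "page table": virtual page -> physical page. Stand-in for Operate().
std::map<std::uint64_t, std::uint64_t> g_mappings;

bool MapBlock(std::uint64_t virt, const Block& block) {
    // Refuse the whole block if any page is already mapped, then map it.
    for (std::uint64_t i = 0; i < block.num_pages; ++i) {
        if (g_mappings.count(virt + i * kPageSize) != 0) {
            return false;
        }
    }
    for (std::uint64_t i = 0; i < block.num_pages; ++i) {
        g_mappings[virt + i * kPageSize] = block.phys_addr + i * kPageSize;
    }
    return true;
}

void UnmapRange(std::uint64_t virt, std::uint64_t num_pages) {
    for (std::uint64_t i = 0; i < num_pages; ++i) {
        g_mappings.erase(virt + i * kPageSize);
    }
}

bool MapPageGroup(std::uint64_t start_virt, const std::vector<Block>& group) {
    std::uint64_t cur_virt = start_virt;
    for (const Block& block : group) {
        if (!MapBlock(cur_virt, block)) {
            // Roll back, mirroring the ON_RESULT_FAILURE cleanup in the real code.
            UnmapRange(start_virt, (cur_virt - start_virt) / kPageSize);
            return false;
        }
        cur_virt += block.num_pages * kPageSize;
    }
    return true;
}

int main() {
    const std::vector<Block> group{{0x80000000, 2}, {0x90000000, 1}};
    const bool ok = MapPageGroup(0x10000000, group);
    std::printf("mapped: %d, pages: %zu\n", ok, g_mappings.size()); // mapped: 1, pages: 3
}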
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 0a454b05b..367dab613 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -24,12 +24,36 @@ class System;

 namespace Kernel {

+enum class DisableMergeAttribute : u8 {
+    None = (0U << 0),
+    DisableHead = (1U << 0),
+    DisableHeadAndBody = (1U << 1),
+    EnableHeadAndBody = (1U << 2),
+    DisableTail = (1U << 3),
+    EnableTail = (1U << 4),
+    EnableAndMergeHeadBodyTail = (1U << 5),
+    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+
+struct KPageProperties {
+    KMemoryPermission perm;
+    bool io;
+    bool uncached;
+    DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
 class KBlockInfoManager;
 class KMemoryBlockManager;
 class KResourceLimit;
 class KSystemResource;

 class KPageTable final {
+protected:
+    struct PageLinkedList;
+
 public:
     enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };

@@ -57,27 +81,12 @@
     Result UnmapPhysicalMemory(VAddr addr, size_t size);
     Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
     Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
-    Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
-                    KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-    Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
     Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
     Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
     Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
     Result SetMaxHeapSize(size_t size);
     Result SetHeapSize(VAddr* out, size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
-                                          VAddr region_start, size_t region_num_pages,
-                                          KMemoryState state, KMemoryPermission perm,
-                                          PAddr map_addr = 0);
-
     Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                         KMemoryPermission perm, bool is_aligned, bool check_heap);
     Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
@@ -113,6 +122,40 @@

     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+                                region_num_pages, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+
+    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm);
+    Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+    Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                        KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+                        KMemoryPermission perm);
+    Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+                        KMemoryPermission perm);
+    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+    void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                        const KPageGroup& pg);
+
 protected:
     struct PageLinkedList {
     private:
@@ -166,11 +209,9 @@
     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
         KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;

-    Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
-                    KMemoryState state, KMemoryPermission perm);
-    Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
     void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
@@ -265,6 +306,11 @@
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);

+    Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                   size_t num_pages, KMemoryPermission perm);
+    Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                            const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;

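Note: KPageProperties is the small descriptor threaded through the new mapping paths; the two static_asserts pin it to a trivial four-byte layout (which implies KMemoryPermission is backed by a single byte). A self-contained sketch that mirrors those asserts with stand-in enums (not the real kernel types):

// Stand-in model of the KPageProperties layout: four one-byte fields packed
// into a trivial 4-byte struct, matching the static_asserts in the header.
#include <cstdint>
#include <type_traits>

enum class MemoryPermission : std::uint8_t { None = 0, UserReadWrite = 3 };

enum class DisableMergeAttribute : std::uint8_t {
    None = (0U << 0),
    DisableHead = (1U << 0),
};

struct PageProperties {
    MemoryPermission perm;
    bool io;
    bool uncached;
    DisableMergeAttribute disable_merge_attributes;
};
static_assert(std::is_trivial_v<PageProperties>);
static_assert(sizeof(PageProperties) == sizeof(std::uint32_t));

int main() {
    // Aggregate initialization, as the new call sites do with KPageProperties.
    constexpr PageProperties props{MemoryPermission::UserReadWrite, false, false,
                                   DisableMergeAttribute::DisableHead};
    return props.io ? 1 : 0;
}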
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index a1abf5d68..e201bb0cd 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -417,9 +417,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 417 | } | 417 | } |
| 418 | 418 | ||
| 419 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | 419 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { |
| 420 | AllocateMainThreadStack(stack_size); | 420 | ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess); |
| 421 | resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); | 421 | resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); |
| 422 | resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size); | ||
| 423 | 422 | ||
| 424 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; | 423 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; |
| 425 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); | 424 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); |
| @@ -675,20 +674,31 @@ void KProcess::ChangeState(State new_state) { | |||
| 675 | } | 674 | } |
| 676 | 675 | ||
| 677 | Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { | 676 | Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { |
| 678 | ASSERT(stack_size); | 677 | // Ensure that we haven't already allocated stack. |
| 679 | 678 | ASSERT(main_thread_stack_size == 0); | |
| 680 | // The kernel always ensures that the given stack size is page aligned. | 679 | |
| 681 | main_thread_stack_size = Common::AlignUp(stack_size, PageSize); | 680 | // Ensure that we're allocating a valid stack. |
| 682 | 681 | stack_size = Common::AlignUp(stack_size, PageSize); | |
| 683 | const VAddr start{page_table.GetStackRegionStart()}; | 682 | // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); |
| 684 | const std::size_t size{page_table.GetStackRegionEnd() - start}; | 683 | R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory); |
| 685 | 684 | ||
| 686 | CASCADE_RESULT(main_thread_stack_top, | 685 | // Place a tentative reservation of memory for our new stack. |
| 687 | page_table.AllocateAndMapMemory( | 686 | KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, |
| 688 | main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, | 687 | stack_size); |
| 689 | KMemoryState::Stack, KMemoryPermission::UserReadWrite)); | 688 | R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached); |
| 689 | |||
| 690 | // Allocate and map our stack. | ||
| 691 | if (stack_size) { | ||
| 692 | KProcessAddress stack_bottom; | ||
| 693 | R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, | ||
| 694 | KMemoryState::Stack, KMemoryPermission::UserReadWrite)); | ||
| 695 | |||
| 696 | main_thread_stack_top = stack_bottom + stack_size; | ||
| 697 | main_thread_stack_size = stack_size; | ||
| 698 | } | ||
| 690 | 699 | ||
| 691 | main_thread_stack_top += main_thread_stack_size; | 700 | // We succeeded! Commit our memory reservation. |
| 701 | mem_reservation.Commit(); | ||
| 692 | 702 | ||
| 693 | R_SUCCEED(); | 703 | R_SUCCEED(); |
| 694 | } | 704 | } |
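
Note on the k_process.cpp hunk above: AllocateMainThreadStack now reserves PhysicalMemoryMax itself and only commits that reservation after the stack pages are mapped, instead of Run() reserving the memory unconditionally. This is the usual commit-or-rollback guard pattern; a reduced sketch of how KScopedResourceReservation is used here (the guard and the result macros are the existing kernel helpers from the hunk, the surrounding function is illustrative):

    // Illustrative use of the reserve/commit pattern from the hunk above.
    Result AllocateSomething(KProcess* process, size_t size) {
        // Tentatively reserve memory against the process resource limit.
        KScopedResourceReservation reservation(
            process, Svc::LimitableResource::PhysicalMemoryMax, size);
        R_UNLESS(reservation.Succeeded(), ResultLimitReached);

        // ... map pages here; any early return releases the reservation ...

        // Only commit once every fallible step has succeeded.
        reservation.Commit();
        R_SUCCEED();
    }
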
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 3cf2b5d91..df505edfe 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp | |||
| @@ -94,15 +94,15 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m | |||
| 94 | R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission); | 94 | R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared, | 97 | return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared, |
| 98 | ConvertToKMemoryPermission(map_perm)); | 98 | ConvertToKMemoryPermission(map_perm)); |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { | 101 | Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) { |
| 102 | // Validate the size. | 102 | // Validate the size. |
| 103 | R_UNLESS(size == unmap_size, ResultInvalidSize); | 103 | R_UNLESS(size == unmap_size, ResultInvalidSize); |
| 104 | 104 | ||
| 105 | return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared); | 105 | return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared); |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | } // namespace Kernel | 108 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index aca442196..67fa5d71c 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -1492,8 +1492,8 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p | |||
| 1492 | KMemoryAttribute::All, KMemoryAttribute::None)); | 1492 | KMemoryAttribute::All, KMemoryAttribute::None)); |
| 1493 | 1493 | ||
| 1494 | // Map the group. | 1494 | // Map the group. |
| 1495 | R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode, | 1495 | R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode, |
| 1496 | KMemoryPermission::UserReadWrite)); | 1496 | KMemoryPermission::UserReadWrite)); |
| 1497 | 1497 | ||
| 1498 | return ResultSuccess; | 1498 | return ResultSuccess; |
| 1499 | } | 1499 | } |
diff --git a/src/input_common/drivers/joycon.cpp b/src/input_common/drivers/joycon.cpp index 7122093c6..40cda400d 100644 --- a/src/input_common/drivers/joycon.cpp +++ b/src/input_common/drivers/joycon.cpp | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <fmt/format.h> | 4 | #include <fmt/format.h> |
| 5 | 5 | ||
| 6 | #include "common/param_package.h" | 6 | #include "common/param_package.h" |
| 7 | #include "common/polyfill_ranges.h" | ||
| 7 | #include "common/settings.h" | 8 | #include "common/settings.h" |
| 8 | #include "common/thread.h" | 9 | #include "common/thread.h" |
| 9 | #include "input_common/drivers/joycon.h" | 10 | #include "input_common/drivers/joycon.h" |
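
Note on the joycon.cpp hunk above: common/polyfill_ranges.h provides std::ranges stand-ins for standard libraries that ship an incomplete ranges implementation; that characterization is based on the header's name and typical use, since the hunk itself only adds the include. The snippet below is purely an illustration of the kind of call the polyfill backs, not the driver's actual code:

    #include <algorithm>
    #include <string>
    #include <vector>

    // The container and lookup here are made up for illustration; in yuzu,
    // "common/polyfill_ranges.h" would supply std::ranges::find on toolchains
    // whose standard library lacks the ranges algorithms.
    bool HasSerial(const std::vector<std::string>& devices, const std::string& serial) {
        return std::ranges::find(devices, serial) != devices.end();
    }
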
diff --git a/src/input_common/helpers/stick_from_buttons.cpp b/src/input_common/helpers/stick_from_buttons.cpp index f3a0b3419..096c23b07 100644 --- a/src/input_common/helpers/stick_from_buttons.cpp +++ b/src/input_common/helpers/stick_from_buttons.cpp | |||
| @@ -11,6 +11,11 @@ namespace InputCommon { | |||
| 11 | 11 | ||
| 12 | class Stick final : public Common::Input::InputDevice { | 12 | class Stick final : public Common::Input::InputDevice { |
| 13 | public: | 13 | public: |
| 14 | // Some games such as EARTH DEFENSE FORCE: WORLD BROTHERS | ||
| 15 | // do not play nicely with the theoretical maximum range. | ||
| 16 | // Using a value one lower from the maximum emulates real stick behavior. | ||
| 17 | static constexpr float MAX_RANGE = 32766.0f / 32767.0f; | ||
| 18 | |||
| 14 | using Button = std::unique_ptr<Common::Input::InputDevice>; | 19 | using Button = std::unique_ptr<Common::Input::InputDevice>; |
| 15 | 20 | ||
| 16 | Stick(Button up_, Button down_, Button left_, Button right_, Button modifier_, Button updater_, | 21 | Stick(Button up_, Button down_, Button left_, Button right_, Button modifier_, Button updater_, |
| @@ -196,7 +201,7 @@ public: | |||
| 196 | } | 201 | } |
| 197 | 202 | ||
| 198 | void UpdateStatus() { | 203 | void UpdateStatus() { |
| 199 | const float coef = modifier_status.value ? modifier_scale : 1.0f; | 204 | const float coef = modifier_status.value ? modifier_scale : MAX_RANGE; |
| 200 | 205 | ||
| 201 | bool r = right_status; | 206 | bool r = right_status; |
| 202 | bool l = left_status; | 207 | bool l = left_status; |
| @@ -290,7 +295,7 @@ public: | |||
| 290 | if (down_status) { | 295 | if (down_status) { |
| 291 | --y; | 296 | --y; |
| 292 | } | 297 | } |
| 293 | const float coef = modifier_status.value ? modifier_scale : 1.0f; | 298 | const float coef = modifier_status.value ? modifier_scale : MAX_RANGE; |
| 294 | status.x.raw_value = static_cast<float>(x) * coef * (y == 0 ? 1.0f : SQRT_HALF); | 299 | status.x.raw_value = static_cast<float>(x) * coef * (y == 0 ? 1.0f : SQRT_HALF); |
| 295 | status.y.raw_value = static_cast<float>(y) * coef * (x == 0 ? 1.0f : SQRT_HALF); | 300 | status.y.raw_value = static_cast<float>(y) * coef * (x == 0 ? 1.0f : SQRT_HALF); |
| 296 | return status; | 301 | return status; |
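
Note on the stick_from_buttons.cpp hunk above: MAX_RANGE deliberately stops one step short of full deflection, so a fully pressed direction through the button-to-stick path ends up one unit below the theoretical maximum after the signed 16-bit conversion, which is what the comment means by emulating real stick behavior. A quick check of the arithmetic; the 32767 full-scale constant below is an assumption of this sketch, not part of the hunk:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Value from the hunk above.
        constexpr float MAX_RANGE = 32766.0f / 32767.0f;
        // Assumed full-scale of the signed 16-bit conversion on the HID side.
        constexpr std::int32_t kFullScale = 32767;

        // A fully pressed direction scaled by MAX_RANGE lands one step below
        // the theoretical maximum after truncation to an integer.
        const auto converted =
            static_cast<std::int32_t>(MAX_RANGE * static_cast<float>(kFullScale));
        assert(converted == 32766);
        return 0;
    }
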
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp index fb5799c42..c898ce12f 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp +++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp | |||
| @@ -436,6 +436,10 @@ Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id c | |||
| 436 | if (info.type == TextureType::Buffer) { | 436 | if (info.type == TextureType::Buffer) { |
| 437 | lod = Id{}; | 437 | lod = Id{}; |
| 438 | } | 438 | } |
| 439 | if (Sirit::ValidId(ms)) { | ||
| 440 | // This image is multisampled, lod must be implicit | ||
| 441 | lod = Id{}; | ||
| 442 | } | ||
| 439 | const ImageOperands operands(offset, lod, ms); | 443 | const ImageOperands operands(offset, lod, ms); |
| 440 | return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4], | 444 | return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4], |
| 441 | TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span()); | 445 | TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span()); |
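
Note on the emit_spirv_image.cpp hunk above: in SPIR-V, the Lod image operand on OpImageFetch is only valid for images declared with MS = 0; multisampled images select a sample via the Sample operand instead. Clearing the lod when a valid sample id is present keeps the emitted fetch within that rule. A small sketch of the decision, using made-up types rather than the emitter's ImageOperands class:

    // OpImageFetch may carry either Lod (MS = 0 images) or Sample (MS = 1 images),
    // never both, so a valid multisample id wins over an explicit lod.
    struct FetchOperands {
        bool has_lod;
        bool has_sample;
    };

    FetchOperands SelectFetchOperands(bool lod_valid, bool sample_valid) {
        if (sample_valid) {
            return {.has_lod = false, .has_sample = true}; // multisampled: implicit lod
        }
        return {.has_lod = lod_valid, .has_sample = false};
    }
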
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp index a0c155fdb..3b97721e1 100644 --- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp +++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp | |||
| @@ -35,6 +35,7 @@ Id ImageType(EmitContext& ctx, const TextureDescriptor& desc) { | |||
| 35 | const spv::ImageFormat format{spv::ImageFormat::Unknown}; | 35 | const spv::ImageFormat format{spv::ImageFormat::Unknown}; |
| 36 | const Id type{ctx.F32[1]}; | 36 | const Id type{ctx.F32[1]}; |
| 37 | const bool depth{desc.is_depth}; | 37 | const bool depth{desc.is_depth}; |
| 38 | const bool ms{desc.is_multisample}; | ||
| 38 | switch (desc.type) { | 39 | switch (desc.type) { |
| 39 | case TextureType::Color1D: | 40 | case TextureType::Color1D: |
| 40 | return ctx.TypeImage(type, spv::Dim::Dim1D, depth, false, false, 1, format); | 41 | return ctx.TypeImage(type, spv::Dim::Dim1D, depth, false, false, 1, format); |
| @@ -42,9 +43,9 @@ Id ImageType(EmitContext& ctx, const TextureDescriptor& desc) { | |||
| 42 | return ctx.TypeImage(type, spv::Dim::Dim1D, depth, true, false, 1, format); | 43 | return ctx.TypeImage(type, spv::Dim::Dim1D, depth, true, false, 1, format); |
| 43 | case TextureType::Color2D: | 44 | case TextureType::Color2D: |
| 44 | case TextureType::Color2DRect: | 45 | case TextureType::Color2DRect: |
| 45 | return ctx.TypeImage(type, spv::Dim::Dim2D, depth, false, false, 1, format); | 46 | return ctx.TypeImage(type, spv::Dim::Dim2D, depth, false, ms, 1, format); |
| 46 | case TextureType::ColorArray2D: | 47 | case TextureType::ColorArray2D: |
| 47 | return ctx.TypeImage(type, spv::Dim::Dim2D, depth, true, false, 1, format); | 48 | return ctx.TypeImage(type, spv::Dim::Dim2D, depth, true, ms, 1, format); |
| 48 | case TextureType::Color3D: | 49 | case TextureType::Color3D: |
| 49 | return ctx.TypeImage(type, spv::Dim::Dim3D, depth, false, false, 1, format); | 50 | return ctx.TypeImage(type, spv::Dim::Dim3D, depth, false, false, 1, format); |
| 50 | case TextureType::ColorCube: | 51 | case TextureType::ColorCube: |
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index f5c86fcb1..9718c6921 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp | |||
| @@ -524,6 +524,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo | |||
| 524 | 524 | ||
| 525 | const auto& cbuf{texture_inst.cbuf}; | 525 | const auto& cbuf{texture_inst.cbuf}; |
| 526 | auto flags{inst->Flags<IR::TextureInstInfo>()}; | 526 | auto flags{inst->Flags<IR::TextureInstInfo>()}; |
| 527 | bool is_multisample{false}; | ||
| 527 | switch (inst->GetOpcode()) { | 528 | switch (inst->GetOpcode()) { |
| 528 | case IR::Opcode::ImageQueryDimensions: | 529 | case IR::Opcode::ImageQueryDimensions: |
| 529 | flags.type.Assign(ReadTextureType(env, cbuf)); | 530 | flags.type.Assign(ReadTextureType(env, cbuf)); |
| @@ -538,6 +539,12 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo | |||
| 538 | } | 539 | } |
| 539 | break; | 540 | break; |
| 540 | case IR::Opcode::ImageFetch: | 541 | case IR::Opcode::ImageFetch: |
| 542 | if (flags.type == TextureType::Color2D || flags.type == TextureType::Color2DRect || | ||
| 543 | flags.type == TextureType::ColorArray2D) { | ||
| 544 | is_multisample = !inst->Arg(4).IsEmpty(); | ||
| 545 | } else { | ||
| 546 | inst->SetArg(4, IR::U32{}); | ||
| 547 | } | ||
| 541 | if (flags.type != TextureType::Color1D) { | 548 | if (flags.type != TextureType::Color1D) { |
| 542 | break; | 549 | break; |
| 543 | } | 550 | } |
| @@ -613,6 +620,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo | |||
| 613 | index = descriptors.Add(TextureDescriptor{ | 620 | index = descriptors.Add(TextureDescriptor{ |
| 614 | .type = flags.type, | 621 | .type = flags.type, |
| 615 | .is_depth = flags.is_depth != 0, | 622 | .is_depth = flags.is_depth != 0, |
| 623 | .is_multisample = is_multisample, | ||
| 616 | .has_secondary = cbuf.has_secondary, | 624 | .has_secondary = cbuf.has_secondary, |
| 617 | .cbuf_index = cbuf.index, | 625 | .cbuf_index = cbuf.index, |
| 618 | .cbuf_offset = cbuf.offset, | 626 | .cbuf_offset = cbuf.offset, |
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h index f93181e1e..d308db942 100644 --- a/src/shader_recompiler/shader_info.h +++ b/src/shader_recompiler/shader_info.h | |||
| @@ -109,6 +109,7 @@ using ImageBufferDescriptors = boost::container::small_vector<ImageBufferDescrip | |||
| 109 | struct TextureDescriptor { | 109 | struct TextureDescriptor { |
| 110 | TextureType type; | 110 | TextureType type; |
| 111 | bool is_depth; | 111 | bool is_depth; |
| 112 | bool is_multisample; | ||
| 112 | bool has_secondary; | 113 | bool has_secondary; |
| 113 | u32 cbuf_index; | 114 | u32 cbuf_index; |
| 114 | u32 cbuf_offset; | 115 | u32 cbuf_offset; |
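
Note on the texture_pass.cpp, shader_info.h and spirv_emit_context.cpp hunks above: together they thread a per-descriptor multisample flag from IR analysis down to the declared image type. ImageFetch on a 2D texture with a non-empty sample argument marks the descriptor, and ImageType() then declares the SPIR-V image with MS = 1 instead of the previous always-MS = 0 declaration. A condensed sketch of the end of that path, reusing names from the hunks; the wrapper function itself is illustrative, standing in for the 2D cases of ImageType():

    // Illustrative only: how the new field is meant to influence the image type.
    Id Declare2DImage(EmitContext& ctx, const TextureDescriptor& desc) {
        const Id sampled_type{ctx.F32[1]};
        const bool depth{desc.is_depth};
        // Set by the texture pass when ImageFetch carries a sample index.
        const bool ms{desc.is_multisample};
        return ctx.TypeImage(sampled_type, spv::Dim::Dim2D, depth,
                             /*arrayed=*/false, ms, 1, spv::ImageFormat::Unknown);
    }
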
diff --git a/src/yuzu/configuration/configure_dialog.cpp b/src/yuzu/configuration/configure_dialog.cpp index 4301313cf..2aaefcc05 100644 --- a/src/yuzu/configuration/configure_dialog.cpp +++ b/src/yuzu/configuration/configure_dialog.cpp | |||
| @@ -66,7 +66,6 @@ ConfigureDialog::ConfigureDialog(QWidget* parent, HotkeyRegistry& registry_, | |||
| 66 | 66 | ||
| 67 | web_tab->SetWebServiceConfigEnabled(enable_web_config); | 67 | web_tab->SetWebServiceConfigEnabled(enable_web_config); |
| 68 | hotkeys_tab->Populate(registry); | 68 | hotkeys_tab->Populate(registry); |
| 69 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 70 | 69 | ||
| 71 | input_tab->Initialize(input_subsystem); | 70 | input_tab->Initialize(input_subsystem); |
| 72 | 71 | ||
diff --git a/src/yuzu/configuration/configure_motion_touch.cpp b/src/yuzu/configuration/configure_motion_touch.cpp index d1b870c72..fb1292f07 100644 --- a/src/yuzu/configuration/configure_motion_touch.cpp +++ b/src/yuzu/configuration/configure_motion_touch.cpp | |||
| @@ -89,7 +89,6 @@ ConfigureMotionTouch::ConfigureMotionTouch(QWidget* parent, | |||
| 89 | "using-a-controller-or-android-phone-for-motion-or-touch-input'><span " | 89 | "using-a-controller-or-android-phone-for-motion-or-touch-input'><span " |
| 90 | "style=\"text-decoration: underline; color:#039be5;\">Learn More</span></a>")); | 90 | "style=\"text-decoration: underline; color:#039be5;\">Learn More</span></a>")); |
| 91 | 91 | ||
| 92 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 93 | SetConfiguration(); | 92 | SetConfiguration(); |
| 94 | UpdateUiDisplay(); | 93 | UpdateUiDisplay(); |
| 95 | ConnectEvents(); | 94 | ConnectEvents(); |
diff --git a/src/yuzu/configuration/configure_per_game.cpp b/src/yuzu/configuration/configure_per_game.cpp index 93db47cfd..7e757eafd 100644 --- a/src/yuzu/configuration/configure_per_game.cpp +++ b/src/yuzu/configuration/configure_per_game.cpp | |||
| @@ -66,8 +66,6 @@ ConfigurePerGame::ConfigurePerGame(QWidget* parent, u64 title_id_, const std::st | |||
| 66 | 66 | ||
| 67 | setFocusPolicy(Qt::ClickFocus); | 67 | setFocusPolicy(Qt::ClickFocus); |
| 68 | setWindowTitle(tr("Properties")); | 68 | setWindowTitle(tr("Properties")); |
| 69 | // remove Help question mark button from the title bar | ||
| 70 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 71 | 69 | ||
| 72 | addons_tab->SetTitleId(title_id); | 70 | addons_tab->SetTitleId(title_id); |
| 73 | 71 | ||
diff --git a/src/yuzu/configuration/configure_tas.cpp b/src/yuzu/configuration/configure_tas.cpp index 1edc5f1f3..5a545aa70 100644 --- a/src/yuzu/configuration/configure_tas.cpp +++ b/src/yuzu/configuration/configure_tas.cpp | |||
| @@ -17,7 +17,6 @@ ConfigureTasDialog::ConfigureTasDialog(QWidget* parent) | |||
| 17 | 17 | ||
| 18 | setFocusPolicy(Qt::ClickFocus); | 18 | setFocusPolicy(Qt::ClickFocus); |
| 19 | setWindowTitle(tr("TAS Configuration")); | 19 | setWindowTitle(tr("TAS Configuration")); |
| 20 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 21 | 20 | ||
| 22 | connect(ui->tas_path_button, &QToolButton::pressed, this, | 21 | connect(ui->tas_path_button, &QToolButton::pressed, this, |
| 23 | [this] { SetDirectory(DirectoryTarget::TAS, ui->tas_path_edit); }); | 22 | [this] { SetDirectory(DirectoryTarget::TAS, ui->tas_path_edit); }); |
diff --git a/src/yuzu/debugger/controller.cpp b/src/yuzu/debugger/controller.cpp index 19f3775a3..e2f55ebae 100644 --- a/src/yuzu/debugger/controller.cpp +++ b/src/yuzu/debugger/controller.cpp | |||
| @@ -20,9 +20,8 @@ ControllerDialog::ControllerDialog(Core::HID::HIDCore& hid_core_, | |||
| 20 | setWindowTitle(tr("Controller P1")); | 20 | setWindowTitle(tr("Controller P1")); |
| 21 | resize(500, 350); | 21 | resize(500, 350); |
| 22 | setMinimumSize(500, 350); | 22 | setMinimumSize(500, 350); |
| 23 | // Remove the "?" button from the titlebar and enable the maximize button | 23 | // Enable the maximize button |
| 24 | setWindowFlags((windowFlags() & ~Qt::WindowContextHelpButtonHint) | | 24 | setWindowFlags(windowFlags() | Qt::WindowMaximizeButtonHint); |
| 25 | Qt::WindowMaximizeButtonHint); | ||
| 26 | 25 | ||
| 27 | widget = new PlayerControlPreview(this); | 26 | widget = new PlayerControlPreview(this); |
| 28 | refreshConfiguration(); | 27 | refreshConfiguration(); |
diff --git a/src/yuzu/debugger/profiler.cpp b/src/yuzu/debugger/profiler.cpp index d3e2d3c12..493ee0b17 100644 --- a/src/yuzu/debugger/profiler.cpp +++ b/src/yuzu/debugger/profiler.cpp | |||
| @@ -49,9 +49,8 @@ MicroProfileDialog::MicroProfileDialog(QWidget* parent) : QWidget(parent, Qt::Di | |||
| 49 | setObjectName(QStringLiteral("MicroProfile")); | 49 | setObjectName(QStringLiteral("MicroProfile")); |
| 50 | setWindowTitle(tr("&MicroProfile")); | 50 | setWindowTitle(tr("&MicroProfile")); |
| 51 | resize(1000, 600); | 51 | resize(1000, 600); |
| 52 | // Remove the "?" button from the titlebar and enable the maximize button | 52 | // Enable the maximize button |
| 53 | setWindowFlags((windowFlags() & ~Qt::WindowContextHelpButtonHint) | | 53 | setWindowFlags(windowFlags() | Qt::WindowMaximizeButtonHint); |
| 54 | Qt::WindowMaximizeButtonHint); | ||
| 55 | 54 | ||
| 56 | #if MICROPROFILE_ENABLED | 55 | #if MICROPROFILE_ENABLED |
| 57 | 56 | ||
diff --git a/src/yuzu/install_dialog.cpp b/src/yuzu/install_dialog.cpp index 84ec4fe13..673bbaa83 100644 --- a/src/yuzu/install_dialog.cpp +++ b/src/yuzu/install_dialog.cpp | |||
| @@ -46,7 +46,6 @@ InstallDialog::InstallDialog(QWidget* parent, const QStringList& files) : QDialo | |||
| 46 | vbox_layout->addLayout(hbox_layout); | 46 | vbox_layout->addLayout(hbox_layout); |
| 47 | 47 | ||
| 48 | setLayout(vbox_layout); | 48 | setLayout(vbox_layout); |
| 49 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 50 | setWindowTitle(tr("Install Files to NAND")); | 49 | setWindowTitle(tr("Install Files to NAND")); |
| 51 | } | 50 | } |
| 52 | 51 | ||
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index 571eacf9f..62aaf41bf 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp | |||
| @@ -2758,8 +2758,7 @@ void GMainWindow::OnMenuInstallToNAND() { | |||
| 2758 | ui->action_Install_File_NAND->setEnabled(false); | 2758 | ui->action_Install_File_NAND->setEnabled(false); |
| 2759 | 2759 | ||
| 2760 | install_progress = new QProgressDialog(QString{}, tr("Cancel"), 0, total_size, this); | 2760 | install_progress = new QProgressDialog(QString{}, tr("Cancel"), 0, total_size, this); |
| 2761 | install_progress->setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint & | 2761 | install_progress->setWindowFlags(windowFlags() & ~Qt::WindowMaximizeButtonHint); |
| 2762 | ~Qt::WindowMaximizeButtonHint); | ||
| 2763 | install_progress->setAttribute(Qt::WA_DeleteOnClose, true); | 2762 | install_progress->setAttribute(Qt::WA_DeleteOnClose, true); |
| 2764 | install_progress->setFixedWidth(installDialog.GetMinimumWidth() + 40); | 2763 | install_progress->setFixedWidth(installDialog.GetMinimumWidth() + 40); |
| 2765 | install_progress->show(); | 2764 | install_progress->show(); |
| @@ -4456,6 +4455,11 @@ int main(int argc, char* argv[]) { | |||
| 4456 | } | 4455 | } |
| 4457 | #endif | 4456 | #endif |
| 4458 | 4457 | ||
| 4458 | #if QT_VERSION < QT_VERSION_CHECK(6, 0, 0) | ||
| 4459 | // Disables the "?" button on all dialogs. Disabled by default on Qt6. | ||
| 4460 | QCoreApplication::setAttribute(Qt::AA_DisableWindowContextHelpButton); | ||
| 4461 | #endif | ||
| 4462 | |||
| 4459 | // Enables the core to make the qt created contexts current on std::threads | 4463 | // Enables the core to make the qt created contexts current on std::threads |
| 4460 | QCoreApplication::setAttribute(Qt::AA_DontCheckOpenGLContextThreadAffinity); | 4464 | QCoreApplication::setAttribute(Qt::AA_DontCheckOpenGLContextThreadAffinity); |
| 4461 | QApplication app(argc, argv); | 4465 | QApplication app(argc, argv); |
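
Note on the Qt hunks above: all of the per-dialog WindowContextHelpButtonHint removals in this change rely on the application-wide attribute added in main.cpp. On Qt 5, Qt::AA_DisableWindowContextHelpButton (available since Qt 5.10) suppresses the "?" title-bar button for every dialog; on Qt 6 the hint is no longer set by default, hence the version guard. A minimal stand-alone illustration, assuming a Qt widgets build environment:

    #include <QApplication>
    #include <QDialog>

    int main(int argc, char* argv[]) {
        // Set before the QApplication is constructed, matching the hunk above.
    #if QT_VERSION < QT_VERSION_CHECK(6, 0, 0)
        QCoreApplication::setAttribute(Qt::AA_DisableWindowContextHelpButton);
    #endif
        QApplication app(argc, argv);

        QDialog dialog;
        dialog.setWindowTitle(QStringLiteral("No \"?\" button in the title bar"));
        dialog.show();
        return app.exec();
    }
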
diff --git a/src/yuzu/util/limitable_input_dialog.cpp b/src/yuzu/util/limitable_input_dialog.cpp index bbb370595..5f6a9c193 100644 --- a/src/yuzu/util/limitable_input_dialog.cpp +++ b/src/yuzu/util/limitable_input_dialog.cpp | |||
| @@ -16,8 +16,6 @@ LimitableInputDialog::LimitableInputDialog(QWidget* parent) : QDialog{parent} { | |||
| 16 | LimitableInputDialog::~LimitableInputDialog() = default; | 16 | LimitableInputDialog::~LimitableInputDialog() = default; |
| 17 | 17 | ||
| 18 | void LimitableInputDialog::CreateUI() { | 18 | void LimitableInputDialog::CreateUI() { |
| 19 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 20 | |||
| 21 | text_label = new QLabel(this); | 19 | text_label = new QLabel(this); |
| 22 | text_entry = new QLineEdit(this); | 20 | text_entry = new QLineEdit(this); |
| 23 | text_label_invalid = new QLabel(this); | 21 | text_label_invalid = new QLabel(this); |
diff --git a/src/yuzu/util/sequence_dialog/sequence_dialog.cpp b/src/yuzu/util/sequence_dialog/sequence_dialog.cpp index 4b10fa517..1670aa596 100644 --- a/src/yuzu/util/sequence_dialog/sequence_dialog.cpp +++ b/src/yuzu/util/sequence_dialog/sequence_dialog.cpp | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | SequenceDialog::SequenceDialog(QWidget* parent) : QDialog(parent) { | 9 | SequenceDialog::SequenceDialog(QWidget* parent) : QDialog(parent) { |
| 10 | setWindowTitle(tr("Enter a hotkey")); | 10 | setWindowTitle(tr("Enter a hotkey")); |
| 11 | setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint); | ||
| 12 | 11 | ||
| 13 | key_sequence = new QKeySequenceEdit; | 12 | key_sequence = new QKeySequenceEdit; |
| 14 | 13 | ||
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h index 6fcf04e1b..67d230462 100644 --- a/src/yuzu_cmd/default_ini.h +++ b/src/yuzu_cmd/default_ini.h | |||
| @@ -5,8 +5,8 @@ | |||
| 5 | 5 | ||
| 6 | namespace DefaultINI { | 6 | namespace DefaultINI { |
| 7 | 7 | ||
| 8 | const char* sdl2_config_file = R"( | 8 | const char* sdl2_config_file = |
| 9 | 9 | R"( | |
| 10 | [ControlsP0] | 10 | [ControlsP0] |
| 11 | # The input devices and parameters for each Switch native input | 11 | # The input devices and parameters for each Switch native input |
| 12 | # The config section determines the player number where the config will be applied on. For example "ControlsP0", "ControlsP1", ... | 12 | # The config section determines the player number where the config will be applied on. For example "ControlsP0", "ControlsP1", ... |
| @@ -143,6 +143,8 @@ mouse_enabled = | |||
| 143 | # 0 (default): Disabled, 1: Enabled | 143 | # 0 (default): Disabled, 1: Enabled |
| 144 | keyboard_enabled = | 144 | keyboard_enabled = |
| 145 | 145 | ||
| 146 | )" | ||
| 147 | R"( | ||
| 146 | [Core] | 148 | [Core] |
| 147 | # Whether to use multi-core for CPU emulation | 149 | # Whether to use multi-core for CPU emulation |
| 148 | # 0: Disabled, 1 (default): Enabled | 150 | # 0: Disabled, 1 (default): Enabled |
| @@ -242,6 +244,8 @@ cpuopt_unsafe_fastmem_check = | |||
| 242 | # 0: Disabled, 1 (default): Enabled | 244 | # 0: Disabled, 1 (default): Enabled |
| 243 | cpuopt_unsafe_ignore_global_monitor = | 245 | cpuopt_unsafe_ignore_global_monitor = |
| 244 | 246 | ||
| 247 | )" | ||
| 248 | R"( | ||
| 245 | [Renderer] | 249 | [Renderer] |
| 246 | # Which backend API to use. | 250 | # Which backend API to use. |
| 247 | # 0: OpenGL, 1 (default): Vulkan | 251 | # 0: OpenGL, 1 (default): Vulkan |
| @@ -360,6 +364,8 @@ bg_red = | |||
| 360 | bg_blue = | 364 | bg_blue = |
| 361 | bg_green = | 365 | bg_green = |
| 362 | 366 | ||
| 367 | )" | ||
| 368 | R"( | ||
| 363 | [Audio] | 369 | [Audio] |
| 364 | # Which audio output engine to use. | 370 | # Which audio output engine to use. |
| 365 | # auto (default): Auto-select | 371 | # auto (default): Auto-select |