diff options
| author | 2019-07-07 09:42:54 -0700 | |
|---|---|---|
| committer | 2019-07-07 11:45:53 -0700 | |
| commit | 13a8fde3ad2a4a37cf1bb8dcb367b4c8fc8b4d9b (patch) | |
| tree | 5baf26505ec000e221c1119ba4dd2d0bca93de0e /src | |
| parent | Merge pull request #2674 from lioncash/reporter (diff) | |
| download | yuzu-13a8fde3ad2a4a37cf1bb8dcb367b4c8fc8b4d9b.tar.gz yuzu-13a8fde3ad2a4a37cf1bb8dcb367b4c8fc8b4d9b.tar.xz yuzu-13a8fde3ad2a4a37cf1bb8dcb367b4c8fc8b4d9b.zip | |
Implement MapPhysicalMemory/UnmapPhysicalMemory
This implements svcMapPhysicalMemory/svcUnmapPhysicalMemory for Yuzu,
which games since firmware version 3.0.0 can use to map memory at a
desired address.
It also properly parses SystemResourceSize from NPDM, and makes
information available via svcGetInfo.
This is needed for games like Super Smash Bros. and Diablo 3 -- this
PR's implementation does not run into the "ASCII reads" issue mentioned
in the comments of #2626, which was caused by the following bugs in
Yuzu's memory management that this PR also addresses:
* Yuzu's memory coalescing does not properly merge blocks. This results
in a polluted address space/svcQueryMemory results that would be
impossible to replicate on hardware, which can lead to game code making
the wrong assumptions about memory layout.
* This implements better merging for AllocatedMemoryBlocks.
* Yuzu's implementation of svcMirrorMemory unprotected the entire
virtual memory range containing the range being mirrored. This could
lead to games attempting to map data into that unprotected range, or
attempting to access that range after yuzu improperly unmapped it.
* This PR fixes it by simply calling ReprotectRange instead of
Reprotect.
Diffstat (limited to 'src')
| -rw-r--r-- | src/core/file_sys/program_metadata.cpp | 4 | ||||
| -rw-r--r-- | src/core/file_sys/program_metadata.h | 4 | ||||
| -rw-r--r-- | src/core/hle/kernel/process.cpp | 1 | ||||
| -rw-r--r-- | src/core/hle/kernel/process.h | 11 | ||||
| -rw-r--r-- | src/core/hle/kernel/svc.cpp | 110 | ||||
| -rw-r--r-- | src/core/hle/kernel/svc_wrap.h | 5 | ||||
| -rw-r--r-- | src/core/hle/kernel/vm_manager.cpp | 320 | ||||
| -rw-r--r-- | src/core/hle/kernel/vm_manager.h | 41 |
8 files changed, 475 insertions, 21 deletions
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp index eb76174c5..7310b3602 100644 --- a/src/core/file_sys/program_metadata.cpp +++ b/src/core/file_sys/program_metadata.cpp | |||
| @@ -94,6 +94,10 @@ u64 ProgramMetadata::GetFilesystemPermissions() const { | |||
| 94 | return aci_file_access.permissions; | 94 | return aci_file_access.permissions; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | u32 ProgramMetadata::GetSystemResourceSize() const { | ||
| 98 | return npdm_header.system_resource_size; | ||
| 99 | } | ||
| 100 | |||
| 97 | const ProgramMetadata::KernelCapabilityDescriptors& ProgramMetadata::GetKernelCapabilities() const { | 101 | const ProgramMetadata::KernelCapabilityDescriptors& ProgramMetadata::GetKernelCapabilities() const { |
| 98 | return aci_kernel_capabilities; | 102 | return aci_kernel_capabilities; |
| 99 | } | 103 | } |
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h index 43bf2820a..88ec97d85 100644 --- a/src/core/file_sys/program_metadata.h +++ b/src/core/file_sys/program_metadata.h | |||
| @@ -58,6 +58,7 @@ public: | |||
| 58 | u32 GetMainThreadStackSize() const; | 58 | u32 GetMainThreadStackSize() const; |
| 59 | u64 GetTitleID() const; | 59 | u64 GetTitleID() const; |
| 60 | u64 GetFilesystemPermissions() const; | 60 | u64 GetFilesystemPermissions() const; |
| 61 | u32 GetSystemResourceSize() const; | ||
| 61 | const KernelCapabilityDescriptors& GetKernelCapabilities() const; | 62 | const KernelCapabilityDescriptors& GetKernelCapabilities() const; |
| 62 | 63 | ||
| 63 | void Print() const; | 64 | void Print() const; |
| @@ -76,7 +77,8 @@ private: | |||
| 76 | u8 reserved_3; | 77 | u8 reserved_3; |
| 77 | u8 main_thread_priority; | 78 | u8 main_thread_priority; |
| 78 | u8 main_thread_cpu; | 79 | u8 main_thread_cpu; |
| 79 | std::array<u8, 8> reserved_4; | 80 | std::array<u8, 4> reserved_4; |
| 81 | u32_le system_resource_size; | ||
| 80 | u32_le process_category; | 82 | u32_le process_category; |
| 81 | u32_le main_stack_size; | 83 | u32_le main_stack_size; |
| 82 | std::array<u8, 0x10> application_name; | 84 | std::array<u8, 0x10> application_name; |
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index f45ef05f6..51245cbb4 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp | |||
| @@ -172,6 +172,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { | |||
| 172 | program_id = metadata.GetTitleID(); | 172 | program_id = metadata.GetTitleID(); |
| 173 | ideal_core = metadata.GetMainThreadCore(); | 173 | ideal_core = metadata.GetMainThreadCore(); |
| 174 | is_64bit_process = metadata.Is64BitProgram(); | 174 | is_64bit_process = metadata.Is64BitProgram(); |
| 175 | system_resource_size = metadata.GetSystemResourceSize(); | ||
| 175 | 176 | ||
| 176 | vm_manager.Reset(metadata.GetAddressSpaceType()); | 177 | vm_manager.Reset(metadata.GetAddressSpaceType()); |
| 177 | 178 | ||
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 83ea02bee..b0e795577 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h | |||
| @@ -168,8 +168,9 @@ public: | |||
| 168 | return capabilities.GetPriorityMask(); | 168 | return capabilities.GetPriorityMask(); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | u32 IsVirtualMemoryEnabled() const { | 171 | /// Gets the amount of secure memory to allocate for memory management. |
| 172 | return is_virtual_address_memory_enabled; | 172 | u32 GetSystemResourceSize() const { |
| 173 | return system_resource_size; | ||
| 173 | } | 174 | } |
| 174 | 175 | ||
| 175 | /// Whether this process is an AArch64 or AArch32 process. | 176 | /// Whether this process is an AArch64 or AArch32 process. |
| @@ -298,12 +299,16 @@ private: | |||
| 298 | /// Title ID corresponding to the process | 299 | /// Title ID corresponding to the process |
| 299 | u64 program_id = 0; | 300 | u64 program_id = 0; |
| 300 | 301 | ||
| 302 | /// Specifies additional memory to be reserved for the process's memory management by the | ||
| 303 | /// system. When this is non-zero, secure memory is allocated and used for page table allocation | ||
| 304 | /// instead of using the normal global page tables/memory block management. | ||
| 305 | u32 system_resource_size = 0; | ||
| 306 | |||
| 301 | /// Resource limit descriptor for this process | 307 | /// Resource limit descriptor for this process |
| 302 | SharedPtr<ResourceLimit> resource_limit; | 308 | SharedPtr<ResourceLimit> resource_limit; |
| 303 | 309 | ||
| 304 | /// The ideal CPU core for this process, threads are scheduled on this core by default. | 310 | /// The ideal CPU core for this process, threads are scheduled on this core by default. |
| 305 | u8 ideal_core = 0; | 311 | u8 ideal_core = 0; |
| 306 | u32 is_virtual_address_memory_enabled = 0; | ||
| 307 | 312 | ||
| 308 | /// The Thread Local Storage area is allocated as processes create threads, | 313 | /// The Thread Local Storage area is allocated as processes create threads, |
| 309 | /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part | 314 | /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part |
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 332573a95..abb374892 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -729,8 +729,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 729 | StackRegionBaseAddr = 14, | 729 | StackRegionBaseAddr = 14, |
| 730 | StackRegionSize = 15, | 730 | StackRegionSize = 15, |
| 731 | // 3.0.0+ | 731 | // 3.0.0+ |
| 732 | IsVirtualAddressMemoryEnabled = 16, | 732 | SystemResourceSize = 16, |
| 733 | PersonalMmHeapUsage = 17, | 733 | SystemResourceUsage = 17, |
| 734 | TitleId = 18, | 734 | TitleId = 18, |
| 735 | // 4.0.0+ | 735 | // 4.0.0+ |
| 736 | PrivilegedProcessId = 19, | 736 | PrivilegedProcessId = 19, |
| @@ -756,8 +756,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 756 | case GetInfoType::StackRegionSize: | 756 | case GetInfoType::StackRegionSize: |
| 757 | case GetInfoType::TotalPhysicalMemoryAvailable: | 757 | case GetInfoType::TotalPhysicalMemoryAvailable: |
| 758 | case GetInfoType::TotalPhysicalMemoryUsed: | 758 | case GetInfoType::TotalPhysicalMemoryUsed: |
| 759 | case GetInfoType::IsVirtualAddressMemoryEnabled: | 759 | case GetInfoType::SystemResourceSize: |
| 760 | case GetInfoType::PersonalMmHeapUsage: | 760 | case GetInfoType::SystemResourceUsage: |
| 761 | case GetInfoType::TitleId: | 761 | case GetInfoType::TitleId: |
| 762 | case GetInfoType::UserExceptionContextAddr: | 762 | case GetInfoType::UserExceptionContextAddr: |
| 763 | case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap: | 763 | case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap: |
| @@ -822,8 +822,22 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 822 | *result = process->GetTotalPhysicalMemoryUsed(); | 822 | *result = process->GetTotalPhysicalMemoryUsed(); |
| 823 | return RESULT_SUCCESS; | 823 | return RESULT_SUCCESS; |
| 824 | 824 | ||
| 825 | case GetInfoType::IsVirtualAddressMemoryEnabled: | 825 | case GetInfoType::SystemResourceSize: |
| 826 | *result = process->IsVirtualMemoryEnabled(); | 826 | *result = process->GetSystemResourceSize(); |
| 827 | return RESULT_SUCCESS; | ||
| 828 | |||
| 829 | case GetInfoType::SystemResourceUsage: | ||
| 830 | // On hardware, this returns the amount of system resource memory that has | ||
| 831 | // been used by the kernel. This is problematic for Yuzu to emulate, because | ||
| 832 | // system resource memory is used for page tables -- and yuzu doesn't really | ||
| 833 | // have a way to calculate how much memory is required for page tables for | ||
| 834 | // the current process at any given time. | ||
| 835 | // TODO: Is this even worth implementing? No game should ever use it, since | ||
| 836 | // the amount of remaining page table space should never be relevant except | ||
| 837 | // for diagnostics. Is returning a value other than zero wise? | ||
| 838 | LOG_WARNING(Kernel_SVC, | ||
| 839 | "(STUBBED) Attempted to query system resource usage, returned 0"); | ||
| 840 | *result = 0; | ||
| 827 | return RESULT_SUCCESS; | 841 | return RESULT_SUCCESS; |
| 828 | 842 | ||
| 829 | case GetInfoType::TitleId: | 843 | case GetInfoType::TitleId: |
| @@ -946,6 +960,86 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 946 | } | 960 | } |
| 947 | } | 961 | } |
| 948 | 962 | ||
| 963 | /// Maps memory at a desired address | ||
| 964 | static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { | ||
| 965 | LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); | ||
| 966 | |||
| 967 | if (!Common::Is4KBAligned(addr)) { | ||
| 968 | LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); | ||
| 969 | return ERR_INVALID_ADDRESS; | ||
| 970 | } | ||
| 971 | |||
| 972 | if (!Common::Is4KBAligned(size)) { | ||
| 973 | LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); | ||
| 974 | return ERR_INVALID_SIZE; | ||
| 975 | } | ||
| 976 | |||
| 977 | if (size == 0) { | ||
| 978 | LOG_ERROR(Kernel_SVC, "Size is zero"); | ||
| 979 | return ERR_INVALID_SIZE; | ||
| 980 | } | ||
| 981 | |||
| 982 | if (!(addr < addr + size)) { | ||
| 983 | LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); | ||
| 984 | return ERR_INVALID_MEMORY_RANGE; | ||
| 985 | } | ||
| 986 | |||
| 987 | auto* const current_process = Core::CurrentProcess(); | ||
| 988 | auto& vm_manager = current_process->VMManager(); | ||
| 989 | |||
| 990 | if (current_process->GetSystemResourceSize() == 0) { | ||
| 991 | LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); | ||
| 992 | return ERR_INVALID_STATE; | ||
| 993 | } | ||
| 994 | |||
| 995 | if (!vm_manager.IsWithinMapRegion(addr, size)) { | ||
| 996 | LOG_ERROR(Kernel_SVC, "Range not within map region"); | ||
| 997 | return ERR_INVALID_MEMORY_RANGE; | ||
| 998 | } | ||
| 999 | |||
| 1000 | return vm_manager.MapPhysicalMemory(addr, size); | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | /// Unmaps memory previously mapped via MapPhysicalMemory | ||
| 1004 | static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { | ||
| 1005 | LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); | ||
| 1006 | |||
| 1007 | if (!Common::Is4KBAligned(addr)) { | ||
| 1008 | LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); | ||
| 1009 | return ERR_INVALID_ADDRESS; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | if (!Common::Is4KBAligned(size)) { | ||
| 1013 | LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); | ||
| 1014 | return ERR_INVALID_SIZE; | ||
| 1015 | } | ||
| 1016 | |||
| 1017 | if (size == 0) { | ||
| 1018 | LOG_ERROR(Kernel_SVC, "Size is zero"); | ||
| 1019 | return ERR_INVALID_SIZE; | ||
| 1020 | } | ||
| 1021 | |||
| 1022 | if (!(addr < addr + size)) { | ||
| 1023 | LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); | ||
| 1024 | return ERR_INVALID_MEMORY_RANGE; | ||
| 1025 | } | ||
| 1026 | |||
| 1027 | auto* const current_process = Core::CurrentProcess(); | ||
| 1028 | auto& vm_manager = current_process->VMManager(); | ||
| 1029 | |||
| 1030 | if (current_process->GetSystemResourceSize() == 0) { | ||
| 1031 | LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); | ||
| 1032 | return ERR_INVALID_STATE; | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | if (!vm_manager.IsWithinMapRegion(addr, size)) { | ||
| 1036 | LOG_ERROR(Kernel_SVC, "Range not within map region"); | ||
| 1037 | return ERR_INVALID_MEMORY_RANGE; | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | return vm_manager.UnmapPhysicalMemory(addr, size); | ||
| 1041 | } | ||
| 1042 | |||
| 949 | /// Sets the thread activity | 1043 | /// Sets the thread activity |
| 950 | static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { | 1044 | static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { |
| 951 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); | 1045 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); |
| @@ -2303,8 +2397,8 @@ static const FunctionDef SVC_Table[] = { | |||
| 2303 | {0x29, SvcWrap<GetInfo>, "GetInfo"}, | 2397 | {0x29, SvcWrap<GetInfo>, "GetInfo"}, |
| 2304 | {0x2A, nullptr, "FlushEntireDataCache"}, | 2398 | {0x2A, nullptr, "FlushEntireDataCache"}, |
| 2305 | {0x2B, nullptr, "FlushDataCache"}, | 2399 | {0x2B, nullptr, "FlushDataCache"}, |
| 2306 | {0x2C, nullptr, "MapPhysicalMemory"}, | 2400 | {0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"}, |
| 2307 | {0x2D, nullptr, "UnmapPhysicalMemory"}, | 2401 | {0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"}, |
| 2308 | {0x2E, nullptr, "GetFutureThreadInfo"}, | 2402 | {0x2E, nullptr, "GetFutureThreadInfo"}, |
| 2309 | {0x2F, nullptr, "GetLastThreadInfo"}, | 2403 | {0x2F, nullptr, "GetLastThreadInfo"}, |
| 2310 | {0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"}, | 2404 | {0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"}, |
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 865473c6f..c2d8d0dc3 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h | |||
| @@ -32,6 +32,11 @@ void SvcWrap(Core::System& system) { | |||
| 32 | FuncReturn(system, func(system, Param(system, 0)).raw); | 32 | FuncReturn(system, func(system, Param(system, 0)).raw); |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | template <ResultCode func(Core::System&, u64, u64)> | ||
| 36 | void SvcWrap(Core::System& system) { | ||
| 37 | FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw); | ||
| 38 | } | ||
| 39 | |||
| 35 | template <ResultCode func(Core::System&, u32)> | 40 | template <ResultCode func(Core::System&, u32)> |
| 36 | void SvcWrap(Core::System& system) { | 41 | void SvcWrap(Core::System& system) { |
| 37 | FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); | 42 | FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); |
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 501544090..9385a8697 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | #include "core/core.h" | 12 | #include "core/core.h" |
| 13 | #include "core/file_sys/program_metadata.h" | 13 | #include "core/file_sys/program_metadata.h" |
| 14 | #include "core/hle/kernel/errors.h" | 14 | #include "core/hle/kernel/errors.h" |
| 15 | #include "core/hle/kernel/process.h" | ||
| 16 | #include "core/hle/kernel/resource_limit.h" | ||
| 15 | #include "core/hle/kernel/vm_manager.h" | 17 | #include "core/hle/kernel/vm_manager.h" |
| 16 | #include "core/memory.h" | 18 | #include "core/memory.h" |
| 17 | #include "core/memory_setup.h" | 19 | #include "core/memory_setup.h" |
| @@ -49,9 +51,8 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { | |||
| 49 | type != next.type) { | 51 | type != next.type) { |
| 50 | return false; | 52 | return false; |
| 51 | } | 53 | } |
| 52 | if (type == VMAType::AllocatedMemoryBlock && | 54 | if (type == VMAType::AllocatedMemoryBlock) { |
| 53 | (backing_block != next.backing_block || offset + size != next.offset)) { | 55 | return true; |
| 54 | return false; | ||
| 55 | } | 56 | } |
| 56 | if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) { | 57 | if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) { |
| 57 | return false; | 58 | return false; |
| @@ -100,7 +101,7 @@ bool VMManager::IsValidHandle(VMAHandle handle) const { | |||
| 100 | ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, | 101 | ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, |
| 101 | std::shared_ptr<std::vector<u8>> block, | 102 | std::shared_ptr<std::vector<u8>> block, |
| 102 | std::size_t offset, u64 size, | 103 | std::size_t offset, u64 size, |
| 103 | MemoryState state) { | 104 | MemoryState state, VMAPermission perm) { |
| 104 | ASSERT(block != nullptr); | 105 | ASSERT(block != nullptr); |
| 105 | ASSERT(offset + size <= block->size()); | 106 | ASSERT(offset + size <= block->size()); |
| 106 | 107 | ||
| @@ -119,7 +120,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, | |||
| 119 | VMAPermission::ReadWriteExecute); | 120 | VMAPermission::ReadWriteExecute); |
| 120 | 121 | ||
| 121 | final_vma.type = VMAType::AllocatedMemoryBlock; | 122 | final_vma.type = VMAType::AllocatedMemoryBlock; |
| 122 | final_vma.permissions = VMAPermission::ReadWrite; | 123 | final_vma.permissions = perm; |
| 123 | final_vma.state = state; | 124 | final_vma.state = state; |
| 124 | final_vma.backing_block = std::move(block); | 125 | final_vma.backing_block = std::move(block); |
| 125 | final_vma.offset = offset; | 126 | final_vma.offset = offset; |
| @@ -308,6 +309,258 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { | |||
| 308 | return MakeResult<VAddr>(heap_region_base); | 309 | return MakeResult<VAddr>(heap_region_base); |
| 309 | } | 310 | } |
| 310 | 311 | ||
| 312 | ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) { | ||
| 313 | const auto last_addr = target + size - 1; | ||
| 314 | VAddr cur_addr = target; | ||
| 315 | std::size_t mapped_size = 0; | ||
| 316 | |||
| 317 | ResultCode result = RESULT_SUCCESS; | ||
| 318 | |||
| 319 | // Check whether we've already mapped the desired memory. | ||
| 320 | { | ||
| 321 | auto vma = FindVMA(target); | ||
| 322 | ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end"); | ||
| 323 | |||
| 324 | while (true) { | ||
| 325 | const auto vma_start = vma->second.base; | ||
| 326 | const auto vma_size = vma->second.size; | ||
| 327 | const auto state = vma->second.state; | ||
| 328 | |||
| 329 | // Handle last block. | ||
| 330 | if (last_addr <= (vma_start + vma_size - 1)) { | ||
| 331 | if (state != MemoryState::Unmapped) { | ||
| 332 | mapped_size += last_addr - cur_addr + 1; | ||
| 333 | } | ||
| 334 | break; | ||
| 335 | } | ||
| 336 | |||
| 337 | if (state != MemoryState::Unmapped) { | ||
| 338 | mapped_size += vma_start + vma_size - cur_addr; | ||
| 339 | } | ||
| 340 | cur_addr = vma_start + vma_size; | ||
| 341 | vma++; | ||
| 342 | ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end"); | ||
| 343 | } | ||
| 344 | |||
| 345 | // If we already have the desired amount mapped, we're done. | ||
| 346 | if (mapped_size == size) { | ||
| 347 | return RESULT_SUCCESS; | ||
| 348 | } | ||
| 349 | } | ||
| 350 | |||
| 351 | // Check that we can map the memory we want. | ||
| 352 | const auto res_limit = Core::CurrentProcess()->GetResourceLimit(); | ||
| 353 | const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) - | ||
| 354 | res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory); | ||
| 355 | if (physmem_remaining < (size - mapped_size)) { | ||
| 356 | return ERR_RESOURCE_LIMIT_EXCEEDED; | ||
| 357 | } | ||
| 358 | |||
| 359 | // Keep track of the memory regions we unmap. | ||
| 360 | std::vector<std::pair<u64, u64>> mapped_regions; | ||
| 361 | |||
| 362 | // Iterate, trying to map memory. | ||
| 363 | // Map initially with VMAPermission::None. | ||
| 364 | { | ||
| 365 | cur_addr = target; | ||
| 366 | |||
| 367 | auto vma = FindVMA(target); | ||
| 368 | ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end"); | ||
| 369 | |||
| 370 | while (true) { | ||
| 371 | const auto vma_start = vma->second.base; | ||
| 372 | const auto vma_size = vma->second.size; | ||
| 373 | const auto state = vma->second.state; | ||
| 374 | |||
| 375 | // Handle last block. | ||
| 376 | if (last_addr <= (vma_start + vma_size - 1)) { | ||
| 377 | if (state == MemoryState::Unmapped) { | ||
| 378 | const auto map_res = MapMemoryBlock( | ||
| 379 | cur_addr, std::make_shared<std::vector<u8>>(last_addr - cur_addr + 1, 0), 0, | ||
| 380 | last_addr - cur_addr + 1, MemoryState::Heap, VMAPermission::None); | ||
| 381 | result = map_res.Code(); | ||
| 382 | if (result.IsSuccess()) { | ||
| 383 | mapped_regions.push_back( | ||
| 384 | std::make_pair(cur_addr, last_addr - cur_addr + 1)); | ||
| 385 | } | ||
| 386 | } | ||
| 387 | break; | ||
| 388 | } | ||
| 389 | |||
| 390 | if (state == MemoryState::Unmapped) { | ||
| 391 | const auto map_res = MapMemoryBlock( | ||
| 392 | cur_addr, std::make_shared<std::vector<u8>>(vma_start + vma_size - cur_addr, 0), | ||
| 393 | 0, vma_start + vma_size - cur_addr, MemoryState::Heap, VMAPermission::None); | ||
| 394 | result = map_res.Code(); | ||
| 395 | if (result.IsSuccess()) { | ||
| 396 | mapped_regions.push_back( | ||
| 397 | std::make_pair(cur_addr, vma_start + vma_size - cur_addr)); | ||
| 398 | } else { | ||
| 399 | break; | ||
| 400 | } | ||
| 401 | } | ||
| 402 | cur_addr = vma_start + vma_size; | ||
| 403 | vma = FindVMA(cur_addr); | ||
| 404 | ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end"); | ||
| 405 | } | ||
| 406 | } | ||
| 407 | |||
| 408 | // If we failed, unmap memory. | ||
| 409 | if (result.IsError()) { | ||
| 410 | for (const auto& it : mapped_regions) { | ||
| 411 | const auto unmap_res = UnmapRange(it.first, it.second); | ||
| 412 | ASSERT_MSG(unmap_res.IsSuccess(), "MapPhysicalMemory un-map on error"); | ||
| 413 | } | ||
| 414 | |||
| 415 | return result; | ||
| 416 | } | ||
| 417 | |||
| 418 | // We didn't fail, so reprotect all the memory to ReadWrite. | ||
| 419 | { | ||
| 420 | cur_addr = target; | ||
| 421 | |||
| 422 | auto vma = FindVMA(target); | ||
| 423 | ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end"); | ||
| 424 | |||
| 425 | while (true) { | ||
| 426 | const auto vma_start = vma->second.base; | ||
| 427 | const auto vma_size = vma->second.size; | ||
| 428 | const auto state = vma->second.state; | ||
| 429 | const auto perm = vma->second.permissions; | ||
| 430 | |||
| 431 | // Handle last block. | ||
| 432 | if (last_addr <= (vma_start + vma_size - 1)) { | ||
| 433 | if (state == MemoryState::Heap && perm == VMAPermission::None) { | ||
| 434 | ASSERT_MSG( | ||
| 435 | ReprotectRange(cur_addr, last_addr - cur_addr + 1, VMAPermission::ReadWrite) | ||
| 436 | .IsSuccess(), | ||
| 437 | "MapPhysicalMemory reprotect"); | ||
| 438 | } | ||
| 439 | break; | ||
| 440 | } | ||
| 441 | |||
| 442 | if (state == MemoryState::Heap && perm == VMAPermission::None) { | ||
| 443 | ASSERT_MSG(ReprotectRange(cur_addr, vma_start + vma_size - cur_addr, | ||
| 444 | VMAPermission::ReadWrite) | ||
| 445 | .IsSuccess(), | ||
| 446 | "MapPhysicalMemory reprotect"); | ||
| 447 | } | ||
| 448 | cur_addr = vma_start + vma_size; | ||
| 449 | vma = FindVMA(cur_addr); | ||
| 450 | ASSERT_MSG(vma != vma_map.end(), "MapPhysicalMemory vma != end"); | ||
| 451 | } | ||
| 452 | } | ||
| 453 | |||
| 454 | // Update amount of mapped physical memory. | ||
| 455 | physical_memory_mapped += size - mapped_size; | ||
| 456 | |||
| 457 | return RESULT_SUCCESS; | ||
| 458 | } | ||
| 459 | |||
| 460 | ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) { | ||
| 461 | auto last_addr = target + size - 1; | ||
| 462 | VAddr cur_addr = target; | ||
| 463 | std::size_t mapped_size = 0; | ||
| 464 | |||
| 465 | ResultCode result = RESULT_SUCCESS; | ||
| 466 | |||
| 467 | // Check how much of the memory is currently mapped. | ||
| 468 | { | ||
| 469 | auto vma = FindVMA(target); | ||
| 470 | ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end"); | ||
| 471 | |||
| 472 | while (true) { | ||
| 473 | const auto vma_start = vma->second.base; | ||
| 474 | const auto vma_size = vma->second.size; | ||
| 475 | const auto state = vma->second.state; | ||
| 476 | const auto attr = vma->second.attribute; | ||
| 477 | |||
| 478 | // Memory within region must be free or mapped heap. | ||
| 479 | if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) || | ||
| 480 | (state == MemoryState::Unmapped))) { | ||
| 481 | return ERR_INVALID_ADDRESS_STATE; | ||
| 482 | } | ||
| 483 | |||
| 484 | // If this is the last block and it's mapped, update mapped size. | ||
| 485 | if (last_addr <= (vma_start + vma_size - 1)) { | ||
| 486 | if (state == MemoryState::Heap) { | ||
| 487 | mapped_size += last_addr - cur_addr + 1; | ||
| 488 | } | ||
| 489 | break; | ||
| 490 | } | ||
| 491 | |||
| 492 | if (state == MemoryState::Heap) { | ||
| 493 | mapped_size += vma_start + vma_size - cur_addr; | ||
| 494 | } | ||
| 495 | cur_addr = vma_start + vma_size; | ||
| 496 | vma++; | ||
| 497 | ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end"); | ||
| 498 | } | ||
| 499 | |||
| 500 | // If memory is already unmapped, we're done. | ||
| 501 | if (mapped_size == 0) { | ||
| 502 | return RESULT_SUCCESS; | ||
| 503 | } | ||
| 504 | } | ||
| 505 | |||
| 506 | // Keep track of the memory regions we unmap. | ||
| 507 | std::vector<std::pair<u64, u64>> unmapped_regions; | ||
| 508 | |||
| 509 | // Try to unmap regions. | ||
| 510 | { | ||
| 511 | cur_addr = target; | ||
| 512 | |||
| 513 | auto vma = FindVMA(target); | ||
| 514 | ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end"); | ||
| 515 | |||
| 516 | while (true) { | ||
| 517 | const auto vma_start = vma->second.base; | ||
| 518 | const auto vma_size = vma->second.size; | ||
| 519 | const auto state = vma->second.state; | ||
| 520 | const auto perm = vma->second.permissions; | ||
| 521 | |||
| 522 | // Handle last block. | ||
| 523 | if (last_addr <= (vma_start + vma_size - 1)) { | ||
| 524 | if (state == MemoryState::Heap) { | ||
| 525 | result = UnmapRange(cur_addr, last_addr - cur_addr + 1); | ||
| 526 | if (result.IsSuccess()) { | ||
| 527 | unmapped_regions.push_back( | ||
| 528 | std::make_pair(cur_addr, last_addr - cur_addr + 1)); | ||
| 529 | } | ||
| 530 | } | ||
| 531 | break; | ||
| 532 | } | ||
| 533 | |||
| 534 | if (state == MemoryState::Heap) { | ||
| 535 | result = UnmapRange(cur_addr, vma_start + vma_size - cur_addr); | ||
| 536 | if (result.IsSuccess()) { | ||
| 537 | unmapped_regions.push_back( | ||
| 538 | std::make_pair(cur_addr, vma_start + vma_size - cur_addr)); | ||
| 539 | } else { | ||
| 540 | break; | ||
| 541 | } | ||
| 542 | } | ||
| 543 | |||
| 544 | cur_addr = vma_start + vma_size; | ||
| 545 | vma = FindVMA(cur_addr); | ||
| 546 | ASSERT_MSG(vma != vma_map.end(), "UnmapPhysicalMemory vma != end"); | ||
| 547 | } | ||
| 548 | } | ||
| 549 | |||
| 550 | // If we failed, re-map regions. | ||
| 551 | // TODO: Preserve memory contents? | ||
| 552 | if (result.IsError()) { | ||
| 553 | for (const auto& it : unmapped_regions) { | ||
| 554 | const auto remap_res = | ||
| 555 | MapMemoryBlock(it.first, std::make_shared<std::vector<u8>>(it.second, 0), 0, | ||
| 556 | it.second, MemoryState::Heap, VMAPermission::None); | ||
| 557 | ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error"); | ||
| 558 | } | ||
| 559 | } | ||
| 560 | |||
| 561 | return RESULT_SUCCESS; | ||
| 562 | } | ||
| 563 | |||
| 311 | ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { | 564 | ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { |
| 312 | constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; | 565 | constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; |
| 313 | const auto src_check_result = CheckRangeState( | 566 | const auto src_check_result = CheckRangeState( |
| @@ -455,7 +708,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem | |||
| 455 | // Protect mirror with permissions from old region | 708 | // Protect mirror with permissions from old region |
| 456 | Reprotect(new_vma, vma->second.permissions); | 709 | Reprotect(new_vma, vma->second.permissions); |
| 457 | // Remove permissions from old region | 710 | // Remove permissions from old region |
| 458 | Reprotect(vma, VMAPermission::None); | 711 | ReprotectRange(src_addr, size, VMAPermission::None); |
| 459 | 712 | ||
| 460 | return RESULT_SUCCESS; | 713 | return RESULT_SUCCESS; |
| 461 | } | 714 | } |
| @@ -588,14 +841,14 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) { | |||
| 588 | VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { | 841 | VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { |
| 589 | const VMAIter next_vma = std::next(iter); | 842 | const VMAIter next_vma = std::next(iter); |
| 590 | if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { | 843 | if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { |
| 591 | iter->second.size += next_vma->second.size; | 844 | MergeAdjacentVMA(iter->second, next_vma->second); |
| 592 | vma_map.erase(next_vma); | 845 | vma_map.erase(next_vma); |
| 593 | } | 846 | } |
| 594 | 847 | ||
| 595 | if (iter != vma_map.begin()) { | 848 | if (iter != vma_map.begin()) { |
| 596 | VMAIter prev_vma = std::prev(iter); | 849 | VMAIter prev_vma = std::prev(iter); |
| 597 | if (prev_vma->second.CanBeMergedWith(iter->second)) { | 850 | if (prev_vma->second.CanBeMergedWith(iter->second)) { |
| 598 | prev_vma->second.size += iter->second.size; | 851 | MergeAdjacentVMA(prev_vma->second, iter->second); |
| 599 | vma_map.erase(iter); | 852 | vma_map.erase(iter); |
| 600 | iter = prev_vma; | 853 | iter = prev_vma; |
| 601 | } | 854 | } |
| @@ -604,6 +857,57 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { | |||
| 604 | return iter; | 857 | return iter; |
| 605 | } | 858 | } |
| 606 | 859 | ||
| 860 | void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) { | ||
| 861 | ASSERT(left.CanBeMergedWith(right)); | ||
| 862 | |||
| 863 | // Always merge allocated memory blocks, even when they don't share the same backing block. | ||
| 864 | if (left.type == VMAType::AllocatedMemoryBlock && | ||
| 865 | (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) { | ||
| 866 | // Check if we can save work. | ||
| 867 | if (left.offset == 0 && left.size == left.backing_block->size()) { | ||
| 868 | // Fast case: left is an entire backing block. | ||
| 869 | left.backing_block->insert(left.backing_block->end(), | ||
| 870 | right.backing_block->begin() + right.offset, | ||
| 871 | right.backing_block->begin() + right.offset + right.size); | ||
| 872 | } else { | ||
| 873 | // Slow case: make a new memory block for left and right. | ||
| 874 | auto new_memory = std::make_shared<std::vector<u8>>(); | ||
| 875 | new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset, | ||
| 876 | left.backing_block->begin() + left.offset + left.size); | ||
| 877 | new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset, | ||
| 878 | right.backing_block->begin() + right.offset + right.size); | ||
| 879 | left.backing_block = new_memory; | ||
| 880 | left.offset = 0; | ||
| 881 | } | ||
| 882 | |||
| 883 | // Page table update is needed, because backing memory changed. | ||
| 884 | left.size += right.size; | ||
| 885 | UpdatePageTableForVMA(left); | ||
| 886 | |||
| 887 | // Update mappings for unicorn. | ||
| 888 | system.ArmInterface(0).UnmapMemory(left.base, left.size); | ||
| 889 | system.ArmInterface(1).UnmapMemory(left.base, left.size); | ||
| 890 | system.ArmInterface(2).UnmapMemory(left.base, left.size); | ||
| 891 | system.ArmInterface(3).UnmapMemory(left.base, left.size); | ||
| 892 | |||
| 893 | system.ArmInterface(0).MapBackingMemory(left.base, left.size, | ||
| 894 | left.backing_block->data() + left.offset, | ||
| 895 | VMAPermission::ReadWriteExecute); | ||
| 896 | system.ArmInterface(1).MapBackingMemory(left.base, left.size, | ||
| 897 | left.backing_block->data() + left.offset, | ||
| 898 | VMAPermission::ReadWriteExecute); | ||
| 899 | system.ArmInterface(2).MapBackingMemory(left.base, left.size, | ||
| 900 | left.backing_block->data() + left.offset, | ||
| 901 | VMAPermission::ReadWriteExecute); | ||
| 902 | system.ArmInterface(3).MapBackingMemory(left.base, left.size, | ||
| 903 | left.backing_block->data() + left.offset, | ||
| 904 | VMAPermission::ReadWriteExecute); | ||
| 905 | } else { | ||
| 906 | // Just update the size. | ||
| 907 | left.size += right.size; | ||
| 908 | } | ||
| 909 | } | ||
| 910 | |||
| 607 | void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { | 911 | void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { |
| 608 | switch (vma.type) { | 912 | switch (vma.type) { |
| 609 | case VMAType::Free: | 913 | case VMAType::Free: |
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 9fe6ac3f4..16f40ad00 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h | |||
| @@ -349,7 +349,8 @@ public: | |||
| 349 | * @param state MemoryState tag to attach to the VMA. | 349 | * @param state MemoryState tag to attach to the VMA. |
| 350 | */ | 350 | */ |
| 351 | ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, | 351 | ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, |
| 352 | std::size_t offset, u64 size, MemoryState state); | 352 | std::size_t offset, u64 size, MemoryState state, |
| 353 | VMAPermission perm = VMAPermission::ReadWrite); | ||
| 353 | 354 | ||
| 354 | /** | 355 | /** |
| 355 | * Maps an unmanaged host memory pointer at a given address. | 356 | * Maps an unmanaged host memory pointer at a given address. |
| @@ -450,6 +451,34 @@ public: | |||
| 450 | /// | 451 | /// |
| 451 | ResultVal<VAddr> SetHeapSize(u64 size); | 452 | ResultVal<VAddr> SetHeapSize(u64 size); |
| 452 | 453 | ||
| 454 | /// Maps memory at a given address. | ||
| 455 | /// | ||
| 456 | /// @param target The virtual address to map memory at. | ||
| 457 | /// @param size The amount of memory to map. | ||
| 458 | /// | ||
| 459 | /// @note The destination address must lie within the Map region. | ||
| 460 | /// | ||
| 461 | /// @note This function requires that SystemResourceSize be non-zero, | ||
| 462 | /// however, this is just because if it were not then the | ||
| 463 | /// resulting page tables could be exploited on hardware by | ||
| 464 | /// a malicious program. SystemResource usage does not need | ||
| 465 | /// to be explicitly checked or updated here. | ||
| 466 | ResultCode MapPhysicalMemory(VAddr target, u64 size); | ||
| 467 | |||
| 468 | /// Unmaps memory at a given address. | ||
| 469 | /// | ||
| 470 | /// @param target The virtual address to unmap memory at. | ||
| 471 | /// @param size The amount of memory to unmap. | ||
| 472 | /// | ||
| 473 | /// @note The destination address must lie within the Map region. | ||
| 474 | /// | ||
| 475 | /// @note This function requires that SystemResourceSize be non-zero, | ||
| 476 | /// however, this is just because if it were not then the | ||
| 477 | /// resulting page tables could be exploited on hardware by | ||
| 478 | /// a malicious program. SystemResource usage does not need | ||
| 479 | /// to be explicitly checked or updated here. | ||
| 480 | ResultCode UnmapPhysicalMemory(VAddr target, u64 size); | ||
| 481 | |||
| 453 | /// Maps a region of memory as code memory. | 482 | /// Maps a region of memory as code memory. |
| 454 | /// | 483 | /// |
| 455 | /// @param dst_address The base address of the region to create the aliasing memory region. | 484 | /// @param dst_address The base address of the region to create the aliasing memory region. |
| @@ -657,6 +686,11 @@ private: | |||
| 657 | */ | 686 | */ |
| 658 | VMAIter MergeAdjacent(VMAIter vma); | 687 | VMAIter MergeAdjacent(VMAIter vma); |
| 659 | 688 | ||
| 689 | /** | ||
| 690 | * Merges two adjacent VMAs. | ||
| 691 | */ | ||
| 692 | void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right); | ||
| 693 | |||
| 660 | /// Updates the pages corresponding to this VMA so they match the VMA's attributes. | 694 | /// Updates the pages corresponding to this VMA so they match the VMA's attributes. |
| 661 | void UpdatePageTableForVMA(const VirtualMemoryArea& vma); | 695 | void UpdatePageTableForVMA(const VirtualMemoryArea& vma); |
| 662 | 696 | ||
| @@ -742,6 +776,11 @@ private: | |||
| 742 | // end of the range. This is essentially 'base_address + current_size'. | 776 | // end of the range. This is essentially 'base_address + current_size'. |
| 743 | VAddr heap_end = 0; | 777 | VAddr heap_end = 0; |
| 744 | 778 | ||
| 779 | // The current amount of memory mapped via MapPhysicalMemory. | ||
| 780 | // This is used here (and in Nintendo's kernel) only for debugging, and does not impact | ||
| 781 | // any behavior. | ||
| 782 | u64 physical_memory_mapped = 0; | ||
| 783 | |||
| 745 | Core::System& system; | 784 | Core::System& system; |
| 746 | }; | 785 | }; |
| 747 | } // namespace Kernel | 786 | } // namespace Kernel |